git.ipfire.org Git - thirdparty/systemd.git / blame - src/core/manager.c
db9ecf05 1/* SPDX-License-Identifier: LGPL-2.1-or-later */
a7334b09 2
60918275 3#include <errno.h>
400f1a33
LP
4#include <fcntl.h>
5#include <linux/kd.h>
e46b13c8 6#include <sys/epoll.h>
400f1a33 7#include <sys/inotify.h>
e1414003 8#include <sys/ioctl.h>
ca6ce62d 9#include <sys/mount.h>
400f1a33 10#include <sys/reboot.h>
8742514c 11#include <sys/timerfd.h>
40efaaed 12#include <sys/utsname.h>
400f1a33
LP
13#include <sys/wait.h>
14#include <unistd.h>
830f6caa 15
349cc4a5 16#if HAVE_AUDIT
4927fcae 17#include <libaudit.h>
830f6caa 18#endif
60918275 19
718db961 20#include "sd-daemon.h"
718db961 21#include "sd-messages.h"
3536f49e 22#include "sd-path.h"
81527be1 23
57b7a260 24#include "all-units.h"
d68c645b 25#include "alloc-util.h"
400f1a33
LP
26#include "audit-fd.h"
27#include "boot-timestamps.h"
28#include "bus-common-errors.h"
29#include "bus-error.h"
30#include "bus-kernel.h"
31#include "bus-util.h"
00d9ef85 32#include "clean-ipc.h"
af6b0ecc 33#include "clock-util.h"
29e6b0c1 34#include "common-signal.h"
08951245 35#include "confidential-virt.h"
28db6fbf 36#include "constants.h"
19d22d43 37#include "core-varlink.h"
786d19fd 38#include "creds-util.h"
400f1a33
LP
39#include "dbus-job.h"
40#include "dbus-manager.h"
41#include "dbus-unit.h"
42#include "dbus.h"
d063a527 43#include "dirent-util.h"
400f1a33 44#include "env-util.h"
4f5dd394 45#include "escape.h"
ec75e8e0 46#include "event-util.h"
89711996 47#include "exec-util.h"
d3070fbd 48#include "execute.h"
400f1a33 49#include "exit-status.h"
3ffd4af2 50#include "fd-util.h"
0d39fa9c 51#include "fileio.h"
385093b7 52#include "generator-setup.h"
60918275 53#include "hashmap.h"
baa6a42d 54#include "initrd-util.h"
9e5fd717 55#include "inotify-util.h"
5cfa33e0 56#include "install.h"
19d22d43 57#include "io-util.h"
0690160e 58#include "label-util.h"
d904afc7 59#include "load-fragment.h"
786d19fd 60#include "locale-setup.h"
16354eff 61#include "log.h"
400f1a33 62#include "macro.h"
3ffd4af2 63#include "manager.h"
2a341bb9 64#include "manager-dump.h"
a01ba4b2 65#include "manager-serialize.h"
0a970718 66#include "memory-util.h"
35cd0ba5 67#include "mkdir-label.h"
ca6ce62d 68#include "mount-util.h"
4bd03515 69#include "os-util.h"
6bedfcbb 70#include "parse-util.h"
400f1a33
LP
71#include "path-lookup.h"
72#include "path-util.h"
aa25e19b 73#include "plymouth-util.h"
d61a4dbb 74#include "pretty-print.h"
400f1a33 75#include "process-util.h"
6bb00842 76#include "psi-util.h"
ea430986 77#include "ratelimit.h"
31ce987c 78#include "rlimit-util.h"
c6878637 79#include "rm-rf.h"
45ae2f72 80#include "selinux-util.h"
400f1a33 81#include "signal-util.h"
57b7a260 82#include "socket-util.h"
514f4ef5 83#include "special.h"
8fcde012 84#include "stat-util.h"
8b43440b 85#include "string-table.h"
07630cea 86#include "string-util.h"
400f1a33 87#include "strv.h"
dd1db3c2 88#include "strxcpyx.h"
6123dfaa 89#include "sysctl-util.h"
a6ecbf83 90#include "syslog-util.h"
400f1a33
LP
91#include "terminal-util.h"
92#include "time-util.h"
93#include "transaction.h"
63e8df04 94#include "uid-range.h"
affb60b1 95#include "umask-util.h"
400f1a33 96#include "unit-name.h"
00d9ef85 97#include "user-util.h"
5dc4c17f 98#include "virt.h"
e96d6be7 99#include "watchdog.h"
60918275 100
a47806fa 101#define NOTIFY_RCVBUF_SIZE (8*1024*1024)
d8fdc620 102#define CGROUPS_AGENT_RCVBUF_SIZE (8*1024*1024)
a47806fa 103
03b717a3 104/* Initial delay and the interval for printing status messages about running jobs */
1b4154a8
ZJS
105#define JOBS_IN_PROGRESS_WAIT_USEC (2*USEC_PER_SEC)
106#define JOBS_IN_PROGRESS_QUIET_WAIT_USEC (25*USEC_PER_SEC)
fd08a840 107#define JOBS_IN_PROGRESS_PERIOD_USEC (USEC_PER_SEC / 3)
03b717a3
MS
108#define JOBS_IN_PROGRESS_PERIOD_DIVISOR 3
109
5238e957 110/* If there are more than 1K bus messages queued across our API and direct buses, then let's not add more on top until
e0a08581 111 * the queue empties out a bit. */
112#define MANAGER_BUS_BUSY_THRESHOLD 1024LU
113
114/* How many units and jobs to process off the bus queue before returning to the event loop. */
115#define MANAGER_BUS_MESSAGE_BUDGET 100U
116
94f0b13b 117#define DEFAULT_TASKS_MAX ((CGroupTasksMax) { 15U, 100U }) /* 15% */
ea09a416 118
718db961 119static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
d8fdc620 120static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
718db961
LP
121static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
122static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
123static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
00d9ef85 124static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
718db961 125static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata);
752b5905 126static int manager_dispatch_run_queue(sd_event_source *source, void *userdata);
575b300b 127static int manager_dispatch_sigchld(sd_event_source *source, void *userdata);
bbf5fd8e 128static int manager_dispatch_timezone_change(sd_event_source *source, const struct inotify_event *event, void *userdata);
64691d20 129static int manager_run_environment_generators(Manager *m);
e801700e 130static int manager_run_generators(Manager *m);
06a4eb07 131static void manager_vacuum(Manager *m);
718db961 132
1b4154a8 133static usec_t manager_watch_jobs_next_time(Manager *m) {
3889fc6f
ZJS
134 usec_t timeout;
135
136 if (MANAGER_IS_USER(m))
137 /* Let the user manager without a timeout show status quickly, so the system manager can make
138 * use of it, if it wants to. */
139 timeout = JOBS_IN_PROGRESS_WAIT_USEC * 2 / 3;
140 else if (show_status_on(m->show_status))
141 /* When status is on, just use the usual timeout. */
142 timeout = JOBS_IN_PROGRESS_WAIT_USEC;
143 else
144 timeout = JOBS_IN_PROGRESS_QUIET_WAIT_USEC;
145
146 return usec_add(now(CLOCK_MONOTONIC), timeout);
1b4154a8
ZJS
147}
148
b646fc32
LB
149static bool manager_is_confirm_spawn_disabled(Manager *m) {
150 assert(m);
151
152 if (!m->confirm_spawn)
153 return true;
154
155 return access("/run/systemd/confirm_spawn_disabled", F_OK) >= 0;
156}
157
2ae56591 158static void manager_watch_jobs_in_progress(Manager *m) {
e5723c89 159 usec_t next;
cfa9677b 160 int r;
e5723c89 161
718db961 162 assert(m);
03b717a3 163
42bf1ae1
FB
164 /* We do not want to show the cylon animation if the user
165 * needs to confirm service executions, since otherwise the
166 * confirmation messages would be garbled by the animation. */
b0eb2944 167 if (!manager_is_confirm_spawn_disabled(m))
42bf1ae1
FB
168 return;
169
718db961 170 if (m->jobs_in_progress_event_source)
2ae56591 171 return;
03b717a3 172
1b4154a8 173 next = manager_watch_jobs_next_time(m);
cfa9677b 174 r = sd_event_add_time(
6a0f1f6d
LP
175 m->event,
176 &m->jobs_in_progress_event_source,
177 CLOCK_MONOTONIC,
178 next, 0,
179 manager_dispatch_jobs_in_progress, m);
cfa9677b
MM
180 if (r < 0)
181 return;
7dfbe2e3
TG
182
183 (void) sd_event_source_set_description(m->jobs_in_progress_event_source, "manager-jobs-in-progress");
03b717a3
MS
184}
185
1addc46c 186static void manager_flip_auto_status(Manager *m, bool enable, const char *reason) {
f755e3b7
LP
187 assert(m);
188
cb8ccb22
ZJS
189 if (enable) {
190 if (m->show_status == SHOW_STATUS_AUTO)
7365a296 191 manager_set_show_status(m, SHOW_STATUS_TEMPORARY, reason);
cb8ccb22
ZJS
192 } else {
193 if (m->show_status == SHOW_STATUS_TEMPORARY)
7365a296 194 manager_set_show_status(m, SHOW_STATUS_AUTO, reason);
cb8ccb22
ZJS
195 }
196}
197
03b717a3 198static void manager_print_jobs_in_progress(Manager *m) {
03b717a3 199 Job *j;
03b717a3
MS
200 unsigned counter = 0, print_nr;
201 char cylon[6 + CYLON_BUFFER_EXTRA + 1];
202 unsigned cylon_pos;
4c989f89 203 uint64_t timeout = 0;
03b717a3 204
718db961 205 assert(m);
9c3349e2 206 assert(m->n_running_jobs > 0);
718db961 207
7365a296 208 manager_flip_auto_status(m, true, "delay");
d450b6f2 209
03b717a3
MS
210 print_nr = (m->jobs_in_progress_iteration / JOBS_IN_PROGRESS_PERIOD_DIVISOR) % m->n_running_jobs;
211
90e74a66 212 HASHMAP_FOREACH(j, m->jobs)
03b717a3
MS
213 if (j->state == JOB_RUNNING && counter++ == print_nr)
214 break;
215
e970a72e
MS
216 /* m->n_running_jobs must be consistent with the contents of m->jobs,
217 * so the above loop must have succeeded in finding j. */
218 assert(counter == print_nr + 1);
51d122af 219 assert(j);
5a82a91a 220
03b717a3
MS
221 cylon_pos = m->jobs_in_progress_iteration % 14;
222 if (cylon_pos >= 8)
223 cylon_pos = 14 - cylon_pos;
224 draw_cylon(cylon, sizeof(cylon), 6, cylon_pos);
225
8bb310c3
ZJS
226 m->jobs_in_progress_iteration++;
227
3889fc6f 228 char job_of_n[STRLEN("( of ) ") + DECIMAL_STR_MAX(unsigned)*2] = "";
5291f26d 229 if (m->n_running_jobs > 1)
3889fc6f 230 xsprintf(job_of_n, "(%u of %u) ", counter, m->n_running_jobs);
03b717a3 231
4c989f89 232 (void) job_get_timeout(j, &timeout);
8bb310c3 233
04d232d8
ZJS
234 /* We want to use enough information for the user to identify previous lines talking about the same
235 * unit, but keep the message as short as possible. So if 'Starting foo.service' or 'Starting
3889fc6f 236 * foo.service - Description' were used, 'foo.service' is enough here. On the other hand, if we used
04d232d8
ZJS
237 * 'Starting Description' before, then we shall also use 'Description' here. So we pass NULL as the
238 * second argument to unit_status_string(). */
239 const char *ident = unit_status_string(j->unit, NULL);
240
3889fc6f 241 const char *time = FORMAT_TIMESPAN(now(CLOCK_MONOTONIC) - j->begin_usec, 1*USEC_PER_SEC);
4c989f89 242 const char *limit = timeout > 0 ? FORMAT_TIMESPAN(timeout - j->begin_usec, 1*USEC_PER_SEC) : "no limit";
3889fc6f
ZJS
243
244 if (m->status_unit_format == STATUS_UNIT_FORMAT_DESCRIPTION)
245 /* When using 'Description', we effectively don't have enough space to show the nested status
246 * without ellipsization, so let's not even try. */
247 manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
248 "%sA %s job is running for %s (%s / %s)",
249 job_of_n,
250 job_type_to_string(j->type),
251 ident,
252 time, limit);
253 else {
254 const char *status_text = unit_status_text(j->unit);
255
256 manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
257 "%sJob %s/%s running (%s / %s)%s%s",
258 job_of_n,
259 ident,
260 job_type_to_string(j->type),
261 time, limit,
262 status_text ? ": " : "",
263 strempty(status_text));
264 }
265
266 sd_notifyf(false,
267 "STATUS=%sUser job %s/%s running (%s / %s)...",
268 job_of_n,
269 ident,
270 job_type_to_string(j->type),
271 time, limit);
272 m->status_ready = false;
03b717a3
MS
273}
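/* Editor's note (not part of the original file), a worked example of the arithmetic above: the
 * job being shown (print_nr) only advances every JOBS_IN_PROGRESS_PERIOD_DIVISOR-th tick, while
 * cylon_pos uses the raw iteration count. The "% 14" folding maps successive ticks to the
 * positions 0,1,2,3,4,5,6,7,6,5,4,3,2,1, so the highlighted cell sweeps back and forth across
 * the bar instead of wrapping around. */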
274
e46b13c8 275static int have_ask_password(void) {
c2b2df60 276 _cleanup_closedir_ DIR *dir = NULL;
e46b13c8
ZJS
277
278 dir = opendir("/run/systemd/ask-password");
279 if (!dir) {
280 if (errno == ENOENT)
281 return false;
282 else
283 return -errno;
284 }
285
c7f0d9e5 286 FOREACH_DIRENT_ALL(de, dir, return -errno)
e46b13c8
ZJS
287 if (startswith(de->d_name, "ask."))
288 return true;
8fb3f009 289 return false;
e46b13c8
ZJS
290}
291
292static int manager_dispatch_ask_password_fd(sd_event_source *source,
293 int fd, uint32_t revents, void *userdata) {
99534007 294 Manager *m = ASSERT_PTR(userdata);
e46b13c8 295
665dfe93 296 (void) flush_fd(fd);
e46b13c8
ZJS
297
298 m->have_ask_password = have_ask_password();
299 if (m->have_ask_password < 0)
300 /* Log error but continue. Negative have_ask_password
301 * is treated as unknown status. */
c33b3297 302 log_error_errno(m->have_ask_password, "Failed to list /run/systemd/ask-password: %m");
e46b13c8
ZJS
303
304 return 0;
305}
306
307static void manager_close_ask_password(Manager *m) {
308 assert(m);
309
5dcadb4c 310 m->ask_password_event_source = sd_event_source_disable_unref(m->ask_password_event_source);
90990e28 311 m->ask_password_inotify_fd = safe_close(m->ask_password_inotify_fd);
e46b13c8
ZJS
312 m->have_ask_password = -EINVAL;
313}
314
315static int manager_check_ask_password(Manager *m) {
316 int r;
317
318 assert(m);
319
320 if (!m->ask_password_event_source) {
321 assert(m->ask_password_inotify_fd < 0);
322
17e4b070 323 (void) mkdir_p_label("/run/systemd/ask-password", 0755);
e46b13c8
ZJS
324
325 m->ask_password_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
4a62c710 326 if (m->ask_password_inotify_fd < 0)
330b8fb3 327 return log_error_errno(errno, "Failed to create inotify object: %m");
e46b13c8 328
27c3112d
FB
329 r = inotify_add_watch_and_warn(m->ask_password_inotify_fd,
330 "/run/systemd/ask-password",
331 IN_CREATE|IN_DELETE|IN_MOVE);
332 if (r < 0) {
e46b13c8 333 manager_close_ask_password(m);
27c3112d 334 return r;
e46b13c8
ZJS
335 }
336
337 r = sd_event_add_io(m->event, &m->ask_password_event_source,
338 m->ask_password_inotify_fd, EPOLLIN,
339 manager_dispatch_ask_password_fd, m);
340 if (r < 0) {
df3d3bdf 341 log_error_errno(r, "Failed to add event source for /run/systemd/ask-password: %m");
e46b13c8 342 manager_close_ask_password(m);
df3d3bdf 343 return r;
e46b13c8
ZJS
344 }
345
7dfbe2e3
TG
346 (void) sd_event_source_set_description(m->ask_password_event_source, "manager-ask-password");
347
e46b13c8
ZJS
348 /* Queries might have been added meanwhile... */
349 manager_dispatch_ask_password_fd(m->ask_password_event_source,
350 m->ask_password_inotify_fd, EPOLLIN, m);
351 }
352
353 return m->have_ask_password;
354}
355
31a7eb86 356static int manager_watch_idle_pipe(Manager *m) {
31a7eb86
ZJS
357 int r;
358
718db961
LP
359 assert(m);
360
361 if (m->idle_pipe_event_source)
31a7eb86
ZJS
362 return 0;
363
364 if (m->idle_pipe[2] < 0)
365 return 0;
366
151b9b96 367 r = sd_event_add_io(m->event, &m->idle_pipe_event_source, m->idle_pipe[2], EPOLLIN, manager_dispatch_idle_pipe_fd, m);
23bbb0de
MS
368 if (r < 0)
369 return log_error_errno(r, "Failed to watch idle pipe: %m");
31a7eb86 370
7dfbe2e3
TG
371 (void) sd_event_source_set_description(m->idle_pipe_event_source, "manager-idle-pipe");
372
31a7eb86 373 return 0;
31a7eb86
ZJS
374}
375
718db961
LP
376static void manager_close_idle_pipe(Manager *m) {
377 assert(m);
31a7eb86 378
5dcadb4c 379 m->idle_pipe_event_source = sd_event_source_disable_unref(m->idle_pipe_event_source);
cd72bd8a 380
3d94f76c
LP
381 safe_close_pair(m->idle_pipe);
382 safe_close_pair(m->idle_pipe + 2);
31a7eb86
ZJS
383}
384
8742514c 385static int manager_setup_time_change(Manager *m) {
718db961 386 int r;
b92bea5d 387
718db961 388 assert(m);
8742514c 389
638cece4 390 if (MANAGER_IS_TEST_RUN(m))
0d8c31ff
ZJS
391 return 0;
392
5dcadb4c 393 m->time_change_event_source = sd_event_source_disable_unref(m->time_change_event_source);
7feedd18 394
ec75e8e0 395 r = event_add_time_change(m->event, &m->time_change_event_source, manager_dispatch_time_change_fd, m);
23bbb0de
MS
396 if (r < 0)
397 return log_error_errno(r, "Failed to create time change event source: %m");
8742514c 398
a5cc7e5a
LP
399 /* Schedule this slightly earlier than the .timer event sources */
400 r = sd_event_source_set_priority(m->time_change_event_source, SD_EVENT_PRIORITY_NORMAL-1);
401 if (r < 0)
402 return log_error_errno(r, "Failed to set priority of time change event sources: %m");
403
8742514c
LP
404 log_debug("Set up TFD_TIMER_CANCEL_ON_SET timerfd.");
405
406 return 0;
407}
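/* Illustrative sketch (editor's note, not part of the original file): the TFD_TIMER_CANCEL_ON_SET
 * timerfd mentioned in the debug message above is how the kernel reports discontinuous
 * CLOCK_REALTIME changes: arm a realtime timerfd far in the future with that flag, and any clock
 * jump makes reads on it fail with ECANCELED, which wakes the event source. Stripped of error
 * details, and with far_future standing in for a suitably large time_t value, the setup is
 * roughly:
 *
 *     #include <errno.h>
 *     #include <sys/timerfd.h>
 *     #include <unistd.h>
 *
 *     static int example_time_change_fd(time_t far_future) {
 *             struct itimerspec its = { .it_value.tv_sec = far_future };
 *             int fd = timerfd_create(CLOCK_REALTIME, TFD_NONBLOCK|TFD_CLOEXEC);
 *             if (fd < 0)
 *                     return -errno;
 *             if (timerfd_settime(fd, TFD_TIMER_ABSTIME|TFD_TIMER_CANCEL_ON_SET, &its, NULL) < 0) {
 *                     int k = -errno;
 *                     close(fd);
 *                     return k;
 *             }
 *             return fd;
 *     }
 *
 * event_add_time_change() wraps this kind of pattern up as an sd-event I/O source; the helper
 * name above is made up for the example. */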
408
bbf5fd8e
LP
409static int manager_read_timezone_stat(Manager *m) {
410 struct stat st;
411 bool changed;
412
413 assert(m);
414
415 /* Read the current stat() data of /etc/localtime so that we detect changes */
416 if (lstat("/etc/localtime", &st) < 0) {
417 log_debug_errno(errno, "Failed to stat /etc/localtime, ignoring: %m");
418 changed = m->etc_localtime_accessible;
419 m->etc_localtime_accessible = false;
420 } else {
421 usec_t k;
422
423 k = timespec_load(&st.st_mtim);
424 changed = !m->etc_localtime_accessible || k != m->etc_localtime_mtime;
425
426 m->etc_localtime_mtime = k;
427 m->etc_localtime_accessible = true;
428 }
429
430 return changed;
431}
432
433static int manager_setup_timezone_change(Manager *m) {
a5cc7e5a 434 _cleanup_(sd_event_source_unrefp) sd_event_source *new_event = NULL;
bbf5fd8e
LP
435 int r;
436
437 assert(m);
438
638cece4 439 if (MANAGER_IS_TEST_RUN(m))
bbf5fd8e
LP
440 return 0;
441
442 /* We watch /etc/localtime for three events: change of the link count (which might mean removal from /etc even
443 * though another link might be kept), renames, and file close operations after writing. Note we don't bother
444 * with IN_DELETE_SELF, as that would just report when the inode is removed entirely, i.e. after the link count
445 * went to zero and all fds to it are closed.
446 *
447 * Note that we never follow symlinks here. This is a simplification, but should cover almost all cases
448 * correctly.
449 *
450 * Note that we create the new event source first here, before releasing the old one. This should optimize
451 * behaviour as this way sd-event can reuse the old watch in case the inode didn't change. */
452
453 r = sd_event_add_inotify(m->event, &new_event, "/etc/localtime",
454 IN_ATTRIB|IN_MOVE_SELF|IN_CLOSE_WRITE|IN_DONT_FOLLOW, manager_dispatch_timezone_change, m);
0cb21d8c
LP
455 if (r == -ENOENT) {
456 /* If the file doesn't exist yet, subscribe to /etc instead, and wait until it is created either by
457 * O_CREAT or by rename() */
458
459 log_debug_errno(r, "/etc/localtime doesn't exist yet, watching /etc instead.");
bbf5fd8e
LP
460 r = sd_event_add_inotify(m->event, &new_event, "/etc",
461 IN_CREATE|IN_MOVED_TO|IN_ONLYDIR, manager_dispatch_timezone_change, m);
0cb21d8c 462 }
bbf5fd8e
LP
463 if (r < 0)
464 return log_error_errno(r, "Failed to create timezone change event source: %m");
465
a5cc7e5a
LP
466 /* Schedule this slightly earlier than the .timer event sources */
467 r = sd_event_source_set_priority(new_event, SD_EVENT_PRIORITY_NORMAL-1);
468 if (r < 0)
469 return log_error_errno(r, "Failed to set priority of timezone change event sources: %m");
470
bbf5fd8e 471 sd_event_source_unref(m->timezone_change_event_source);
a5cc7e5a 472 m->timezone_change_event_source = TAKE_PTR(new_event);
bbf5fd8e
LP
473
474 return 0;
475}
476
80876c20 477static int enable_special_signals(Manager *m) {
254d1313 478 _cleanup_close_ int fd = -EBADF;
80876c20
LP
479
480 assert(m);
481
638cece4 482 if (MANAGER_IS_TEST_RUN(m))
37453b3a
EV
483 return 0;
484
a41b539e 485 /* Make sure we get SIGINT on control-alt-del. In containers
c9999773
LP
486 * this will fail with EPERM (older) or EINVAL (newer), so
487 * ignore that. */
4c701096 488 if (reboot(RB_DISABLE_CAD) < 0 && !IN_SET(errno, EPERM, EINVAL))
56f64d95 489 log_warning_errno(errno, "Failed to enable ctrl-alt-del handling: %m");
80876c20 490
a41b539e
LP
491 fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
492 if (fd < 0) {
493 /* Support systems without virtual console */
494 if (fd != -ENOENT)
56f64d95 495 log_warning_errno(errno, "Failed to open /dev/tty0: %m");
a41b539e 496 } else {
80876c20
LP
497 /* Make sure we get SIGWINCH on kbrequest */
498 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
56f64d95 499 log_warning_errno(errno, "Failed to enable kbrequest handling: %m");
80876c20
LP
500 }
501
502 return 0;
503}
504
8750ac02
ZJS
505#define RTSIG_IF_AVAILABLE(signum) (signum <= SIGRTMAX ? signum : -1)
506
ce578209 507static int manager_setup_signals(Manager *m) {
b92bea5d
ZJS
508 struct sigaction sa = {
509 .sa_handler = SIG_DFL,
510 .sa_flags = SA_NOCLDSTOP|SA_RESTART,
511 };
718db961
LP
512 sigset_t mask;
513 int r;
60918275 514
ce578209
LP
515 assert(m);
516
57c0c30e
LP
517 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
518
4dffec14
LP
519 /* We make liberal use of realtime signals here. On
520 * Linux/glibc we have 30 of them (with the exception of Linux
521 * on hppa, see below), between SIGRTMIN+0 ... SIGRTMIN+30
522 * (aka SIGRTMAX). */
7d793605 523
4dffec14 524 assert_se(sigemptyset(&mask) == 0);
7d793605
LP
525 sigset_add_many(&mask,
526 SIGCHLD, /* Child died */
527 SIGTERM, /* Reexecute daemon */
528 SIGHUP, /* Reload configuration */
54d04cd1 529 SIGUSR1, /* systemd: reconnect to D-Bus */
7d793605
LP
530 SIGUSR2, /* systemd: dump status */
531 SIGINT, /* Kernel sends us this on control-alt-del */
532 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
533 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
4dffec14 534
7d793605 535 SIGRTMIN+0, /* systemd: start default.target */
0003d1ab 536 SIGRTMIN+1, /* systemd: isolate rescue.target */
7d793605
LP
537 SIGRTMIN+2, /* systemd: isolate emergency.target */
538 SIGRTMIN+3, /* systemd: start halt.target */
539 SIGRTMIN+4, /* systemd: start poweroff.target */
540 SIGRTMIN+5, /* systemd: start reboot.target */
0003d1ab 541 SIGRTMIN+6, /* systemd: start kexec.target */
13ffc607 542 SIGRTMIN+7, /* systemd: start soft-reboot.target */
4dffec14
LP
543
544 /* ... space for more special targets ... */
545
0003d1ab
LP
546 SIGRTMIN+13, /* systemd: Immediate halt */
547 SIGRTMIN+14, /* systemd: Immediate poweroff */
548 SIGRTMIN+15, /* systemd: Immediate reboot */
549 SIGRTMIN+16, /* systemd: Immediate kexec */
13ffc607 550 SIGRTMIN+17, /* systemd: Immediate soft-reboot */
29e6b0c1
LP
551 SIGRTMIN+18, /* systemd: control command */
552
553 /* ... space ... */
4dffec14 554
0658666b
LP
555 SIGRTMIN+20, /* systemd: enable status messages */
556 SIGRTMIN+21, /* systemd: disable status messages */
253ee27a
LP
557 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
558 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
600b704e 559 SIGRTMIN+24, /* systemd: Immediate exit (--user only) */
463aef23 560 SIGRTMIN+25, /* systemd: reexecute manager */
4dffec14 561
8750ac02
ZJS
562 /* Apparently Linux on hppa had fewer RT signals until v3.18,
563 * SIGRTMAX was SIGRTMIN+25, and then SIGRTMIN was lowered,
564 * see commit v3.17-7614-g1f25df2eff.
565 *
566 * We cannot unconditionally make use of those signals here,
567 * so let's use a runtime check. Since these commands are
568 * accessible by different means and only really a safety
569 * net, the missing functionality on hppa shouldn't matter.
570 */
571
572 RTSIG_IF_AVAILABLE(SIGRTMIN+26), /* systemd: set log target to journal-or-kmsg */
573 RTSIG_IF_AVAILABLE(SIGRTMIN+27), /* systemd: set log target to console */
574 RTSIG_IF_AVAILABLE(SIGRTMIN+28), /* systemd: set log target to kmsg */
575 RTSIG_IF_AVAILABLE(SIGRTMIN+29), /* systemd: set log target to syslog-or-kmsg (obsolete) */
4dffec14
LP
576
577 /* ... one free signal here SIGRTMIN+30 ... */
7d793605 578 -1);
ce578209
LP
579 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
580
718db961
LP
581 m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
582 if (m->signal_fd < 0)
ce578209
LP
583 return -errno;
584
151b9b96 585 r = sd_event_add_io(m->event, &m->signal_event_source, m->signal_fd, EPOLLIN, manager_dispatch_signal_fd, m);
718db961
LP
586 if (r < 0)
587 return r;
ce578209 588
7dfbe2e3
TG
589 (void) sd_event_source_set_description(m->signal_event_source, "manager-signal");
590
d8fdc620
LP
591 /* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
592 * notify processing can still figure out to which process/service a message belongs, before we reap the
593 * process. Also, process this before handling cgroup notifications, so that we always collect child exit
594 * status information before detecting that there's no process in a cgroup. */
595 r = sd_event_source_set_priority(m->signal_event_source, SD_EVENT_PRIORITY_NORMAL-6);
29083707
LP
596 if (r < 0)
597 return r;
598
463d0d15 599 if (MANAGER_IS_SYSTEM(m))
80876c20 600 return enable_special_signals(m);
e1414003 601
ce578209
LP
602 return 0;
603}
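/* Illustrative sketch (editor's note, not part of the original file): the realtime signals
 * listed above are the manager's low-level control interface; external tools just deliver them
 * with kill(). For example, switching the system manager to debug logging (SIGRTMIN+22, as
 * annotated above) amounts to:
 *
 *     #include <errno.h>
 *     #include <signal.h>
 *     #include <sys/types.h>
 *
 *     static int example_set_manager_log_debug(pid_t manager_pid) {
 *             return kill(manager_pid, SIGRTMIN+22) < 0 ? -errno : 0;
 *     }
 *
 * For the system manager manager_pid is 1; a user manager would need its own PID. The helper
 * name here is made up for the example. */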
604
1ad6e8b3 605static char** sanitize_environment(char **l) {
f069efb4 606
47cf8ff2 607 /* Let's remove some environment variables that we need ourselves to communicate with our clients */
f069efb4 608 strv_env_unset_many(
1ad6e8b3 609 l,
8047ac8f
LP
610 "CACHE_DIRECTORY",
611 "CONFIGURATION_DIRECTORY",
bb0c0d6f 612 "CREDENTIALS_DIRECTORY",
47cf8ff2
LP
613 "EXIT_CODE",
614 "EXIT_STATUS",
615 "INVOCATION_ID",
616 "JOURNAL_STREAM",
617 "LISTEN_FDNAMES",
618 "LISTEN_FDS",
619 "LISTEN_PID",
8047ac8f 620 "LOGS_DIRECTORY",
c792a2e5 621 "LOG_NAMESPACE",
f069efb4
LP
622 "MAINPID",
623 "MANAGERPID",
6bb00842
LP
624 "MEMORY_PRESSURE_WATCH",
625 "MEMORY_PRESSURE_WRITE",
c792a2e5
LP
626 "MONITOR_EXIT_CODE",
627 "MONITOR_EXIT_STATUS",
628 "MONITOR_INVOCATION_ID",
629 "MONITOR_SERVICE_RESULT",
630 "MONITOR_UNIT",
47cf8ff2 631 "NOTIFY_SOCKET",
dcf3c3c3 632 "PIDFILE",
47cf8ff2
LP
633 "REMOTE_ADDR",
634 "REMOTE_PORT",
8047ac8f 635 "RUNTIME_DIRECTORY",
47cf8ff2 636 "SERVICE_RESULT",
8047ac8f 637 "STATE_DIRECTORY",
c792a2e5
LP
638 "SYSTEMD_EXEC_PID",
639 "TRIGGER_PATH",
640 "TRIGGER_TIMER_MONOTONIC_USEC",
641 "TRIGGER_TIMER_REALTIME_USEC",
642 "TRIGGER_UNIT",
f069efb4
LP
643 "WATCHDOG_PID",
644 "WATCHDOG_USEC",
645 NULL);
47cf8ff2
LP
646
647 /* Let's order the environment alphabetically, just to make it pretty */
305757d8 648 return strv_sort(l);
f069efb4
LP
649}
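/* Editor's note (not part of the original file), a concrete example: a transient environment of
 * { "PATH=/usr/bin", "NOTIFY_SOCKET=/run/systemd/notify", "LISTEN_FDS=2" } comes out of
 * sanitize_environment() as just { "PATH=/usr/bin" }, sorted, since the other two variables are
 * part of the manager-to-service protocol and must not leak into unrelated children. */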
650
79a224c4 651int manager_default_environment(Manager *m) {
db11487d
ZJS
652 int r;
653
71ecc858
LP
654 assert(m);
655
1ad6e8b3
LP
656 m->transient_environment = strv_free(m->transient_environment);
657
463d0d15 658 if (MANAGER_IS_SYSTEM(m)) {
c792a2e5
LP
659 /* The system manager always starts with a clean environment for its children. It does not
660 * import the kernel's or the parents' exported variables.
e21fea24 661 *
c792a2e5
LP
662 * The initial passed environment is untouched to keep /proc/self/environ valid; it is used
663 * for tagging the init process inside containers. */
1ad6e8b3 664 m->transient_environment = strv_new("PATH=" DEFAULT_PATH);
db11487d
ZJS
665 if (!m->transient_environment)
666 return log_oom();
e21fea24
KS
667
668 /* Import locale variables LC_*= from configuration */
1ad6e8b3 669 (void) locale_setup(&m->transient_environment);
db11487d 670 } else {
aaf057c4 671 /* The user manager passes its own environment along to its children, except for $PATH. */
1ad6e8b3 672 m->transient_environment = strv_copy(environ);
db11487d
ZJS
673 if (!m->transient_environment)
674 return log_oom();
675
aaf057c4 676 r = strv_env_replace_strdup(&m->transient_environment, "PATH=" DEFAULT_USER_PATH);
db11487d
ZJS
677 if (r < 0)
678 return log_oom();
db11487d 679 }
8b55b8c4 680
1ad6e8b3 681 sanitize_environment(m->transient_environment);
e21fea24 682 return 0;
71ecc858
LP
683}
684
3536f49e
YW
685static int manager_setup_prefix(Manager *m) {
686 struct table_entry {
687 uint64_t type;
688 const char *suffix;
689 };
690
72fd1768 691 static const struct table_entry paths_system[_EXEC_DIRECTORY_TYPE_MAX] = {
9978e631
ZJS
692 [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_SYSTEM_RUNTIME, NULL },
693 [EXEC_DIRECTORY_STATE] = { SD_PATH_SYSTEM_STATE_PRIVATE, NULL },
694 [EXEC_DIRECTORY_CACHE] = { SD_PATH_SYSTEM_STATE_CACHE, NULL },
695 [EXEC_DIRECTORY_LOGS] = { SD_PATH_SYSTEM_STATE_LOGS, NULL },
3536f49e
YW
696 [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_SYSTEM_CONFIGURATION, NULL },
697 };
698
72fd1768 699 static const struct table_entry paths_user[_EXEC_DIRECTORY_TYPE_MAX] = {
9978e631 700 [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_USER_RUNTIME, NULL },
f9c91932 701 [EXEC_DIRECTORY_STATE] = { SD_PATH_USER_STATE_PRIVATE, NULL },
9978e631 702 [EXEC_DIRECTORY_CACHE] = { SD_PATH_USER_STATE_CACHE, NULL },
f9c91932 703 [EXEC_DIRECTORY_LOGS] = { SD_PATH_USER_STATE_PRIVATE, "log" },
9978e631 704 [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_USER_CONFIGURATION, NULL },
3536f49e
YW
705 };
706
3536f49e
YW
707 assert(m);
708
24b45970
ZJS
709 const struct table_entry *p = MANAGER_IS_SYSTEM(m) ? paths_system : paths_user;
710 int r;
3536f49e 711
24b45970 712 for (ExecDirectoryType i = 0; i < _EXEC_DIRECTORY_TYPE_MAX; i++) {
51327bcc 713 r = sd_path_lookup(p[i].type, p[i].suffix, &m->prefix[i]);
3536f49e 714 if (r < 0)
998df7ce
ZJS
715 return log_warning_errno(r, "Failed to lookup %s path: %m",
716 exec_directory_type_to_string(i));
3536f49e
YW
717 }
718
719 return 0;
720}
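/* Editor's note (not part of the original file): as an example of what the tables above typically
 * resolve to, a user manager ends up with m->prefix[EXEC_DIRECTORY_RUNTIME] = $XDG_RUNTIME_DIR and
 * m->prefix[EXEC_DIRECTORY_LOGS] = ~/.local/state/log (SD_PATH_USER_STATE_PRIVATE plus the "log"
 * suffix), while the system manager gets /run and /var/log respectively. */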
721
e8630e69
ZJS
722static void manager_free_unit_name_maps(Manager *m) {
723 m->unit_id_map = hashmap_free(m->unit_id_map);
724 m->unit_name_map = hashmap_free(m->unit_name_map);
3fb2326f 725 m->unit_path_cache = set_free(m->unit_path_cache);
c2911d48 726 m->unit_cache_timestamp_hash = 0;
e8630e69
ZJS
727}
728
279d81dd
LP
729static int manager_setup_run_queue(Manager *m) {
730 int r;
731
732 assert(m);
733 assert(!m->run_queue_event_source);
734
735 r = sd_event_add_defer(m->event, &m->run_queue_event_source, manager_dispatch_run_queue, m);
736 if (r < 0)
737 return r;
738
739 r = sd_event_source_set_priority(m->run_queue_event_source, SD_EVENT_PRIORITY_IDLE);
740 if (r < 0)
741 return r;
742
743 r = sd_event_source_set_enabled(m->run_queue_event_source, SD_EVENT_OFF);
744 if (r < 0)
745 return r;
746
747 (void) sd_event_source_set_description(m->run_queue_event_source, "manager-run-queue");
748
749 return 0;
750}
751
575b300b
LP
752static int manager_setup_sigchld_event_source(Manager *m) {
753 int r;
754
755 assert(m);
756 assert(!m->sigchld_event_source);
757
758 r = sd_event_add_defer(m->event, &m->sigchld_event_source, manager_dispatch_sigchld, m);
759 if (r < 0)
760 return r;
761
762 r = sd_event_source_set_priority(m->sigchld_event_source, SD_EVENT_PRIORITY_NORMAL-7);
763 if (r < 0)
764 return r;
765
766 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
767 if (r < 0)
768 return r;
769
770 (void) sd_event_source_set_description(m->sigchld_event_source, "manager-sigchld");
771
772 return 0;
773}
774
29e6b0c1
LP
775int manager_setup_memory_pressure_event_source(Manager *m) {
776 int r;
777
778 assert(m);
779
780 m->memory_pressure_event_source = sd_event_source_disable_unref(m->memory_pressure_event_source);
781
782 r = sd_event_add_memory_pressure(m->event, &m->memory_pressure_event_source, NULL, NULL);
783 if (r < 0)
784 log_full_errno(ERRNO_IS_NOT_SUPPORTED(r) || ERRNO_IS_PRIVILEGE(r) || (r == -EHOSTDOWN) ? LOG_DEBUG : LOG_NOTICE, r,
785 "Failed to establish memory pressure event source, ignoring: %m");
c9e120e0 786 else if (m->defaults.memory_pressure_threshold_usec != USEC_INFINITY) {
6bb00842
LP
787
788 /* If there's a default memory pressure threshold set, also apply it to the service manager itself */
789 r = sd_event_source_set_memory_pressure_period(
790 m->memory_pressure_event_source,
c9e120e0 791 m->defaults.memory_pressure_threshold_usec,
6bb00842
LP
792 MEMORY_PRESSURE_DEFAULT_WINDOW_USEC);
793 if (r < 0)
794 log_warning_errno(r, "Failed to adjust memory pressure threshold, ignoring: %m");
795 }
29e6b0c1
LP
796
797 return 0;
798}
799
2ad591a3
LP
800static int manager_find_credentials_dirs(Manager *m) {
801 const char *e;
802 int r;
803
804 assert(m);
805
806 r = get_credentials_dir(&e);
807 if (r < 0) {
808 if (r != -ENXIO)
809 log_debug_errno(r, "Failed to determine credentials directory, ignoring: %m");
810 } else {
811 m->received_credentials_directory = strdup(e);
812 if (!m->received_credentials_directory)
813 return -ENOMEM;
814 }
815
816 r = get_encrypted_credentials_dir(&e);
817 if (r < 0) {
818 if (r != -ENXIO)
819 log_debug_errno(r, "Failed to determine encrypted credentials directory, ignoring: %m");
820 } else {
821 m->received_encrypted_credentials_directory = strdup(e);
822 if (!m->received_encrypted_credentials_directory)
823 return -ENOMEM;
824 }
825
826 return 0;
827}
828
d35fe8c0 829void manager_set_switching_root(Manager *m, bool switching_root) {
7fa49280
LP
830 assert(m);
831
d35fe8c0
FB
832 m->switching_root = MANAGER_IS_SYSTEM(m) && switching_root;
833}
834
cc156539
DDM
835double manager_get_progress(Manager *m) {
836 assert(m);
837
838 if (MANAGER_IS_FINISHED(m) || m->n_installed_jobs == 0)
839 return 1.0;
840
841 return 1.0 - ((double) hashmap_size(m->jobs) / (double) m->n_installed_jobs);
842}
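/* Editor's note (not part of the original file), a worked example: with 200 jobs installed during
 * this startup and 50 still pending in m->jobs, the reported progress is 1.0 - 50/200 = 0.75. */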
843
a8157796
LP
844static int compare_job_priority(const void *a, const void *b) {
845 const Job *x = a, *y = b;
846
847 return unit_compare_priority(x->unit, y->unit);
848}
849
4870133b 850int manager_new(RuntimeScope runtime_scope, ManagerTestRunFlags test_run_flags, Manager **_m) {
c70cac54 851 _cleanup_(manager_freep) Manager *m = NULL;
e3dd987c 852 int r;
8e274523
LP
853
854 assert(_m);
4870133b 855 assert(IN_SET(runtime_scope, RUNTIME_SCOPE_SYSTEM, RUNTIME_SCOPE_USER));
ce578209 856
3ad228ce 857 m = new(Manager, 1);
915b3753 858 if (!m)
8e274523 859 return -ENOMEM;
60918275 860
3ad228ce 861 *m = (Manager) {
4870133b 862 .runtime_scope = runtime_scope,
3ad228ce
LP
863 .objective = _MANAGER_OBJECTIVE_INVALID,
864
36cf4507
ZJS
865 .status_unit_format = STATUS_UNIT_FORMAT_DEFAULT,
866
3ad228ce
LP
867 .original_log_level = -1,
868 .original_log_target = _LOG_TARGET_INVALID,
869
986935cf
FB
870 .watchdog_overridden[WATCHDOG_RUNTIME] = USEC_INFINITY,
871 .watchdog_overridden[WATCHDOG_REBOOT] = USEC_INFINITY,
872 .watchdog_overridden[WATCHDOG_KEXEC] = USEC_INFINITY,
5717062e 873 .watchdog_overridden[WATCHDOG_PRETIMEOUT] = USEC_INFINITY,
986935cf 874
44a41954
FB
875 .show_status_overridden = _SHOW_STATUS_INVALID,
876
254d1313
ZJS
877 .notify_fd = -EBADF,
878 .cgroups_agent_fd = -EBADF,
879 .signal_fd = -EBADF,
71136404 880 .user_lookup_fds = EBADF_PAIR,
254d1313
ZJS
881 .private_listen_fd = -EBADF,
882 .dev_autofs_fd = -EBADF,
883 .cgroup_inotify_fd = -EBADF,
884 .pin_cgroupfs_fd = -EBADF,
885 .ask_password_inotify_fd = -EBADF,
886 .idle_pipe = { -EBADF, -EBADF, -EBADF, -EBADF},
3ad228ce
LP
887
888 /* start as id #1, so that we can leave #0 around as "null-like" value */
889 .current_job_id = 1,
890
891 .have_ask_password = -EINVAL, /* we don't know */
892 .first_boot = -1,
893 .test_run_flags = test_run_flags,
afcfaa69 894
fed25720 895 .dump_ratelimit = (const RateLimit) { .interval = 10 * USEC_PER_MINUTE, .burst = 10 },
bb5232b6
LB
896
897 .executor_fd = -EBADF,
3ad228ce 898 };
80876c20 899
ea09a416
LP
900 unit_defaults_init(&m->defaults, runtime_scope);
901
349cc4a5 902#if ENABLE_EFI
463d0d15 903 if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0)
9f9f0342
LP
904 boot_timestamps(m->timestamps + MANAGER_TIMESTAMP_USERSPACE,
905 m->timestamps + MANAGER_TIMESTAMP_FIRMWARE,
906 m->timestamps + MANAGER_TIMESTAMP_LOADER);
463d0d15
LP
907#endif
908
f2341e0a 909 /* Prepare log fields we can use for structured logging */
463d0d15
LP
910 if (MANAGER_IS_SYSTEM(m)) {
911 m->unit_log_field = "UNIT=";
912 m->unit_log_format_string = "UNIT=%s";
4b58153d
LP
913
914 m->invocation_log_field = "INVOCATION_ID=";
f1c50bec 915 m->invocation_log_format_string = "INVOCATION_ID=%s";
463d0d15
LP
916 } else {
917 m->unit_log_field = "USER_UNIT=";
918 m->unit_log_format_string = "USER_UNIT=%s";
4b58153d
LP
919
920 m->invocation_log_field = "USER_INVOCATION_ID=";
f1c50bec 921 m->invocation_log_format_string = "USER_INVOCATION_ID=%s";
463d0d15 922 }
f2341e0a 923
2e5c94b9 924 /* Reboot immediately if the user hits C-A-D more often than 7x per 2s */
7d1e61ca 925 m->ctrl_alt_del_ratelimit = (const RateLimit) { .interval = 2 * USEC_PER_SEC, .burst = 7 };
2e5c94b9 926
e21fea24
KS
927 r = manager_default_environment(m);
928 if (r < 0)
c70cac54 929 return r;
1137a57c 930
d5099efc 931 r = hashmap_ensure_allocated(&m->units, &string_hash_ops);
718db961 932 if (r < 0)
c70cac54 933 return r;
60918275 934
548f6937 935 r = hashmap_ensure_allocated(&m->cgroup_unit, &path_hash_ops);
718db961 936 if (r < 0)
c70cac54 937 return r;
9152c765 938
d5099efc 939 r = hashmap_ensure_allocated(&m->watch_bus, &string_hash_ops);
718db961 940 if (r < 0)
c70cac54 941 return r;
05e343b7 942
da8e1782
MO
943 r = prioq_ensure_allocated(&m->run_queue, compare_job_priority);
944 if (r < 0)
945 return r;
946
e8112e67 947 r = manager_setup_prefix(m);
718db961 948 if (r < 0)
c70cac54 949 return r;
8742514c 950
2ad591a3
LP
951 r = manager_find_credentials_dirs(m);
952 if (r < 0)
953 return r;
bb0c0d6f 954
e8112e67 955 r = sd_event_default(&m->event);
8742514c 956 if (r < 0)
c70cac54 957 return r;
9152c765 958
e8112e67 959 r = manager_setup_run_queue(m);
a1d32bac 960 if (r < 0)
c70cac54 961 return r;
8e274523 962
5dd2f5ff 963 if (FLAGS_SET(test_run_flags, MANAGER_TEST_RUN_MINIMAL)) {
e8112e67
ZJS
964 m->cgroup_root = strdup("");
965 if (!m->cgroup_root)
966 return -ENOMEM;
967 } else {
968 r = manager_setup_signals(m);
969 if (r < 0)
970 return r;
8c47c732 971
e8112e67
ZJS
972 r = manager_setup_cgroup(m);
973 if (r < 0)
974 return r;
575b300b 975
e8112e67
ZJS
976 r = manager_setup_time_change(m);
977 if (r < 0)
978 return r;
9670d583 979
bbf5fd8e
LP
980 r = manager_read_timezone_stat(m);
981 if (r < 0)
982 return r;
983
ea5c5f68 984 (void) manager_setup_timezone_change(m);
bbf5fd8e 985
e8112e67
ZJS
986 r = manager_setup_sigchld_event_source(m);
987 if (r < 0)
988 return r;
b1994387 989
29e6b0c1
LP
990 r = manager_setup_memory_pressure_event_source(m);
991 if (r < 0)
992 return r;
993
b1994387 994#if HAVE_LIBBPF
ba187c9c 995 if (MANAGER_IS_SYSTEM(m) && lsm_bpf_supported(/* initialize = */ true)) {
b1994387
ILG
996 r = lsm_bpf_setup(m);
997 if (r < 0)
299d9417 998 log_warning_errno(r, "Failed to setup LSM BPF, ignoring: %m");
b1994387
ILG
999 }
1000#endif
e8112e67 1001 }
e27fe688 1002
2f8c48b6
AZ
1003 if (test_run_flags == 0) {
1004 if (MANAGER_IS_SYSTEM(m))
1005 r = mkdir_label("/run/systemd/units", 0755);
1006 else {
1007 _cleanup_free_ char *units_path = NULL;
1008 r = xdg_user_runtime_dir(&units_path, "/systemd/units");
1009 if (r < 0)
1010 return r;
1011 r = mkdir_p_label(units_path, 0755);
1012 }
1013
d3070fbd 1014 if (r < 0 && r != -EEXIST)
c70cac54 1015 return r;
bb5232b6
LB
1016
1017 m->executor_fd = open(SYSTEMD_EXECUTOR_BINARY_PATH, O_CLOEXEC|O_PATH);
1018 if (m->executor_fd < 0)
1019 return log_warning_errno(errno,
1020 "Failed to open executor binary '%s': %m",
1021 SYSTEMD_EXECUTOR_BINARY_PATH);
1022 } else if (!FLAGS_SET(test_run_flags, MANAGER_TEST_DONT_OPEN_EXECUTOR)) {
1023 _cleanup_free_ char *self_exe = NULL, *executor_path = NULL;
1024 _cleanup_close_ int self_dir_fd = -EBADF;
1025 int level = LOG_DEBUG;
1026
1027 /* Prefer sd-executor from the same directory as the test, e.g. when running unit tests from the
1028 * build directory. Fall back to the working directory and then to the installation path. */
1029 r = readlink_and_make_absolute("/proc/self/exe", &self_exe);
1030 if (r < 0)
1031 return r;
1032
1033 self_dir_fd = open_parent(self_exe, O_CLOEXEC|O_DIRECTORY, 0);
1034 if (self_dir_fd < 0)
1035 return -errno;
1036
1037 m->executor_fd = openat(self_dir_fd, "systemd-executor", O_CLOEXEC|O_PATH);
1038 if (m->executor_fd < 0 && errno == ENOENT)
1039 m->executor_fd = openat(AT_FDCWD, "systemd-executor", O_CLOEXEC|O_PATH);
1040 if (m->executor_fd < 0 && errno == ENOENT) {
1041 m->executor_fd = open(SYSTEMD_EXECUTOR_BINARY_PATH, O_CLOEXEC|O_PATH);
1042 level = LOG_WARNING; /* Tests should normally use local builds */
1043 }
1044 if (m->executor_fd < 0)
1045 return -errno;
1046
1047 r = fd_get_path(m->executor_fd, &executor_path);
1048 if (r < 0)
1049 return r;
1050
1051 log_full(level, "Using systemd-executor binary from '%s'", executor_path);
d3070fbd
LP
1052 }
1053
232f6754
ZJS
1054 /* Note that we do not set up the notify fd here. We do that after deserialization,
1055 * since it might have gotten serialized across the reexec. */
3536f49e 1056
1cc6c93a
YW
1057 *_m = TAKE_PTR(m);
1058
8e274523 1059 return 0;
60918275
LP
1060}
1061
d86f9d52 1062static int manager_setup_notify(Manager *m) {
7181dbdb 1063 int r;
d86f9d52 1064
638cece4 1065 if (MANAGER_IS_TEST_RUN(m))
0d8c31ff
ZJS
1066 return 0;
1067
d86f9d52 1068 if (m->notify_fd < 0) {
254d1313 1069 _cleanup_close_ int fd = -EBADF;
f36a9d59
ZJS
1070 union sockaddr_union sa;
1071 socklen_t sa_len;
d86f9d52
LP
1072
1073 /* First free all secondary fields */
a1e58e8e 1074 m->notify_socket = mfree(m->notify_socket);
5dcadb4c 1075 m->notify_event_source = sd_event_source_disable_unref(m->notify_event_source);
d86f9d52
LP
1076
1077 fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
4a62c710
MS
1078 if (fd < 0)
1079 return log_error_errno(errno, "Failed to allocate notification socket: %m");
d86f9d52 1080
28e7e934 1081 fd_increase_rxbuf(fd, NOTIFY_RCVBUF_SIZE);
a47806fa 1082
b910cc72 1083 m->notify_socket = path_join(m->prefix[EXEC_DIRECTORY_RUNTIME], "systemd/notify");
498e87d6
LP
1084 if (!m->notify_socket)
1085 return log_oom();
1086
f36a9d59
ZJS
1087 r = sockaddr_un_set_path(&sa.un, m->notify_socket);
1088 if (r < 0)
1089 return log_error_errno(r, "Notify socket '%s' not valid for AF_UNIX socket address, refusing.",
1090 m->notify_socket);
1091 sa_len = r;
15a3e96f 1092
498e87d6 1093 (void) mkdir_parents_label(m->notify_socket, 0755);
fbda85b0 1094 (void) sockaddr_un_unlink(&sa.un);
7181dbdb 1095
45ae2f72 1096 r = mac_selinux_bind(fd, &sa.sa, sa_len);
4a62c710 1097 if (r < 0)
45ae2f72 1098 return log_error_errno(r, "bind(%s) failed: %m", m->notify_socket);
d86f9d52 1099
2ff48e98 1100 r = setsockopt_int(fd, SOL_SOCKET, SO_PASSCRED, true);
4a62c710 1101 if (r < 0)
2ff48e98 1102 return log_error_errno(r, "SO_PASSCRED failed: %m");
d86f9d52 1103
c10d6bdb 1104 m->notify_fd = TAKE_FD(fd);
d86f9d52
LP
1105
1106 log_debug("Using notification socket %s", m->notify_socket);
1107 }
1108
1109 if (!m->notify_event_source) {
151b9b96 1110 r = sd_event_add_io(m->event, &m->notify_event_source, m->notify_fd, EPOLLIN, manager_dispatch_notify_fd, m);
895b3a7b
MS
1111 if (r < 0)
1112 return log_error_errno(r, "Failed to allocate notify event source: %m");
d86f9d52 1113
d8fdc620
LP
1114 /* Process notification messages a bit earlier than SIGCHLD, so that we can still identify to which
1115 * service an exit message belongs. */
575b300b 1116 r = sd_event_source_set_priority(m->notify_event_source, SD_EVENT_PRIORITY_NORMAL-8);
23bbb0de
MS
1117 if (r < 0)
1118 return log_error_errno(r, "Failed to set priority of notify event source: %m");
7dfbe2e3
TG
1119
1120 (void) sd_event_source_set_description(m->notify_event_source, "manager-notify");
d86f9d52
LP
1121 }
1122
1123 return 0;
1124}
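/* Illustrative sketch (editor's note, not part of the original file): services answer on this
 * socket using the sd_notify(3) datagram protocol; the path bound above is exported to them as
 * $NOTIFY_SOCKET. Stripped of error details, a client-side "READY=1" looks roughly like:
 *
 *     #include <errno.h>
 *     #include <string.h>
 *     #include <sys/socket.h>
 *     #include <sys/un.h>
 *     #include <unistd.h>
 *
 *     static int example_notify_ready(const char *socket_path) {
 *             union { struct sockaddr sa; struct sockaddr_un un; } a = { .un.sun_family = AF_UNIX };
 *             int fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
 *             if (fd < 0)
 *                     return -errno;
 *             strncpy(a.un.sun_path, socket_path, sizeof(a.un.sun_path) - 1);
 *             if (sendto(fd, "READY=1", strlen("READY=1"), 0, &a.sa, sizeof(a.un)) < 0) {
 *                     int k = -errno;
 *                     close(fd);
 *                     return k;
 *             }
 *             close(fd);
 *             return 0;
 *     }
 *
 * Real services should call sd_notify(3) instead of open-coding this; the helper name above is
 * made up for the example. */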
1125
d8fdc620
LP
1126static int manager_setup_cgroups_agent(Manager *m) {
1127
1128 static const union sockaddr_union sa = {
1129 .un.sun_family = AF_UNIX,
1130 .un.sun_path = "/run/systemd/cgroups-agent",
1131 };
1132 int r;
1133
1134 /* This creates a listening socket we receive cgroups agent messages on. We do not use D-Bus for delivering
1135 * these messages from the cgroups agent binary to PID 1, as the cgroups agent binary is very short-lived, and
1136 * each instance of it needs a new D-Bus connection. Since D-Bus connections are SOCK_STREAM/AF_UNIX, on
1137 * overloaded systems the backlog of the D-Bus socket becomes relevant, as not more than the configured number
1138 * of D-Bus connections may be queued before the kernel starts dropping further incoming connections,
1139 * possibly resulting in lost cgroups agent messages. To avoid this, we'll use a private SOCK_DGRAM/AF_UNIX
1140 * socket, where no backlog is relevant as communication may take place without an actual connect() cycle, and
1141 * we thus won't lose messages.
1142 *
1143 * Note that PID 1 will forward the agent message to system bus, so that the user systemd instance may listen
1144 * to it. The system instance hence listens on this special socket, but the user instances listen on the system
1145 * bus for these messages. */
1146
638cece4 1147 if (MANAGER_IS_TEST_RUN(m))
d8fdc620
LP
1148 return 0;
1149
1150 if (!MANAGER_IS_SYSTEM(m))
1151 return 0;
1152
c22800e4 1153 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
b4cccbc1
LP
1154 if (r < 0)
1155 return log_error_errno(r, "Failed to determine whether unified cgroups hierarchy is used: %m");
1156 if (r > 0) /* We don't need this anymore on the unified hierarchy */
d8fdc620
LP
1157 return 0;
1158
1159 if (m->cgroups_agent_fd < 0) {
254d1313 1160 _cleanup_close_ int fd = -EBADF;
d8fdc620
LP
1161
1162 /* First free all secondary fields */
5dcadb4c 1163 m->cgroups_agent_event_source = sd_event_source_disable_unref(m->cgroups_agent_event_source);
d8fdc620
LP
1164
1165 fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
1166 if (fd < 0)
1167 return log_error_errno(errno, "Failed to allocate cgroups agent socket: %m");
1168
28e7e934 1169 fd_increase_rxbuf(fd, CGROUPS_AGENT_RCVBUF_SIZE);
d8fdc620 1170
155b6876 1171 (void) sockaddr_un_unlink(&sa.un);
d8fdc620
LP
1172
1173 /* Only allow root to connect to this socket */
2053593f 1174 WITH_UMASK(0077)
fc2fffe7 1175 r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
d8fdc620
LP
1176 if (r < 0)
1177 return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
1178
0b7e8b32 1179 m->cgroups_agent_fd = TAKE_FD(fd);
d8fdc620
LP
1180 }
1181
1182 if (!m->cgroups_agent_event_source) {
1183 r = sd_event_add_io(m->event, &m->cgroups_agent_event_source, m->cgroups_agent_fd, EPOLLIN, manager_dispatch_cgroups_agent_fd, m);
1184 if (r < 0)
1185 return log_error_errno(r, "Failed to allocate cgroups agent event source: %m");
1186
cbe83389
LP
1187 /* Process cgroups notifications early. Note that when the agent notification is received
1188 * we'll just enqueue the unit in the cgroup empty queue, hence pick a higher priority than
1189 * that. Also see handling of cgroup inotify for the unified cgroup stuff. */
1190 r = sd_event_source_set_priority(m->cgroups_agent_event_source, SD_EVENT_PRIORITY_NORMAL-9);
d8fdc620
LP
1191 if (r < 0)
1192 return log_error_errno(r, "Failed to set priority of cgroups agent event source: %m");
1193
1194 (void) sd_event_source_set_description(m->cgroups_agent_event_source, "manager-cgroups-agent");
1195 }
1196
1197 return 0;
1198}
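/* Illustrative sketch (editor's note, not part of the original file): on the legacy hierarchy the
 * kernel runs the configured release agent with the path of the emptied cgroup as argv[1], and
 * the agent binary simply forwards that string as one datagram to the socket bound above,
 * roughly:
 *
 *     sendto(fd, cgroup_path, strlen(cgroup_path), 0, &sa.sa, SOCKADDR_UN_LEN(sa.un));
 *
 * manager_dispatch_cgroups_agent_fd() then maps the received path back to a unit and queues a
 * cgroup-empty check for it. */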
1199
00d9ef85
LP
1200static int manager_setup_user_lookup_fd(Manager *m) {
1201 int r;
1202
1203 assert(m);
1204
1205 /* Set up the socket pair used for passing UID/GID resolution results from forked off processes to PID
1206 * 1. Background: we can't do name lookups (NSS) from PID 1, since it might involve IPC and thus activation,
1207 * and we might hence deadlock on ourselves. Hence we do all user/group lookups asynchronously from the forked
1208 * off processes right before executing the binaries to start. In order to be able to clean up any IPC objects
1209 * created by a unit (see RemoveIPC=) we need to know in PID 1 the used UID/GID of the executed processes,
1210 * hence we establish this communication channel so that forked off processes can pass their UID/GID
1211 * information back to PID 1. The forked off processes send their resolved UID/GID to PID 1 in a simple
1212 * datagram, along with their unit name, so that we can share one communication socket pair among all units for
1213 * this purpose.
1214 *
1215 * You might wonder why we need a communication channel for this that is independent of the usual notification
1216 * socket scheme (i.e. $NOTIFY_SOCKET). The primary difference is about trust: data sent via the $NOTIFY_SOCKET
1217 * channel is only accepted if it originates from the right unit and if reception was enabled for it. The user
1218 * lookup socket OTOH is only accessible by PID 1 and its children until they exec(), and always available.
1219 *
1220 * Note that this function is called under two circumstances: when we first initialize (in which case we
1221 * allocate both the socket pair and the event source to listen on it), and when we deserialize after a reload
1222 * (in which case the socket pair already exists but we still need to allocate the event source for it). */
1223
1224 if (m->user_lookup_fds[0] < 0) {
1225
1226 /* Free all secondary fields */
1227 safe_close_pair(m->user_lookup_fds);
5dcadb4c 1228 m->user_lookup_event_source = sd_event_source_disable_unref(m->user_lookup_event_source);
00d9ef85
LP
1229
1230 if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->user_lookup_fds) < 0)
1231 return log_error_errno(errno, "Failed to allocate user lookup socket: %m");
1232
28e7e934 1233 (void) fd_increase_rxbuf(m->user_lookup_fds[0], NOTIFY_RCVBUF_SIZE);
00d9ef85
LP
1234 }
1235
1236 if (!m->user_lookup_event_source) {
1237 r = sd_event_add_io(m->event, &m->user_lookup_event_source, m->user_lookup_fds[0], EPOLLIN, manager_dispatch_user_lookup_fd, m);
1238 if (r < 0)
1239 return log_error_errno(errno, "Failed to allocate user lookup event source: %m");
1240
1241 /* Process even earlier than the notify event source, so that we always know first about valid UID/GID
1242 * resolutions */
67ae4e8d 1243 r = sd_event_source_set_priority(m->user_lookup_event_source, SD_EVENT_PRIORITY_NORMAL-11);
00d9ef85 1244 if (r < 0)
5238e957 1245 return log_error_errno(errno, "Failed to set priority of user lookup event source: %m");
00d9ef85
LP
1246
1247 (void) sd_event_source_set_description(m->user_lookup_event_source, "user-lookup");
1248 }
1249
1250 return 0;
1251}
1252
23a177ef 1253static unsigned manager_dispatch_cleanup_queue(Manager *m) {
595ed347 1254 Unit *u;
23a177ef
LP
1255 unsigned n = 0;
1256
1257 assert(m);
1258
595ed347
MS
1259 while ((u = m->cleanup_queue)) {
1260 assert(u->in_cleanup_queue);
23a177ef 1261
595ed347 1262 unit_free(u);
23a177ef
LP
1263 n++;
1264 }
1265
1266 return n;
1267}
1268
6ac62d61
LP
1269static unsigned manager_dispatch_release_resources_queue(Manager *m) {
1270 unsigned n = 0;
1271 Unit *u;
1272
1273 assert(m);
1274
52e3671b 1275 while ((u = LIST_POP(release_resources_queue, m->release_resources_queue))) {
6ac62d61 1276 assert(u->in_release_resources_queue);
6ac62d61
LP
1277 u->in_release_resources_queue = false;
1278
1279 n++;
1280
1281 unit_release_resources(u);
1282 }
1283
1284 return n;
1285}
1286
eced69b3 1287enum {
35b8ca3a 1288 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
eced69b3
LP
1289 GC_OFFSET_UNSURE, /* No clue */
1290 GC_OFFSET_GOOD, /* We still need this unit */
1291 GC_OFFSET_BAD, /* We don't need this unit anymore */
1292 _GC_OFFSET_MAX
1293};
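/* Editor's note (not part of the original file), a worked example of the marker arithmetic: each
 * run of manager_dispatch_gc_unit_queue() advances m->gc_marker by _GC_OFFSET_MAX, and a unit's
 * verdict for the current cycle is encoded as u->gc_marker - gc_marker. With gc_marker == 8, a
 * unit marked good this cycle carries u->gc_marker == 10 (offset GC_OFFSET_GOOD), while a unit
 * last visited in an earlier cycle still carries some value <= 7, whose unsigned difference
 * matches none of the offsets above and is therefore treated as not yet visited. */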
1294
00d9ef85 1295static void unit_gc_mark_good(Unit *u, unsigned gc_marker) {
4892084f
LN
1296 Unit *other;
1297
1298 u->gc_marker = gc_marker + GC_OFFSET_GOOD;
1299
1300 /* Recursively mark referenced units as GOOD as well */
15ed3c3a 1301 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCES)
4892084f
LN
1302 if (other->gc_marker == gc_marker + GC_OFFSET_UNSURE)
1303 unit_gc_mark_good(other, gc_marker);
1304}
1305
eced69b3 1306static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
701cc384 1307 Unit *other;
eced69b3 1308 bool is_bad;
701cc384
LP
1309
1310 assert(u);
1311
4c701096
YW
1312 if (IN_SET(u->gc_marker - gc_marker,
1313 GC_OFFSET_GOOD, GC_OFFSET_BAD, GC_OFFSET_UNSURE, GC_OFFSET_IN_PATH))
701cc384
LP
1314 return;
1315
ac155bb8 1316 if (u->in_cleanup_queue)
701cc384
LP
1317 goto bad;
1318
f2f725e5 1319 if (!unit_may_gc(u))
701cc384
LP
1320 goto good;
1321
ac155bb8 1322 u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;
eced69b3
LP
1323
1324 is_bad = true;
1325
15ed3c3a 1326 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCED_BY) {
701cc384
LP
1327 unit_gc_sweep(other, gc_marker);
1328
ac155bb8 1329 if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
701cc384 1330 goto good;
eced69b3 1331
ac155bb8 1332 if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
eced69b3 1333 is_bad = false;
701cc384
LP
1334 }
1335
ef3fc326
ZJS
1336 LIST_FOREACH(refs_by_target, ref, u->refs_by_target) {
1337 unit_gc_sweep(ref->source, gc_marker);
2641f02e 1338
ef3fc326
ZJS
1339 if (ref->source->gc_marker == gc_marker + GC_OFFSET_GOOD)
1340 goto good;
2641f02e 1341
ef3fc326
ZJS
1342 if (ref->source->gc_marker != gc_marker + GC_OFFSET_BAD)
1343 is_bad = false;
2641f02e 1344 }
701cc384 1345
eced69b3
LP
1346 if (is_bad)
1347 goto bad;
1348
1349 /* We were unable to find anything out about this entry, so
1350 * let's investigate it later */
ac155bb8 1351 u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
eced69b3
LP
1352 unit_add_to_gc_queue(u);
1353 return;
1354
701cc384 1355bad:
eced69b3
LP
1356 /* We definitely know that this one is not useful anymore, so
1357 * let's mark it for deletion */
ac155bb8 1358 u->gc_marker = gc_marker + GC_OFFSET_BAD;
eced69b3 1359 unit_add_to_cleanup_queue(u);
701cc384
LP
1360 return;
1361
1362good:
4892084f 1363 unit_gc_mark_good(u, gc_marker);
701cc384
LP
1364}
1365
c5a97ed1
LP
1366static unsigned manager_dispatch_gc_unit_queue(Manager *m) {
1367 unsigned n = 0, gc_marker;
595ed347 1368 Unit *u;
701cc384
LP
1369
1370 assert(m);
1371
cf1265e1 1372 /* log_debug("Running GC..."); */
701cc384 1373
eced69b3
LP
1374 m->gc_marker += _GC_OFFSET_MAX;
1375 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
c9c0cadb 1376 m->gc_marker = 1;
701cc384 1377
eced69b3
LP
1378 gc_marker = m->gc_marker;
1379
52e3671b 1380 while ((u = LIST_POP(gc_queue, m->gc_unit_queue))) {
595ed347 1381 assert(u->in_gc_queue);
701cc384 1382
595ed347 1383 unit_gc_sweep(u, gc_marker);
eced69b3 1384
595ed347 1385 u->in_gc_queue = false;
701cc384
LP
1386
1387 n++;
1388
4c701096
YW
1389 if (IN_SET(u->gc_marker - gc_marker,
1390 GC_OFFSET_BAD, GC_OFFSET_UNSURE)) {
cc3bc3e6 1391 if (u->id)
f2341e0a 1392 log_unit_debug(u, "Collecting.");
595ed347
MS
1393 u->gc_marker = gc_marker + GC_OFFSET_BAD;
1394 unit_add_to_cleanup_queue(u);
701cc384
LP
1395 }
1396 }
1397
701cc384
LP
1398 return n;
1399}
1400
c5a97ed1
LP
1401static unsigned manager_dispatch_gc_job_queue(Manager *m) {
1402 unsigned n = 0;
1403 Job *j;
1404
1405 assert(m);
1406
52e3671b 1407 while ((j = LIST_POP(gc_queue, m->gc_job_queue))) {
c5a97ed1 1408 assert(j->in_gc_queue);
c5a97ed1
LP
1409 j->in_gc_queue = false;
1410
1411 n++;
1412
2ab3050f 1413 if (!job_may_gc(j))
c5a97ed1
LP
1414 continue;
1415
1416 log_unit_debug(j->unit, "Collecting job.");
1417 (void) job_finish_and_invalidate(j, JOB_COLLECTED, false, false);
1418 }
1419
1420 return n;
1421}
1422
7223d500
LB
1423static int manager_ratelimit_requeue(sd_event_source *s, uint64_t usec, void *userdata) {
1424 Unit *u = userdata;
1425
1426 assert(u);
1427 assert(s == u->auto_start_stop_event_source);
1428
1429 u->auto_start_stop_event_source = sd_event_source_unref(u->auto_start_stop_event_source);
1430
1431 /* Re-queue to all queues, if the rate limit hit we might have been throttled on any of them. */
1432 unit_submit_to_stop_when_unneeded_queue(u);
1433 unit_submit_to_start_when_upheld_queue(u);
1434 unit_submit_to_stop_when_bound_queue(u);
1435
1436 return 0;
1437}
1438
1439static int manager_ratelimit_check_and_queue(Unit *u) {
1440 int r;
1441
1442 assert(u);
1443
1444 if (ratelimit_below(&u->auto_start_stop_ratelimit))
1445 return 1;
1446
1447 /* Already queued, no need to requeue */
1448 if (u->auto_start_stop_event_source)
1449 return 0;
1450
1451 r = sd_event_add_time(
1452 u->manager->event,
1453 &u->auto_start_stop_event_source,
1454 CLOCK_MONOTONIC,
1455 ratelimit_end(&u->auto_start_stop_ratelimit),
1456 0,
1457 manager_ratelimit_requeue,
1458 u);
1459 if (r < 0)
1460 return log_unit_error_errno(u, r, "Failed to queue timer on event loop: %m");
1461
1462 return 0;
1463}
1464
a3c1168a
LP
1465static unsigned manager_dispatch_stop_when_unneeded_queue(Manager *m) {
1466 unsigned n = 0;
1467 Unit *u;
1468 int r;
1469
1470 assert(m);
1471
52e3671b 1472 while ((u = LIST_POP(stop_when_unneeded_queue, m->stop_when_unneeded_queue))) {
a3c1168a 1473 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
a3c1168a
LP
1474
1475 assert(u->in_stop_when_unneeded_queue);
a3c1168a
LP
1476 u->in_stop_when_unneeded_queue = false;
1477
1478 n++;
1479
1480 if (!unit_is_unneeded(u))
1481 continue;
1482
1483 log_unit_debug(u, "Unit is not needed anymore.");
1484
1485 /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the
1486 * service being unnecessary after a while. */
1487
7223d500
LB
1488 r = manager_ratelimit_check_and_queue(u);
1489 if (r <= 0) {
1490 log_unit_warning(u,
1491 "Unit not needed anymore, but not stopping since we tried this too often recently.%s",
1492 r == 0 ? " Will retry later." : "");
a3c1168a
LP
1493 continue;
1494 }
1495
1496 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
50cbaba4 1497 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
a3c1168a
LP
1498 if (r < 0)
1499 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1500 }
1501
1502 return n;
1503}
1504
0bc488c9
LP
1505static unsigned manager_dispatch_start_when_upheld_queue(Manager *m) {
1506 unsigned n = 0;
1507 Unit *u;
1508 int r;
1509
1510 assert(m);
1511
52e3671b 1512 while ((u = LIST_POP(start_when_upheld_queue, m->start_when_upheld_queue))) {
0bc488c9
LP
1513 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1514 Unit *culprit = NULL;
1515
1516 assert(u->in_start_when_upheld_queue);
0bc488c9
LP
1517 u->in_start_when_upheld_queue = false;
1518
1519 n++;
1520
1521 if (!unit_is_upheld_by_active(u, &culprit))
1522 continue;
1523
1524 log_unit_debug(u, "Unit is started because upheld by active unit %s.", culprit->id);
1525
1526                 /* If starting a unit fails continuously we might enter a start loop here, hence stop acting on the
1527                  * service being upheld after a while. */
1528
7223d500
LB
1529 r = manager_ratelimit_check_and_queue(u);
1530 if (r <= 0) {
1531 log_unit_warning(u,
1532 "Unit needs to be started because active unit %s upholds it, but not starting since we tried this too often recently.%s",
1533 culprit->id,
1534 r == 0 ? " Will retry later." : "");
0bc488c9
LP
1535 continue;
1536 }
1537
1538 r = manager_add_job(u->manager, JOB_START, u, JOB_FAIL, NULL, &error, NULL);
1539 if (r < 0)
1540 log_unit_warning_errno(u, r, "Failed to enqueue start job, ignoring: %s", bus_error_message(&error, r));
1541 }
1542
1543 return n;
1544}
1545
56c59592
LP
1546static unsigned manager_dispatch_stop_when_bound_queue(Manager *m) {
1547 unsigned n = 0;
1548 Unit *u;
1549 int r;
1550
1551 assert(m);
1552
52e3671b 1553 while ((u = LIST_POP(stop_when_bound_queue, m->stop_when_bound_queue))) {
56c59592
LP
1554 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1555 Unit *culprit = NULL;
1556
1557 assert(u->in_stop_when_bound_queue);
56c59592
LP
1558 u->in_stop_when_bound_queue = false;
1559
1560 n++;
1561
1562 if (!unit_is_bound_by_inactive(u, &culprit))
1563 continue;
1564
1565 log_unit_debug(u, "Unit is stopped because bound to inactive unit %s.", culprit->id);
1566
1567                 /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the
1568                  * service being bound to an inactive unit after a while. */
1569
7223d500
LB
1570 r = manager_ratelimit_check_and_queue(u);
1571 if (r <= 0) {
1572 log_unit_warning(u,
1573                                          "Unit needs to be stopped because it is bound to inactive unit %s, but not stopping since we tried this too often recently.%s",
1574 culprit->id,
1575 r == 0 ? " Will retry later." : "");
56c59592
LP
1576 continue;
1577 }
1578
1579 r = manager_add_job(u->manager, JOB_STOP, u, JOB_REPLACE, NULL, &error, NULL);
1580 if (r < 0)
1581 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1582 }
1583
1584 return n;
1585}
1586
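/*
 * Illustrative sketch (not part of manager.c): the dispatch pattern shared by
 * the three queue dispatchers above. Each unit carries an intrusive list node
 * plus an "in queue" flag; the dispatcher pops entries one by one, clears the
 * flag, re-checks the condition (the world may have changed since enqueueing)
 * and only then acts. All names below (Item, queue_pop, act_on, ...) are
 * hypothetical stand-ins for the LIST_POP()/unit_is_*() helpers used above.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct Item Item;
struct Item {
        Item *next;        /* intrusive singly-linked queue node */
        bool in_queue;     /* set while the item sits in the queue */
        bool condition;    /* stands in for unit_is_unneeded() and friends */
};

/* Pop the head of the queue, or return NULL when it is empty. */
Item *queue_pop(Item **head) {
        Item *i = *head;
        if (i) {
                *head = i->next;
                i->next = NULL;
        }
        return i;
}

void act_on(Item *i) {
        /* In PID 1 this is where a stop/start job would be enqueued. */
        (void) i;
}

unsigned dispatch_queue(Item **head) {
        unsigned n = 0;
        Item *i;

        while ((i = queue_pop(head))) {
                i->in_queue = false;
                n++;

                /* Re-check the condition before doing anything expensive. */
                if (!i->condition)
                        continue;

                act_on(i);
        }

        return n;  /* number of items processed, like the dispatchers above */
}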
a16e1123 1587static void manager_clear_jobs_and_units(Manager *m) {
a16e1123 1588 Unit *u;
60918275
LP
1589
1590 assert(m);
1591
87f0e418
LP
1592 while ((u = hashmap_first(m->units)))
1593 unit_free(u);
964e0949
LP
1594
1595 manager_dispatch_cleanup_queue(m);
1596
1597 assert(!m->load_queue);
da8e1782 1598 assert(prioq_isempty(m->run_queue));
964e0949
LP
1599 assert(!m->dbus_unit_queue);
1600 assert(!m->dbus_job_queue);
1601 assert(!m->cleanup_queue);
c5a97ed1
LP
1602 assert(!m->gc_unit_queue);
1603 assert(!m->gc_job_queue);
13e72103
MK
1604 assert(!m->cgroup_realize_queue);
1605 assert(!m->cgroup_empty_queue);
1606 assert(!m->cgroup_oom_queue);
1607 assert(!m->target_deps_queue);
a3c1168a 1608 assert(!m->stop_when_unneeded_queue);
0bc488c9 1609 assert(!m->start_when_upheld_queue);
56c59592 1610 assert(!m->stop_when_bound_queue);
cb3c6aec 1611 assert(!m->release_resources_queue);
964e0949 1612
964e0949
LP
1613 assert(hashmap_isempty(m->jobs));
1614 assert(hashmap_isempty(m->units));
9e9e2b72
MS
1615
1616 m->n_on_console = 0;
1617 m->n_running_jobs = 0;
1e75824c
MK
1618 m->n_installed_jobs = 0;
1619 m->n_failed_jobs = 0;
a16e1123
LP
1620}
1621
06d8d842 1622Manager* manager_free(Manager *m) {
06d8d842
ZJS
1623 if (!m)
1624 return NULL;
a16e1123
LP
1625
1626 manager_clear_jobs_and_units(m);
23a177ef 1627
24b45970 1628 for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++)
7824bbeb
LP
1629 if (unit_vtable[c]->shutdown)
1630 unit_vtable[c]->shutdown(m);
1631
86036b26 1632 /* Keep the cgroup hierarchy in place except when we know we are going down for good */
13ffc607 1633 manager_shutdown_cgroup(m, /* delete= */ IN_SET(m->objective, MANAGER_EXIT, MANAGER_REBOOT, MANAGER_POWEROFF, MANAGER_HALT, MANAGER_KEXEC));
8e274523 1634
07a78643 1635 lookup_paths_flush_generator(&m->lookup_paths);
5a1e9937 1636
5e8d1c9a 1637 bus_done(m);
19d22d43 1638 manager_varlink_done(m);
ea430986 1639
e76506b7
DDM
1640 exec_shared_runtime_vacuum(m);
1641 hashmap_free(m->exec_shared_runtime_by_id);
e8a565cb 1642
29206d46
LP
1643 dynamic_user_vacuum(m, false);
1644 hashmap_free(m->dynamic_users);
1645
87f0e418 1646 hashmap_free(m->units);
4b58153d 1647 hashmap_free(m->units_by_invocation_id);
60918275 1648 hashmap_free(m->jobs);
62a76913 1649 hashmap_free(m->watch_pids);
495e75ed 1650 hashmap_free(m->watch_pids_more);
05e343b7 1651 hashmap_free(m->watch_bus);
9152c765 1652
da8e1782
MO
1653 prioq_free(m->run_queue);
1654
95ae05c0 1655 set_free(m->startup_units);
f755e3b7
LP
1656 set_free(m->failed_units);
1657
718db961 1658 sd_event_source_unref(m->signal_event_source);
575b300b 1659 sd_event_source_unref(m->sigchld_event_source);
718db961 1660 sd_event_source_unref(m->notify_event_source);
d8fdc620 1661 sd_event_source_unref(m->cgroups_agent_event_source);
718db961 1662 sd_event_source_unref(m->time_change_event_source);
bbf5fd8e 1663 sd_event_source_unref(m->timezone_change_event_source);
718db961 1664 sd_event_source_unref(m->jobs_in_progress_event_source);
752b5905 1665 sd_event_source_unref(m->run_queue_event_source);
00d9ef85 1666 sd_event_source_unref(m->user_lookup_event_source);
29e6b0c1 1667 sd_event_source_unref(m->memory_pressure_event_source);
718db961 1668
03e334a1
LP
1669 safe_close(m->signal_fd);
1670 safe_close(m->notify_fd);
d8fdc620 1671 safe_close(m->cgroups_agent_fd);
00d9ef85 1672 safe_close_pair(m->user_lookup_fds);
718db961 1673
e46b13c8
ZJS
1674 manager_close_ask_password(m);
1675
718db961
LP
1676 manager_close_idle_pipe(m);
1677
1678 sd_event_unref(m->event);
60918275 1679
c952c6ec
LP
1680 free(m->notify_socket);
1681
84e3543e 1682 lookup_paths_free(&m->lookup_paths);
1ad6e8b3
LP
1683 strv_free(m->transient_environment);
1684 strv_free(m->client_environment);
036643a2 1685
4ad49000 1686 hashmap_free(m->cgroup_unit);
e8630e69 1687 manager_free_unit_name_maps(m);
33be102a 1688
664f88a7
LP
1689 free(m->switch_root);
1690 free(m->switch_root_init);
1691
c9e120e0 1692 unit_defaults_done(&m->defaults);
c93ff2e9 1693
a57f7e2c
LP
1694 assert(hashmap_isempty(m->units_requiring_mounts_for));
1695 hashmap_free(m->units_requiring_mounts_for);
1696
00d9ef85
LP
1697 hashmap_free(m->uid_refs);
1698 hashmap_free(m->gid_refs);
1699
24b45970 1700 for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
35aba85a 1701 m->prefix[dt] = mfree(m->prefix[dt]);
2ad591a3
LP
1702 free(m->received_credentials_directory);
1703 free(m->received_encrypted_credentials_directory);
35aba85a 1704
aff3a9e1
LB
1705 free(m->watchdog_pretimeout_governor);
1706 free(m->watchdog_pretimeout_governor_overridden);
1707
dc7d69b3
TM
1708 m->fw_ctx = fw_ctx_free(m->fw_ctx);
1709
b1994387
ILG
1710#if BPF_FRAMEWORK
1711 lsm_bpf_destroy(m->restrict_fs);
1712#endif
1713
bb5232b6
LB
1714 safe_close(m->executor_fd);
1715
6b430fdb 1716 return mfree(m);
60918275
LP
1717}
1718
04eb582a 1719static void manager_enumerate_perpetual(Manager *m) {
04eb582a
LP
1720 assert(m);
1721
1545051c 1722 if (FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL))
b1d5246d
ZJS
1723 return;
1724
04eb582a 1725         /* Let's ask every type to instantiate the perpetual units it knows of */
24b45970 1726 for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) {
04eb582a
LP
1727 if (!unit_type_supported(c)) {
1728 log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c));
1729 continue;
1730 }
1731
1732 if (unit_vtable[c]->enumerate_perpetual)
1733 unit_vtable[c]->enumerate_perpetual(m);
1734 }
1735}
1736
a1113e08 1737static void manager_enumerate(Manager *m) {
f50e0a01
LP
1738 assert(m);
1739
1545051c 1740 if (FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL))
b1d5246d
ZJS
1741 return;
1742
04eb582a 1743 /* Let's ask every type to load all units from disk/kernel that it might know */
24b45970 1744 for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) {
1c2e9646 1745 if (!unit_type_supported(c)) {
03afec3c 1746 log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c));
0faacd47 1747 continue;
a57f7e2c 1748 }
f50e0a01 1749
94b01dae
ZJS
1750 if (unit_vtable[c]->enumerate)
1751 unit_vtable[c]->enumerate(m);
0faacd47
LP
1752 }
1753
f50e0a01 1754 manager_dispatch_load_queue(m);
a16e1123
LP
1755}
1756
007c6337 1757static void manager_coldplug(Manager *m) {
a16e1123
LP
1758 Unit *u;
1759 char *k;
007c6337 1760 int r;
a16e1123
LP
1761
1762 assert(m);
f50e0a01 1763
28e5e1e9 1764 log_debug("Invoking unit coldplug() handlers%s", special_glyph(SPECIAL_GLYPH_ELLIPSIS));
f0831ed2
LP
1765
1766 /* Let's place the units back into their deserialized state */
90e74a66 1767 HASHMAP_FOREACH_KEY(u, k, m->units) {
f50e0a01
LP
1768
1769 /* ignore aliases */
ac155bb8 1770 if (u->id != k)
f50e0a01
LP
1771 continue;
1772
007c6337
LP
1773 r = unit_coldplug(u);
1774 if (r < 0)
1775 log_warning_errno(r, "We couldn't coldplug %s, proceeding anyway: %m", u->id);
f50e0a01 1776 }
a16e1123
LP
1777}
1778
f0831ed2 1779static void manager_catchup(Manager *m) {
f0831ed2
LP
1780 Unit *u;
1781 char *k;
1782
1783 assert(m);
1784
28e5e1e9 1785 log_debug("Invoking unit catchup() handlers%s", special_glyph(SPECIAL_GLYPH_ELLIPSIS));
f0831ed2
LP
1786
1787 /* Let's catch up on any state changes that happened while we were reloading/reexecing */
90e74a66 1788 HASHMAP_FOREACH_KEY(u, k, m->units) {
f0831ed2
LP
1789
1790 /* ignore aliases */
1791 if (u->id != k)
1792 continue;
1793
1794 unit_catchup(u);
1795 }
1796}
1797
9ff1a6f1 1798static void manager_distribute_fds(Manager *m, FDSet *fds) {
9ff1a6f1 1799 Unit *u;
9588bc32
LP
1800
1801 assert(m);
1802
90e74a66 1803 HASHMAP_FOREACH(u, m->units) {
9588bc32
LP
1804
1805 if (fdset_size(fds) <= 0)
1806 break;
1807
9ff1a6f1
LP
1808 if (!UNIT_VTABLE(u)->distribute_fds)
1809 continue;
9588bc32 1810
9ff1a6f1
LP
1811 UNIT_VTABLE(u)->distribute_fds(u, fds);
1812 }
9588bc32
LP
1813}
1814
2b680534 1815static bool manager_dbus_is_running(Manager *m, bool deserialized) {
8559b3b7
LP
1816 Unit *u;
1817
1818 assert(m);
1819
1820 /* This checks whether the dbus instance we are supposed to expose our APIs on is up. We check both the socket
1821 * and the service unit. If the 'deserialized' parameter is true we'll check the deserialized state of the unit
1822 * rather than the current one. */
1823
638cece4 1824 if (MANAGER_IS_TEST_RUN(m))
8559b3b7
LP
1825 return false;
1826
8559b3b7
LP
1827 u = manager_get_unit(m, SPECIAL_DBUS_SOCKET);
1828 if (!u)
1829 return false;
1830 if ((deserialized ? SOCKET(u)->deserialized_state : SOCKET(u)->state) != SOCKET_RUNNING)
1831 return false;
1832
1833 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
1834 if (!u)
1835 return false;
845824ac 1836 if (!IN_SET((deserialized ? SERVICE(u)->deserialized_state : SERVICE(u)->state),
1837 SERVICE_RUNNING,
1838 SERVICE_RELOAD,
1839 SERVICE_RELOAD_NOTIFY,
1840 SERVICE_RELOAD_SIGNAL))
8559b3b7
LP
1841 return false;
1842
1843 return true;
1844}
1845
9d4c195c
LP
1846static void manager_setup_bus(Manager *m) {
1847 assert(m);
1848
1849 /* Let's set up our private bus connection now, unconditionally */
1850 (void) bus_init_private(m);
1851
1852 /* If we are in --user mode also connect to the system bus now */
1853 if (MANAGER_IS_USER(m))
1854 (void) bus_init_system(m);
1855
1856 /* Let's connect to the bus now, but only if the unit is supposed to be up */
2b680534 1857 if (manager_dbus_is_running(m, MANAGER_IS_RELOADING(m))) {
9d4c195c
LP
1858 (void) bus_init_api(m);
1859
1860 if (MANAGER_IS_SYSTEM(m))
1861 (void) bus_init_system(m);
1862 }
1863}
1864
159f1e76
LP
1865static void manager_preset_all(Manager *m) {
1866 int r;
1867
1868 assert(m);
1869
1870 if (m->first_boot <= 0)
1871 return;
1872
1873 if (!MANAGER_IS_SYSTEM(m))
1874 return;
1875
638cece4 1876 if (MANAGER_IS_TEST_RUN(m))
159f1e76
LP
1877 return;
1878
1879 /* If this is the first boot, and we are in the host system, then preset everything */
22330352
ZJS
1880 UnitFilePresetMode mode =
1881 ENABLE_FIRST_BOOT_FULL_PRESET ? UNIT_FILE_PRESET_FULL : UNIT_FILE_PRESET_ENABLE_ONLY;
93651582 1882
4870133b 1883 r = unit_file_preset_all(RUNTIME_SCOPE_SYSTEM, 0, NULL, mode, NULL, 0);
159f1e76
LP
1884 if (r < 0)
1885 log_full_errno(r == -EEXIST ? LOG_NOTICE : LOG_WARNING, r,
1886 "Failed to populate /etc with preset unit settings, ignoring: %m");
1887 else
1888 log_info("Populated /etc with preset unit settings.");
1889}
1890
5ce5e1ad
LP
1891static void manager_ready(Manager *m) {
1892 assert(m);
1893
1894 /* After having loaded everything, do the final round of catching up with what might have changed */
1895
1896 m->objective = MANAGER_OK; /* Tell everyone we are up now */
1897
1898 /* It might be safe to log to the journal now and connect to dbus */
1899 manager_recheck_journal(m);
1900 manager_recheck_dbus(m);
1901
5ce5e1ad
LP
1902 /* Let's finally catch up with any changes that took place while we were reloading/reexecing */
1903 manager_catchup(m);
c6e892bc 1904
15b9243c 1905 /* Create a file which will indicate when the manager started loading units the last time. */
4b3ad81b
LB
1906 if (MANAGER_IS_SYSTEM(m))
1907 (void) touch_file("/run/systemd/systemd-units-load", false,
1908 m->timestamps[MANAGER_TIMESTAMP_UNITS_LOAD].realtime ?: now(CLOCK_REALTIME),
1909 UID_INVALID, GID_INVALID, 0444);
5ce5e1ad
LP
1910}
1911
a01ba4b2 1912Manager* manager_reloading_start(Manager *m) {
d147e2b6 1913 m->n_reloading++;
fa5a0251 1914 dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD);
d147e2b6
ZJS
1915 return m;
1916}
49fbe940 1917
a01ba4b2 1918void manager_reloading_stopp(Manager **m) {
d147e2b6
ZJS
1919 if (*m) {
1920 assert((*m)->n_reloading > 0);
1921 (*m)->n_reloading--;
1922 }
1923}
1924
2a7cf953 1925int manager_startup(Manager *m, FILE *serialization, FDSet *fds, const char *root) {
17f01ace 1926 int r;
a16e1123
LP
1927
1928 assert(m);
1929
a1f31f47
ZJS
1930 /* If we are running in test mode, we still want to run the generators,
1931 * but we should not touch the real generator directories. */
4870133b 1932 r = lookup_paths_init_or_warn(&m->lookup_paths, m->runtime_scope,
99aad9a2
ZJS
1933 MANAGER_IS_TEST_RUN(m) ? LOOKUP_PATHS_TEMPORARY_GENERATED : 0,
1934 root);
e801700e 1935 if (r < 0)
99aad9a2 1936 return r;
5a1e9937 1937
fa5a0251 1938 dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_START));
eb523bfb
LP
1939 r = manager_run_environment_generators(m);
1940 if (r >= 0)
1941 r = manager_run_generators(m);
fa5a0251 1942 dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_FINISH));
07719a21
LP
1943 if (r < 0)
1944 return r;
1945
159f1e76 1946 manager_preset_all(m);
572986ca 1947
581fef8d 1948 lookup_paths_log(&m->lookup_paths);
572986ca 1949
4df7d537
ZJS
1950 {
1951 /* This block is (optionally) done with the reloading counter bumped */
d7ac0952 1952 _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL;
4df7d537 1953
45f540a2
ZJS
1954 /* Make sure we don't have a left-over from a previous run */
1955 if (!serialization)
1956 (void) rm_rf(m->lookup_paths.transient, 0);
1957
4df7d537
ZJS
1958 /* If we will deserialize make sure that during enumeration this is already known, so we increase the
1959 * counter here already */
1960 if (serialization)
1961 reloading = manager_reloading_start(m);
1962
1963 /* First, enumerate what we can from all config files */
fa5a0251 1964 dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_START));
4df7d537
ZJS
1965 manager_enumerate_perpetual(m);
1966 manager_enumerate(m);
fa5a0251 1967 dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_FINISH));
4df7d537
ZJS
1968
1969 /* Second, deserialize if there is something to deserialize */
1970 if (serialization) {
1971 r = manager_deserialize(m, serialization, fds);
1972 if (r < 0)
1973 return log_error_errno(r, "Deserialization failed: %m");
1974 }
01e10de3 1975
4df7d537
ZJS
1976 /* Any fds left? Find some unit which wants them. This is useful to allow container managers to pass
1977 * some file descriptors to us pre-initialized. This enables socket-based activation of entire
1978 * containers. */
1979 manager_distribute_fds(m, fds);
d86f9d52 1980
4df7d537
ZJS
1981                 /* We might have deserialized the notify fd, but if we didn't then let's create it now */
1982 r = manager_setup_notify(m);
1983 if (r < 0)
1984 /* No sense to continue without notifications, our children would fail anyway. */
1985 return r;
d8fdc620 1986
4df7d537
ZJS
1987 r = manager_setup_cgroups_agent(m);
1988 if (r < 0)
1989 /* Likewise, no sense to continue without empty cgroup notifications. */
1990 return r;
00d9ef85 1991
4df7d537
ZJS
1992 r = manager_setup_user_lookup_fd(m);
1993 if (r < 0)
1994 /* This shouldn't fail, except if things are really broken. */
1995 return r;
8559b3b7 1996
4df7d537
ZJS
1997 /* Connect to the bus if we are good for it */
1998 manager_setup_bus(m);
e3dd987c 1999
5238e957 2000 /* Now that we are connected to all possible buses, let's deserialize who is tracking us. */
4df7d537
ZJS
2001 r = bus_track_coldplug(m, &m->subscribed, false, m->deserialized_subscribed);
2002 if (r < 0)
2003                         log_warning_errno(r, "Failed to deserialize tracked clients, ignoring: %m");
2004 m->deserialized_subscribed = strv_free(m->deserialized_subscribed);
a16e1123 2005
19d22d43
LP
2006 r = manager_varlink_init(m);
2007 if (r < 0)
064a5c14 2008 log_warning_errno(r, "Failed to set up Varlink, ignoring: %m");
19d22d43 2009
4df7d537
ZJS
2010 /* Third, fire things up! */
2011 manager_coldplug(m);
3ad2afb6 2012
4df7d537
ZJS
2013 /* Clean up runtime objects */
2014 manager_vacuum(m);
71445ae7 2015
4df7d537
ZJS
2016 if (serialization)
2017                         /* Let's wait for the UnitNew/JobNew messages to be sent before we notify that the
2018                          * reload is finished */
2019 m->send_reloading_done = true;
9f611ad8
LP
2020 }
2021
5ce5e1ad 2022 manager_ready(m);
f0831ed2 2023
d35fe8c0
FB
2024 manager_set_switching_root(m, false);
2025
17f01ace 2026 return 0;
f50e0a01
LP
2027}
2028
50cbaba4
LP
2029int manager_add_job(
2030 Manager *m,
2031 JobType type,
2032 Unit *unit,
2033 JobMode mode,
2034 Set *affected_jobs,
2035 sd_bus_error *error,
2036 Job **ret) {
2037
c7e3eb5d 2038 _cleanup_(transaction_abort_and_freep) Transaction *tr = NULL;
50cbaba4 2039 int r;
e5b5ae50
LP
2040
2041 assert(m);
2042 assert(type < _JOB_TYPE_MAX);
87f0e418 2043 assert(unit);
e5b5ae50 2044 assert(mode < _JOB_MODE_MAX);
60918275 2045
7358dc02 2046 if (mode == JOB_ISOLATE && type != JOB_START)
1b09b81c 2047 return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start.");
c497c7a9 2048
7358dc02 2049 if (mode == JOB_ISOLATE && !unit->allow_isolate)
1b09b81c 2050 return sd_bus_error_set(error, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
2528a7a6 2051
1f0f9f21 2052 if (mode == JOB_TRIGGERING && type != JOB_STOP)
1b09b81c 2053 return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=triggering is only valid for stop.");
1f0f9f21 2054
09d04ad3
LP
2055 if (mode == JOB_RESTART_DEPENDENCIES && type != JOB_START)
2056 return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=restart-dependencies is only valid for start.");
2057
f2341e0a 2058 log_unit_debug(unit, "Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
9f04bd52 2059
c6497ccb 2060 type = job_type_collapse(type, unit);
e0209d83 2061
23ade460 2062 tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
7527cb52
MS
2063 if (!tr)
2064 return -ENOMEM;
11dd41ce 2065
b0904249
LP
2066 r = transaction_add_job_and_dependencies(
2067 tr,
2068 type,
2069 unit,
2070 /* by= */ NULL,
2071 TRANSACTION_MATTERS |
2072 (IN_SET(mode, JOB_IGNORE_DEPENDENCIES, JOB_IGNORE_REQUIREMENTS) ? TRANSACTION_IGNORE_REQUIREMENTS : 0) |
09d04ad3
LP
2073 (mode == JOB_IGNORE_DEPENDENCIES ? TRANSACTION_IGNORE_ORDER : 0) |
2074 (mode == JOB_RESTART_DEPENDENCIES ? TRANSACTION_PROPAGATE_START_AS_RESTART : 0),
b0904249 2075 error);
7527cb52 2076 if (r < 0)
c7e3eb5d 2077 return r;
c497c7a9 2078
7527cb52
MS
2079 if (mode == JOB_ISOLATE) {
2080 r = transaction_add_isolate_jobs(tr, m);
2081 if (r < 0)
c7e3eb5d 2082 return r;
7527cb52
MS
2083 }
2084
1f0f9f21
KK
2085 if (mode == JOB_TRIGGERING) {
2086 r = transaction_add_triggering_jobs(tr, unit);
2087 if (r < 0)
c7e3eb5d 2088 return r;
1f0f9f21
KK
2089 }
2090
50cbaba4 2091 r = transaction_activate(tr, m, mode, affected_jobs, error);
7527cb52 2092 if (r < 0)
c7e3eb5d 2093 return r;
e5b5ae50 2094
f2341e0a 2095 log_unit_debug(unit,
66870f90
ZJS
2096 "Enqueued job %s/%s as %u", unit->id,
2097 job_type_to_string(type), (unsigned) tr->anchor_job->id);
f50e0a01 2098
50cbaba4
LP
2099 if (ret)
2100 *ret = tr->anchor_job;
60918275 2101
c7e3eb5d 2102 tr = transaction_free(tr);
e5b5ae50
LP
2103 return 0;
2104}
60918275 2105
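/*
 * Illustrative sketch (not part of manager.c): what enqueueing a job looks
 * like from outside PID 1. manager_add_job() above is what ultimately runs
 * when a client calls the StartUnit() method on the manager's D-Bus API; the
 * job mode string ("replace", "fail", "isolate", ...) corresponds to the
 * JobMode values checked above. The unit name "example.service" is just a
 * placeholder.
 */
#include <stdio.h>
#include <systemd/sd-bus.h>

int start_unit_example(void) {
        sd_bus_error error = SD_BUS_ERROR_NULL;
        sd_bus_message *reply = NULL;
        sd_bus *bus = NULL;
        const char *job_path;
        int r;

        r = sd_bus_default_system(&bus);  /* or sd_bus_default_user() for --user */
        if (r < 0)
                return r;

        r = sd_bus_call_method(bus,
                               "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "StartUnit",
                               &error, &reply,
                               "ss", "example.service", "replace");
        if (r < 0)
                fprintf(stderr, "StartUnit failed: %s\n",
                        error.message ? error.message : "unknown error");
        else {
                /* The reply carries the object path of the freshly enqueued job. */
                r = sd_bus_message_read(reply, "o", &job_path);
                if (r >= 0)
                        printf("Enqueued job: %s\n", job_path);
        }

        sd_bus_error_free(&error);
        sd_bus_message_unref(reply);
        sd_bus_unref(bus);
        return r;
}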
50cbaba4 2106int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, sd_bus_error *e, Job **ret) {
4440b27d 2107 Unit *unit = NULL; /* just to appease gcc, initialization is not really necessary */
28247076
LP
2108 int r;
2109
2110 assert(m);
2111 assert(type < _JOB_TYPE_MAX);
2112 assert(name);
2113 assert(mode < _JOB_MODE_MAX);
2114
c3090674
LP
2115 r = manager_load_unit(m, name, NULL, NULL, &unit);
2116 if (r < 0)
28247076 2117 return r;
4440b27d 2118 assert(unit);
28247076 2119
50cbaba4 2120 return manager_add_job(m, type, unit, mode, affected_jobs, e, ret);
53f18416
LP
2121}
2122
50cbaba4 2123int manager_add_job_by_name_and_warn(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, Job **ret) {
4afd3348 2124 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
53f18416
LP
2125 int r;
2126
2127 assert(m);
2128 assert(type < _JOB_TYPE_MAX);
2129 assert(name);
2130 assert(mode < _JOB_MODE_MAX);
2131
50cbaba4 2132 r = manager_add_job_by_name(m, type, name, mode, affected_jobs, &error, ret);
53f18416
LP
2133 if (r < 0)
2134 return log_warning_errno(r, "Failed to enqueue %s job for %s: %s", job_mode_to_string(mode), name, bus_error_message(&error, r));
2135
2136 return r;
28247076
LP
2137}
2138
15d167f8
JW
2139int manager_propagate_reload(Manager *m, Unit *unit, JobMode mode, sd_bus_error *e) {
2140 int r;
32620826 2141 _cleanup_(transaction_abort_and_freep) Transaction *tr = NULL;
15d167f8
JW
2142
2143 assert(m);
2144 assert(unit);
2145 assert(mode < _JOB_MODE_MAX);
2146 assert(mode != JOB_ISOLATE); /* Isolate is only valid for start */
2147
2148 tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
2149 if (!tr)
2150 return -ENOMEM;
2151
2152 /* We need an anchor job */
b0904249 2153 r = transaction_add_job_and_dependencies(tr, JOB_NOP, unit, NULL, TRANSACTION_IGNORE_REQUIREMENTS|TRANSACTION_IGNORE_ORDER, e);
15d167f8 2154 if (r < 0)
32620826 2155 return r;
15d167f8
JW
2156
2157 /* Failure in adding individual dependencies is ignored, so this always succeeds. */
b0904249
LP
2158 transaction_add_propagate_reload_jobs(
2159 tr,
2160 unit,
2161 tr->anchor_job,
3044510d 2162 mode == JOB_IGNORE_DEPENDENCIES ? TRANSACTION_IGNORE_ORDER : 0);
15d167f8 2163
50cbaba4 2164 r = transaction_activate(tr, m, mode, NULL, e);
15d167f8 2165 if (r < 0)
32620826 2166 return r;
15d167f8 2167
32620826 2168 tr = transaction_free(tr);
15d167f8 2169 return 0;
15d167f8
JW
2170}
2171
60918275
LP
2172Job *manager_get_job(Manager *m, uint32_t id) {
2173 assert(m);
2174
2175 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
2176}
2177
87f0e418 2178Unit *manager_get_unit(Manager *m, const char *name) {
60918275
LP
2179 assert(m);
2180 assert(name);
2181
87f0e418 2182 return hashmap_get(m->units, name);
60918275
LP
2183}
2184
19496554
MS
2185static int manager_dispatch_target_deps_queue(Manager *m) {
2186 Unit *u;
19496554
MS
2187 int r = 0;
2188
19496554
MS
2189 assert(m);
2190
52e3671b 2191 while ((u = LIST_POP(target_deps_queue, m->target_deps_queue))) {
15ed3c3a
LP
2192 _cleanup_free_ Unit **targets = NULL;
2193 int n_targets;
2194
19496554
MS
2195 assert(u->in_target_deps_queue);
2196
19496554
MS
2197 u->in_target_deps_queue = false;
2198
15ed3c3a
LP
2199 /* Take an "atomic" snapshot of dependencies here, as the call below will likely modify the
2200 * dependencies, and we can't have it that hash tables we iterate through are modified while
2201 * we are iterating through them. */
2202 n_targets = unit_get_dependency_array(u, UNIT_ATOM_DEFAULT_TARGET_DEPENDENCIES, &targets);
2203 if (n_targets < 0)
2204 return n_targets;
19496554 2205
15ed3c3a
LP
2206 for (int i = 0; i < n_targets; i++) {
2207 r = unit_add_default_target_dependency(u, targets[i]);
2208 if (r < 0)
2209 return r;
19496554
MS
2210 }
2211 }
2212
2213 return r;
2214}
2215
c1e1601e 2216unsigned manager_dispatch_load_queue(Manager *m) {
595ed347 2217 Unit *u;
c1e1601e 2218 unsigned n = 0;
60918275
LP
2219
2220 assert(m);
2221
223dabab
LP
2222 /* Make sure we are not run recursively */
2223 if (m->dispatching_load_queue)
c1e1601e 2224 return 0;
223dabab
LP
2225
2226 m->dispatching_load_queue = true;
2227
87f0e418 2228 /* Dispatches the load queue. Takes a unit from the queue and
60918275
LP
2229 * tries to load its data until the queue is empty */
2230
595ed347
MS
2231 while ((u = m->load_queue)) {
2232 assert(u->in_load_queue);
034c6ed7 2233
595ed347 2234 unit_load(u);
c1e1601e 2235 n++;
60918275
LP
2236 }
2237
223dabab 2238 m->dispatching_load_queue = false;
19496554
MS
2239
2240 /* Dispatch the units waiting for their target dependencies to be added now, as all targets that we know about
2241 * should be loaded and have aliases resolved */
2242 (void) manager_dispatch_target_deps_queue(m);
2243
c1e1601e 2244 return n;
60918275
LP
2245}
2246
81be2388 2247bool manager_unit_cache_should_retry_load(Unit *u) {
cda66772
LB
2248 assert(u);
2249
81be2388
ZJS
2250 /* Automatic reloading from disk only applies to units which were not found sometime in the past, and
2251 * the not-found stub is kept pinned in the unit graph by dependencies. For units that were
2252 * previously loaded, we don't do automatic reloading, and daemon-reload is necessary to update. */
cda66772
LB
2253 if (u->load_state != UNIT_NOT_FOUND)
2254 return false;
2255
81be2388
ZJS
2256 /* The cache has been updated since the last time we tried to load the unit. There might be new
2257 * fragment paths to read. */
c2911d48 2258 if (u->manager->unit_cache_timestamp_hash != u->fragment_not_found_timestamp_hash)
cda66772 2259 return true;
d904afc7 2260
81be2388 2261 /* The cache needs to be updated because there are modifications on disk. */
c2911d48 2262 return !lookup_paths_timestamp_hash_same(&u->manager->lookup_paths, u->manager->unit_cache_timestamp_hash, NULL);
d904afc7
LB
2263}
2264
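/*
 * Illustrative sketch (not part of manager.c): the idea behind
 * manager_unit_cache_should_retry_load() above, reduced to plain stat(). A
 * timestamp is recorded when the unit paths were last scanned; a not-found
 * unit is only worth retrying if some search directory has been modified
 * since then. The real code uses a hash over all lookup paths instead of a
 * single mtime; everything below (search_dirs, last_scan_ts) is a
 * hypothetical simplification.
 */
#include <stdbool.h>
#include <sys/stat.h>
#include <time.h>

/* Returns true if any directory in the NULL-terminated list was modified
 * after the given timestamp, i.e. a fresh scan might find new unit files. */
bool unit_dirs_changed_since(const char *const *search_dirs, time_t last_scan_ts) {
        for (const char *const *d = search_dirs; *d; d++) {
                struct stat st;

                if (stat(*d, &st) < 0)
                        continue;  /* missing directories are simply skipped */

                if (st.st_mtime > last_scan_ts)
                        return true;
        }

        return false;
}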
c2756a68
LP
2265int manager_load_unit_prepare(
2266 Manager *m,
2267 const char *name,
2268 const char *path,
718db961 2269 sd_bus_error *e,
4b6a2b3f 2270 Unit **ret) {
c2756a68 2271
4b6a2b3f 2272 _cleanup_(unit_freep) Unit *cleanup_unit = NULL;
a99626c1 2273 _cleanup_free_ char *nbuf = NULL;
60918275
LP
2274 int r;
2275
2276 assert(m);
4b6a2b3f 2277 assert(ret);
a99626c1 2278 assert(name || path);
60918275 2279
526e3cbb 2280 /* This will prepare the unit for loading, but not actually load anything from disk. */
0301abf4 2281
526e3cbb 2282 if (path && !path_is_absolute(path))
718db961 2283 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path %s is not absolute.", path);
9e2f7c11 2284
08f46856 2285 if (!name) {
a99626c1
LP
2286 r = path_extract_filename(path, &nbuf);
2287 if (r < 0)
2288 return r;
2289 if (r == O_DIRECTORY)
2290 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path '%s' refers to directory, refusing.", path);
2291
2292 name = nbuf;
08f46856 2293 }
9e2f7c11 2294
4b6a2b3f 2295 UnitType t = unit_name_to_type(name);
7d17cfbc 2296
5d512d54
LN
2297 if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
2298 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE))
2299 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is missing the instance name.", name);
2300
718db961 2301 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is not valid.", name);
5d512d54 2302 }
60918275 2303
4b6a2b3f
ZJS
2304 Unit *unit = manager_get_unit(m, name);
2305 if (unit) {
d904afc7
LB
2306 /* The time-based cache allows to start new units without daemon-reload,
2307 * but if they are already referenced (because of dependencies or ordering)
2308 * then we have to force a load of the fragment. As an optimization, check
2309 * first if anything in the usual paths was modified since the last time
7233e91a
LB
2310 * the cache was loaded. Also check if the last time an attempt to load the
2311 * unit was made was before the most recent cache refresh, so that we know
81be2388 2312 * we need to try again — even if the cache is current, it might have been
7233e91a
LB
2313 * updated in a different context before we had a chance to retry loading
2314 * this particular unit. */
4b6a2b3f
ZJS
2315 if (manager_unit_cache_should_retry_load(unit))
2316 unit->load_state = UNIT_STUB;
d904afc7 2317 else {
4b6a2b3f 2318 *ret = unit;
535b7fcb 2319 return 0; /* The unit was already loaded */
d904afc7
LB
2320 }
2321 } else {
4b6a2b3f
ZJS
2322 unit = cleanup_unit = unit_new(m, unit_vtable[t]->object_size);
2323 if (!unit)
d904afc7 2324 return -ENOMEM;
034c6ed7 2325 }
60918275 2326
7d17cfbc 2327 if (path) {
4b6a2b3f 2328 r = free_and_strdup(&unit->fragment_path, path);
d904afc7
LB
2329 if (r < 0)
2330 return r;
7d17cfbc 2331 }
0301abf4 2332
4b6a2b3f 2333 r = unit_add_name(unit, name);
dc409696 2334 if (r < 0)
1ffba6fe 2335 return r;
60918275 2336
4b6a2b3f
ZJS
2337 unit_add_to_load_queue(unit);
2338 unit_add_to_dbus_queue(unit);
2339 unit_add_to_gc_queue(unit);
c1e1601e 2340
4b6a2b3f
ZJS
2341 *ret = unit;
2342 TAKE_PTR(cleanup_unit);
db06e3b6 2343
535b7fcb 2344         return 1; /* The unit was added to the load queue */
db06e3b6
LP
2345}
2346
c2756a68
LP
2347int manager_load_unit(
2348 Manager *m,
2349 const char *name,
2350 const char *path,
718db961 2351 sd_bus_error *e,
4b6a2b3f 2352 Unit **ret) {
db06e3b6
LP
2353 int r;
2354
2355 assert(m);
4b6a2b3f 2356 assert(ret);
db06e3b6 2357
4b6a2b3f 2358 /* This will load the unit config, but not actually start any services or anything. */
db06e3b6 2359
4b6a2b3f 2360 r = manager_load_unit_prepare(m, name, path, e, ret);
535b7fcb 2361 if (r <= 0)
db06e3b6
LP
2362 return r;
2363
535b7fcb 2364 /* Unit was newly loaded */
f50e0a01 2365 manager_dispatch_load_queue(m);
4b6a2b3f 2366 *ret = unit_follow_merge(*ret);
4109ede7
ZJS
2367 return 0;
2368}
2369
2370int manager_load_startable_unit_or_warn(
2371 Manager *m,
2372 const char *name,
2373 const char *path,
2374 Unit **ret) {
2375
2376 /* Load a unit, make sure it loaded fully and is not masked. */
2377
2378 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2379 Unit *unit;
2380 int r;
2381
2382 r = manager_load_unit(m, name, path, &error, &unit);
2383 if (r < 0)
2384 return log_error_errno(r, "Failed to load %s %s: %s",
8ace1db7 2385 name ? "unit" : "unit file", name ?: path,
4109ede7 2386 bus_error_message(&error, r));
8ace1db7
LP
2387
2388 r = bus_unit_validate_load_state(unit, &error);
2389 if (r < 0)
2390 return log_error_errno(r, "%s", bus_error_message(&error, r));
9e2f7c11 2391
4109ede7 2392 *ret = unit;
60918275
LP
2393 return 0;
2394}
a66d02c3 2395
7fad411c
LP
2396void manager_clear_jobs(Manager *m) {
2397 Job *j;
2398
2399 assert(m);
2400
7fad411c 2401 while ((j = hashmap_first(m->jobs)))
5273510e 2402 /* No need to recurse. We're cancelling all jobs. */
833f92ad 2403 job_finish_and_invalidate(j, JOB_CANCELED, false, false);
7fad411c 2404}
83c60c9f 2405
495e75ed 2406void manager_unwatch_pidref(Manager *m, PidRef *pid) {
f75f613d
FB
2407 assert(m);
2408
495e75ed
LP
2409 for (;;) {
2410 Unit *u;
f75f613d 2411
495e75ed
LP
2412 u = manager_get_unit_by_pidref_watching(m, pid);
2413 if (!u)
2414 break;
2415
2416 unit_unwatch_pidref(u, pid);
2417 }
f75f613d
FB
2418}
2419
752b5905 2420static int manager_dispatch_run_queue(sd_event_source *source, void *userdata) {
99534007 2421 Manager *m = ASSERT_PTR(userdata);
83c60c9f 2422 Job *j;
034c6ed7 2423
752b5905 2424 assert(source);
9152c765 2425
da8e1782 2426 while ((j = prioq_peek(m->run_queue))) {
ac1135be 2427 assert(j->installed);
034c6ed7
LP
2428 assert(j->in_run_queue);
2429
6e64994d 2430 (void) job_run_and_invalidate(j);
9152c765 2431 }
034c6ed7 2432
a0b64226 2433 if (m->n_running_jobs > 0)
03b717a3
MS
2434 manager_watch_jobs_in_progress(m);
2435
31a7eb86
ZJS
2436 if (m->n_on_console > 0)
2437 manager_watch_idle_pipe(m);
2438
752b5905 2439 return 1;
c1e1601e
LP
2440}
2441
b0c4b282
LP
2442void manager_trigger_run_queue(Manager *m) {
2443 int r;
2444
2445 assert(m);
2446
2447 r = sd_event_source_set_enabled(
2448 m->run_queue_event_source,
2449 prioq_isempty(m->run_queue) ? SD_EVENT_OFF : SD_EVENT_ONESHOT);
2450 if (r < 0)
2451 log_warning_errno(r, "Failed to enable job run queue event source, ignoring: %m");
2452}
2453
9588bc32 2454static unsigned manager_dispatch_dbus_queue(Manager *m) {
e0a08581 2455 unsigned n = 0, budget;
595ed347 2456 Unit *u;
e0a08581 2457 Job *j;
c1e1601e
LP
2458
2459 assert(m);
2460
b8d381c4
LP
2461 /* When we are reloading, let's not wait with generating signals, since we need to exit the manager as quickly
2462 * as we can. There's no point in throttling generation of signals in that case. */
2463 if (MANAGER_IS_RELOADING(m) || m->send_reloading_done || m->pending_reload_message)
f5fbe71d 2464 budget = UINT_MAX; /* infinite budget in this case */
b8d381c4
LP
2465 else {
2466 /* Anything to do at all? */
2467 if (!m->dbus_unit_queue && !m->dbus_job_queue)
2468 return 0;
2469
2470 /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's
2471 * sit this cycle out, and process things in a later cycle when the queues got a bit emptier. */
2472 if (manager_bus_n_queued_write(m) > MANAGER_BUS_BUSY_THRESHOLD)
2473 return 0;
2474
2475 /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't
2476 * overly full before this call we shouldn't increase it in size too wildly in one step, and we
2477 * shouldn't monopolize CPU time with generating these messages. Note the difference in counting of
2478 * this "budget" and the "threshold" above: the "budget" is decreased only once per generated message,
5238e957
BB
2479          * regardless of how many buses/direct connections it is enqueued on, while the "threshold" is applied to
2480 * each queued instance of bus message, i.e. if the same message is enqueued to five buses/direct
b8d381c4
LP
2481 * connections it will be counted five times. This difference in counting ("references"
2482 * vs. "instances") is primarily a result of the fact that it's easier to implement it this way,
2483 * however it also reflects the thinking that the "threshold" should put a limit on used queue memory,
2484 * i.e. space, while the "budget" should put a limit on time. Also note that the "threshold" is
2485 * currently chosen much higher than the "budget". */
2486 budget = MANAGER_BUS_MESSAGE_BUDGET;
2487 }
e0a08581 2488
b8d381c4 2489 while (budget != 0 && (u = m->dbus_unit_queue)) {
e0a08581 2490
595ed347 2491 assert(u->in_dbus_queue);
c1e1601e 2492
595ed347 2493 bus_unit_send_change_signal(u);
b8d381c4
LP
2494 n++;
2495
f5fbe71d 2496 if (budget != UINT_MAX)
b8d381c4 2497 budget--;
c1e1601e
LP
2498 }
2499
b8d381c4 2500 while (budget != 0 && (j = m->dbus_job_queue)) {
c1e1601e
LP
2501 assert(j->in_dbus_queue);
2502
2503 bus_job_send_change_signal(j);
b8d381c4
LP
2504 n++;
2505
f5fbe71d 2506 if (budget != UINT_MAX)
b8d381c4 2507 budget--;
c1e1601e
LP
2508 }
2509
b8d381c4 2510 if (m->send_reloading_done) {
71445ae7 2511 m->send_reloading_done = false;
718db961 2512 bus_manager_send_reloading(m, false);
b8d381c4 2513 n++;
71445ae7
LP
2514 }
2515
b8d381c4 2516 if (m->pending_reload_message) {
209de525 2517 bus_send_pending_reload_message(m);
e0a08581
LP
2518 n++;
2519 }
718db961 2520
c1e1601e 2521 return n;
9152c765
LP
2522}
2523
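/*
 * Illustrative sketch (not part of manager.c): the "budget" idea documented in
 * manager_dispatch_dbus_queue() above. Only a bounded number of queued items
 * is processed per event-loop iteration so that signal generation never
 * monopolizes the loop; whatever is left stays queued for the next iteration.
 * The names (Work, MESSAGE_BUDGET, emit_signal_for, ...) are hypothetical.
 */
#include <limits.h>

#define MESSAGE_BUDGET 1024U

typedef struct Work Work;
struct Work {
        Work *next;
};

void emit_signal_for(Work *w) {
        (void) w;  /* stands in for bus_unit_send_change_signal() etc. */
}

unsigned drain_with_budget(Work **queue, int unlimited) {
        unsigned budget = unlimited ? UINT_MAX : MESSAGE_BUDGET;
        unsigned n = 0;
        Work *w;

        while (budget != 0 && (w = *queue)) {
                *queue = w->next;

                emit_signal_for(w);
                n++;

                if (budget != UINT_MAX)  /* an "infinite" budget is never decremented */
                        budget--;
        }

        return n;  /* items actually processed this iteration */
}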
d8fdc620
LP
2524static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2525 Manager *m = userdata;
60473f0c 2526 char buf[PATH_MAX];
d8fdc620
LP
2527 ssize_t n;
2528
2529 n = recv(fd, buf, sizeof(buf), 0);
2530 if (n < 0)
2531 return log_error_errno(errno, "Failed to read cgroups agent message: %m");
2532 if (n == 0) {
2533 log_error("Got zero-length cgroups agent message, ignoring.");
2534 return 0;
2535 }
2536 if ((size_t) n >= sizeof(buf)) {
2537 log_error("Got overly long cgroups agent message, ignoring.");
2538 return 0;
2539 }
2540
2541 if (memchr(buf, 0, n)) {
2542 log_error("Got cgroups agent message with embedded NUL byte, ignoring.");
2543 return 0;
2544 }
2545 buf[n] = 0;
2546
2547 manager_notify_cgroup_empty(m, buf);
d5f15326 2548 (void) bus_forward_agent_released(m, buf);
d8fdc620
LP
2549
2550 return 0;
2551}
2552
5151b4cc 2553static bool manager_process_barrier_fd(char * const *tags, FDSet *fds) {
4f07ddfa
KKD
2554
2555 /* nothing else must be sent when using BARRIER=1 */
5151b4cc 2556 if (strv_contains(tags, "BARRIER=1")) {
46276454 2557 if (strv_length(tags) != 1)
5151b4cc 2558 log_warning("Extra notification messages sent with BARRIER=1, ignoring everything.");
46276454
LP
2559 else if (fdset_size(fds) != 1)
2560 log_warning("Got incorrect number of fds with BARRIER=1, closing them.");
5151b4cc
BR
2561
2562 /* Drop the message if BARRIER=1 was found */
4f07ddfa 2563 return true;
5151b4cc 2564 }
4f07ddfa
KKD
2565
2566 return false;
2567}
2568
db256aab
LP
2569static void manager_invoke_notify_message(
2570 Manager *m,
2571 Unit *u,
2572 const struct ucred *ucred,
5151b4cc 2573 char * const *tags,
db256aab
LP
2574 FDSet *fds) {
2575
5ba6985b
LP
2576 assert(m);
2577 assert(u);
db256aab 2578 assert(ucred);
5151b4cc 2579 assert(tags);
5ba6985b 2580
62a76913 2581 if (u->notifygen == m->notifygen) /* Already invoked on this same unit in this same iteration? */
5ba6985b 2582 return;
62a76913
LP
2583 u->notifygen = m->notifygen;
2584
5151b4cc 2585 if (UNIT_VTABLE(u)->notify_message)
db256aab 2586 UNIT_VTABLE(u)->notify_message(u, ucred, tags, fds);
62a76913 2587
5151b4cc
BR
2588 else if (DEBUG_LOGGING) {
2589 _cleanup_free_ char *buf = NULL, *x = NULL, *y = NULL;
a86b7675 2590
5151b4cc
BR
2591 buf = strv_join(tags, ", ");
2592 if (buf)
2593 x = ellipsize(buf, 20, 90);
a86b7675 2594 if (x)
da5fb861
LP
2595 y = cescape(x);
2596
a86b7675
ZJS
2597 log_unit_debug(u, "Got notification message \"%s\", ignoring.", strnull(y));
2598 }
5ba6985b
LP
2599}
2600
718db961 2601static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
3d0b8a55 2602
b215b0ed 2603 _cleanup_fdset_free_ FDSet *fds = NULL;
99534007 2604 Manager *m = ASSERT_PTR(userdata);
b215b0ed
DH
2605 char buf[NOTIFY_BUFFER_MAX+1];
2606 struct iovec iovec = {
2607 .iov_base = buf,
2608 .iov_len = sizeof(buf)-1,
2609 };
fb29cdbe
LP
2610 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred)) +
2611 CMSG_SPACE(sizeof(int) * NOTIFY_FD_MAX)) control;
b215b0ed
DH
2612 struct msghdr msghdr = {
2613 .msg_iov = &iovec,
2614 .msg_iovlen = 1,
2615 .msg_control = &control,
2616 .msg_controllen = sizeof(control),
2617 };
2618
2619 struct cmsghdr *cmsg;
2620 struct ucred *ucred = NULL;
62a76913 2621 _cleanup_free_ Unit **array_copy = NULL;
5151b4cc 2622 _cleanup_strv_free_ char **tags = NULL;
62a76913 2623 Unit *u1, *u2, **array;
b215b0ed 2624 int r, *fd_array = NULL;
da6053d0 2625 size_t n_fds = 0;
62a76913 2626 bool found = false;
8c47c732
LP
2627 ssize_t n;
2628
718db961
LP
2629 assert(m->notify_fd == fd);
2630
2631 if (revents != EPOLLIN) {
2632 log_warning("Got unexpected poll event for notify fd.");
2633 return 0;
2634 }
8c47c732 2635
3691bcf3 2636 n = recvmsg_safe(m->notify_fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC|MSG_TRUNC);
882321a1
ZJS
2637 if (ERRNO_IS_NEG_TRANSIENT(n))
2638 return 0; /* Spurious wakeup, try again */
2639 if (n == -EXFULL) {
2640 log_warning("Got message with truncated control data (too many fds sent?), ignoring.");
2641 return 0;
8add30a0 2642 }
882321a1
ZJS
2643 if (n < 0)
2644 /* If this is any other, real error, then stop processing this socket. This of course means
2645 * we won't take notification messages anymore, but that's still better than busy looping:
2646 * being woken up over and over again, but being unable to actually read the message from the
2647 * socket. */
2648 return log_error_errno(n, "Failed to receive notification message: %m");
a354329f 2649
882321a1 2650 CMSG_FOREACH(cmsg, &msghdr)
b215b0ed 2651 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
a354329f 2652
3691bcf3 2653 assert(!fd_array);
b1d02191 2654 fd_array = CMSG_TYPED_DATA(cmsg, int);
b215b0ed 2655 n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
a354329f 2656
b215b0ed
DH
2657 } else if (cmsg->cmsg_level == SOL_SOCKET &&
2658 cmsg->cmsg_type == SCM_CREDENTIALS &&
2659 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) {
a354329f 2660
3691bcf3 2661 assert(!ucred);
b1d02191 2662 ucred = CMSG_TYPED_DATA(cmsg, struct ucred);
a354329f
LP
2663 }
2664
b215b0ed
DH
2665 if (n_fds > 0) {
2666 assert(fd_array);
a354329f 2667
b215b0ed
DH
2668 r = fdset_new_array(&fds, fd_array, n_fds);
2669 if (r < 0) {
2670 close_many(fd_array, n_fds);
9987750e
FB
2671 log_oom();
2672 return 0;
a354329f 2673 }
b215b0ed 2674 }
8c47c732 2675
15e23e8c 2676 if (!ucred || !pid_is_valid(ucred->pid)) {
b215b0ed
DH
2677 log_warning("Received notify message without valid credentials. Ignoring.");
2678 return 0;
2679 }
8c47c732 2680
045a3d59 2681 if ((size_t) n >= sizeof(buf) || (msghdr.msg_flags & MSG_TRUNC)) {
b215b0ed
DH
2682 log_warning("Received notify message exceeded maximum size. Ignoring.");
2683 return 0;
2684 }
8c47c732 2685
8f41e6b6
ZJS
2686         /* As an extra safety check, let's make sure the string we get doesn't contain embedded NUL bytes.
2687 * We permit one trailing NUL byte in the message, but don't expect it. */
875ca88d
LP
2688 if (n > 1 && memchr(buf, 0, n-1)) {
2689 log_warning("Received notify message with embedded NUL bytes. Ignoring.");
2690 return 0;
2691 }
2692
8f41e6b6 2693 /* Make sure it's NUL-terminated, then parse it to obtain the tags list. */
b215b0ed 2694 buf[n] = 0;
5151b4cc
BR
2695 tags = strv_split_newlines(buf);
2696 if (!tags) {
2697 log_oom();
2698 return 0;
2699 }
8c47c732 2700
8f41e6b6 2701 /* Possibly a barrier fd, let's see. */
7b0a1267
LP
2702 if (manager_process_barrier_fd(tags, fds)) {
2703 log_debug("Received barrier notification message from PID " PID_FMT ".", ucred->pid);
4f07ddfa 2704 return 0;
7b0a1267 2705 }
4f07ddfa 2706
62a76913
LP
2707 /* Increase the generation counter used for filtering out duplicate unit invocations. */
2708 m->notifygen++;
2709
495e75ed
LP
2710 /* Generate lookup key from the PID (we have no pidfd here, after all) */
2711 PidRef pidref = PIDREF_MAKE_FROM_PID(ucred->pid);
2712
62a76913 2713 /* Notify every unit that might be interested, which might be multiple. */
495e75ed
LP
2714 u1 = manager_get_unit_by_pidref_cgroup(m, &pidref);
2715 u2 = hashmap_get(m->watch_pids, &pidref);
2716 array = hashmap_get(m->watch_pids_more, &pidref);
62a76913
LP
2717 if (array) {
2718 size_t k = 0;
5ba6985b 2719
62a76913
LP
2720 while (array[k])
2721 k++;
5ba6985b 2722
62a76913
LP
2723 array_copy = newdup(Unit*, array, k+1);
2724 if (!array_copy)
2725 log_oom();
2726 }
8f41e6b6
ZJS
2727 /* And now invoke the per-unit callbacks. Note that manager_invoke_notify_message() will handle
2728          * duplicate units and make sure we only invoke each unit's handler once. */
62a76913 2729 if (u1) {
5151b4cc 2730 manager_invoke_notify_message(m, u1, ucred, tags, fds);
62a76913
LP
2731 found = true;
2732 }
2733 if (u2) {
5151b4cc 2734 manager_invoke_notify_message(m, u2, ucred, tags, fds);
62a76913
LP
2735 found = true;
2736 }
2737 if (array_copy)
2738 for (size_t i = 0; array_copy[i]; i++) {
5151b4cc 2739 manager_invoke_notify_message(m, array_copy[i], ucred, tags, fds);
62a76913
LP
2740 found = true;
2741 }
8c47c732 2742
62a76913
LP
2743 if (!found)
2744 log_warning("Cannot find unit for notify message of PID "PID_FMT", ignoring.", ucred->pid);
a354329f 2745
b215b0ed 2746 if (fdset_size(fds) > 0)
5fd2c135 2747 log_warning("Got extra auxiliary fds with notification message, closing them.");
8c47c732
LP
2748
2749 return 0;
2750}
2751
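/*
 * Illustrative sketch (not part of manager.c): the sender side of the
 * protocol that manager_dispatch_notify_fd() above receives. A service of
 * Type=notify writes newline-separated tags to the socket named by
 * $NOTIFY_SOCKET, which libsystemd wraps as sd_notify()/sd_notifyf(). The
 * status text below is an arbitrary example.
 */
#include <systemd/sd-daemon.h>

int announce_ready(unsigned workers) {
        /* Tags are split on newlines by the manager, exactly as done with
         * strv_split_newlines() above. */
        return sd_notifyf(/* unset_environment= */ 0,
                          "READY=1\n"
                          "STATUS=Processing requests with %u workers",
                          workers);
}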
62a76913
LP
2752static void manager_invoke_sigchld_event(
2753 Manager *m,
2754 Unit *u,
2755 const siginfo_t *si) {
36f20ae3 2756
5ba6985b
LP
2757 assert(m);
2758 assert(u);
2759 assert(si);
2760
62a76913
LP
2761 /* Already invoked the handler of this unit in this iteration? Then don't process this again */
2762 if (u->sigchldgen == m->sigchldgen)
2763 return;
2764 u->sigchldgen = m->sigchldgen;
5ba6985b 2765
62a76913 2766 log_unit_debug(u, "Child "PID_FMT" belongs to %s.", si->si_pid, u->id);
5ba6985b 2767 unit_unwatch_pid(u, si->si_pid);
e57051f5 2768
62a76913
LP
2769 if (UNIT_VTABLE(u)->sigchld_event)
2770 UNIT_VTABLE(u)->sigchld_event(u, si->si_pid, si->si_code, si->si_status);
5ba6985b
LP
2771}
2772
575b300b 2773static int manager_dispatch_sigchld(sd_event_source *source, void *userdata) {
99534007 2774 Manager *m = ASSERT_PTR(userdata);
575b300b
LP
2775 siginfo_t si = {};
2776 int r;
2777
2778 assert(source);
9152c765 2779
8f41e6b6
ZJS
2780 /* First we call waitid() for a PID and do not reap the zombie. That way we can still access
2781 * /proc/$PID for it while it is a zombie. */
9152c765 2782
575b300b 2783 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
acbb0225 2784
8afabc50
AJ
2785 if (errno != ECHILD)
2786 log_error_errno(errno, "Failed to peek for child with waitid(), ignoring: %m");
acbb0225 2787
8afabc50 2788 goto turn_off;
575b300b 2789 }
4112df16 2790
575b300b
LP
2791 if (si.si_pid <= 0)
2792 goto turn_off;
2793
2794 if (IN_SET(si.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED)) {
62a76913 2795 _cleanup_free_ Unit **array_copy = NULL;
575b300b 2796 _cleanup_free_ char *name = NULL;
62a76913 2797 Unit *u1, *u2, **array;
575b300b 2798
d7d74854 2799 (void) pid_get_comm(si.si_pid, &name);
575b300b
LP
2800
2801 log_debug("Child "PID_FMT" (%s) died (code=%s, status=%i/%s)",
2802 si.si_pid, strna(name),
2803 sigchld_code_to_string(si.si_code),
2804 si.si_status,
2805 strna(si.si_code == CLD_EXITED
2806 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2807 : signal_to_string(si.si_status)));
2808
62a76913
LP
2809 /* Increase the generation counter used for filtering out duplicate unit invocations */
2810 m->sigchldgen++;
2811
495e75ed
LP
2812 /* We look this up by a PidRef that only consists of the PID. After all we couldn't create a
2813 * pidfd here any more even if we wanted (since the process just exited). */
2814 PidRef pidref = PIDREF_MAKE_FROM_PID(si.si_pid);
2815
62a76913 2816 /* And now figure out the unit this belongs to, it might be multiple... */
495e75ed
LP
2817 u1 = manager_get_unit_by_pidref_cgroup(m, &pidref);
2818 u2 = hashmap_get(m->watch_pids, &pidref);
2819 array = hashmap_get(m->watch_pids_more, &pidref);
62a76913
LP
2820 if (array) {
2821 size_t n = 0;
2822
5238e957 2823 /* Count how many entries the array has */
62a76913
LP
2824 while (array[n])
2825 n++;
2826
2827 /* Make a copy of the array so that we don't trip up on the array changing beneath us */
2828 array_copy = newdup(Unit*, array, n+1);
2829 if (!array_copy)
2830 log_oom();
2831 }
2832
2833 /* Finally, execute them all. Note that u1, u2 and the array might contain duplicates, but
2834 * that's fine, manager_invoke_sigchld_event() will ensure we only invoke the handlers once for
2835 * each iteration. */
2ba6ae6b
ZJS
2836 if (u1) {
2837 /* We check for oom condition, in case we got SIGCHLD before the oom notification.
2838 * We only do this for the cgroup the PID belonged to. */
2839 (void) unit_check_oom(u1);
2840
288bd406 2841 /* We check if systemd-oomd performed a kill so that we log and notify appropriately */
fe8d22fb
AZ
2842 (void) unit_check_oomd_kill(u1);
2843
62a76913 2844 manager_invoke_sigchld_event(m, u1, &si);
2ba6ae6b 2845 }
62a76913
LP
2846 if (u2)
2847 manager_invoke_sigchld_event(m, u2, &si);
2848 if (array_copy)
2849 for (size_t i = 0; array_copy[i]; i++)
2850 manager_invoke_sigchld_event(m, array_copy[i], &si);
575b300b 2851 }
9152c765 2852
575b300b
LP
2853 /* And now, we actually reap the zombie. */
2854 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2855 log_error_errno(errno, "Failed to dequeue child, ignoring: %m");
2856 return 0;
2857 }
9152c765 2858
575b300b 2859 return 0;
8c47c732 2860
575b300b
LP
2861turn_off:
2862 /* All children processed for now, turn off event source */
4112df16 2863
575b300b
LP
2864 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
2865 if (r < 0)
2866 return log_error_errno(r, "Failed to disable SIGCHLD event source: %m");
9152c765
LP
2867
2868 return 0;
2869}
2870
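/*
 * Illustrative sketch (not part of manager.c): the two-step waitid() dance
 * used by manager_dispatch_sigchld() above. The first call uses WNOWAIT so
 * the child stays a zombie (and /proc/$PID remains readable) while the exit
 * status is inspected; only afterwards is the zombie actually reaped.
 */
#include <stdio.h>
#include <sys/wait.h>

int peek_and_reap_one_child(void) {
        siginfo_t si = {};

        /* Peek: do not reap yet, so /proc/<pid> can still be examined. */
        if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0)
                return -1;
        if (si.si_pid <= 0)
                return 0;  /* no exited child pending right now */

        printf("child %d terminated (code=%d, status=%d)\n",
               (int) si.si_pid, si.si_code, si.si_status);

        /* Now actually reap the zombie. */
        if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0)
                return -1;

        return 1;
}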
5f968096
ZJS
2871static void manager_start_special(Manager *m, const char *name, JobMode mode) {
2872 Job *job;
398ef8ba 2873
5f968096
ZJS
2874 if (manager_add_job_by_name_and_warn(m, JOB_START, name, mode, NULL, &job) < 0)
2875 return;
1e001f52 2876
5f968096
ZJS
2877 const char *s = unit_status_string(job->unit, NULL);
2878
2879 log_info("Activating special unit %s...", s);
3889fc6f
ZJS
2880
2881 sd_notifyf(false,
2882 "STATUS=Activating special unit %s...", s);
2883 m->status_ready = false;
28247076
LP
2884}
2885
24dd31c1
LN
2886static void manager_handle_ctrl_alt_del(Manager *m) {
2887 /* If the user presses C-A-D more than
2888 * 7 times within 2s, we reboot/shutdown immediately,
2889 * unless it was disabled in system.conf */
2890
7994ac1d 2891 if (ratelimit_below(&m->ctrl_alt_del_ratelimit) || m->cad_burst_action == EMERGENCY_ACTION_NONE)
5f968096 2892 manager_start_special(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE_IRREVERSIBLY);
ae8c7939 2893 else
7af67e9a 2894 emergency_action(m, m->cad_burst_action, EMERGENCY_ACTION_WARN, NULL, -1,
ae8c7939 2895 "Ctrl-Alt-Del was pressed more than 7 times within 2s");
24dd31c1
LN
2896}
2897
718db961 2898static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
99534007 2899 Manager *m = ASSERT_PTR(userdata);
9152c765
LP
2900 ssize_t n;
2901 struct signalfd_siginfo sfsi;
dacd6cee 2902 int r;
9152c765 2903
718db961
LP
2904 assert(m->signal_fd == fd);
2905
2906 if (revents != EPOLLIN) {
2907 log_warning("Got unexpected events from signal file descriptor.");
2908 return 0;
2909 }
9152c765 2910
575b300b 2911 n = read(m->signal_fd, &sfsi, sizeof(sfsi));
8add30a0
YW
2912 if (n < 0) {
2913 if (ERRNO_IS_TRANSIENT(errno))
575b300b 2914 return 0;
9152c765 2915
575b300b
LP
2916 /* We return an error here, which will kill this handler,
2917 * to avoid a busy loop on read error. */
2918 return log_error_errno(errno, "Reading from signal fd failed: %m");
2919 }
8add30a0 2920 if (n != sizeof(sfsi)) {
c0f86d66 2921 log_warning("Truncated read from signal fd (%zi bytes), ignoring!", n);
8add30a0
YW
2922 return 0;
2923 }
9152c765 2924
575b300b
LP
2925 log_received_signal(sfsi.ssi_signo == SIGCHLD ||
2926 (sfsi.ssi_signo == SIGTERM && MANAGER_IS_USER(m))
2927 ? LOG_DEBUG : LOG_INFO,
2928 &sfsi);
1e001f52 2929
575b300b 2930 switch (sfsi.ssi_signo) {
b9cd2ec1 2931
575b300b
LP
2932 case SIGCHLD:
2933 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
2934 if (r < 0)
8afabc50 2935 log_warning_errno(r, "Failed to enable SIGCHLD event source, ignoring: %m");
b9cd2ec1 2936
575b300b 2937 break;
84e9af1e 2938
575b300b
LP
2939 case SIGTERM:
2940 if (MANAGER_IS_SYSTEM(m)) {
ba0c7754 2941 /* This is for compatibility with the original sysvinit */
c52b19d6
LP
2942 if (verify_run_space_and_log("Refusing to reexecute") < 0)
2943 break;
2944
2945 m->objective = MANAGER_REEXECUTE;
a1b256b0 2946 break;
575b300b 2947 }
84e9af1e 2948
575b300b
LP
2949 _fallthrough_;
2950 case SIGINT:
2951 if (MANAGER_IS_SYSTEM(m))
2952 manager_handle_ctrl_alt_del(m);
2953 else
5f968096 2954 manager_start_special(m, SPECIAL_EXIT_TARGET, JOB_REPLACE_IRREVERSIBLY);
575b300b 2955 break;
84e9af1e 2956
575b300b 2957 case SIGWINCH:
ba0c7754 2958 /* This is a nop on non-init */
575b300b 2959 if (MANAGER_IS_SYSTEM(m))
5f968096 2960 manager_start_special(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
84e9af1e 2961
575b300b 2962 break;
84e9af1e 2963
575b300b 2964 case SIGPWR:
ba0c7754 2965 /* This is a nop on non-init */
575b300b 2966 if (MANAGER_IS_SYSTEM(m))
5f968096 2967 manager_start_special(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
6632c602 2968
575b300b 2969 break;
57ee42ce 2970
8559b3b7 2971 case SIGUSR1:
2b680534 2972 if (manager_dbus_is_running(m, false)) {
575b300b 2973 log_info("Trying to reconnect to bus...");
575b300b 2974
8559b3b7
LP
2975 (void) bus_init_api(m);
2976
2977 if (MANAGER_IS_SYSTEM(m))
2978 (void) bus_init_system(m);
5f968096
ZJS
2979 } else
2980 manager_start_special(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
57ee42ce 2981
575b300b 2982 break;
575b300b
LP
2983
2984 case SIGUSR2: {
2985 _cleanup_free_ char *dump = NULL;
2986
d1d8786c 2987 r = manager_get_dump_string(m, /* patterns= */ NULL, &dump);
575b300b
LP
2988 if (r < 0) {
2989                         log_warning_errno(r, "Failed to acquire manager dump: %m");
57ee42ce
LP
2990 break;
2991 }
2992
575b300b
LP
2993 log_dump(LOG_INFO, dump);
2994 break;
2995 }
2149e37c 2996
575b300b 2997 case SIGHUP:
c52b19d6
LP
2998 if (verify_run_space_and_log("Refusing to reload") < 0)
2999 break;
3000
3001 m->objective = MANAGER_RELOAD;
575b300b
LP
3002 break;
3003
3004 default: {
3005
3006 /* Starting SIGRTMIN+0 */
3007 static const struct {
3008 const char *target;
3009 JobMode mode;
3010 } target_table[] = {
13ffc607
LP
3011 [0] = { SPECIAL_DEFAULT_TARGET, JOB_ISOLATE },
3012 [1] = { SPECIAL_RESCUE_TARGET, JOB_ISOLATE },
3013 [2] = { SPECIAL_EMERGENCY_TARGET, JOB_ISOLATE },
3014 [3] = { SPECIAL_HALT_TARGET, JOB_REPLACE_IRREVERSIBLY },
3015 [4] = { SPECIAL_POWEROFF_TARGET, JOB_REPLACE_IRREVERSIBLY },
3016 [5] = { SPECIAL_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY },
3017 [6] = { SPECIAL_KEXEC_TARGET, JOB_REPLACE_IRREVERSIBLY },
3018 [7] = { SPECIAL_SOFT_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY },
575b300b
LP
3019 };
3020
3021 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
af41e508 3022 static const ManagerObjective objective_table[] = {
575b300b
LP
3023 [0] = MANAGER_HALT,
3024 [1] = MANAGER_POWEROFF,
3025 [2] = MANAGER_REBOOT,
3026 [3] = MANAGER_KEXEC,
13ffc607 3027 [4] = MANAGER_SOFT_REBOOT,
575b300b 3028 };
b2cdc666 3029
575b300b
LP
3030 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
3031 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
3032 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
5f968096 3033 manager_start_special(m, target_table[idx].target, target_table[idx].mode);
1005d14f 3034 break;
2149e37c 3035 }
1005d14f 3036
575b300b 3037 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
af41e508
LP
3038 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(objective_table)) {
3039 m->objective = objective_table[sfsi.ssi_signo - SIGRTMIN - 13];
575b300b
LP
3040 break;
3041 }
3042
3043 switch (sfsi.ssi_signo - SIGRTMIN) {
3044
29e6b0c1
LP
3045 case 18: {
3046 bool generic = false;
3047
3048 if (sfsi.ssi_code != SI_QUEUE)
3049 generic = true;
3050 else {
3051 /* Override a few select commands by our own PID1-specific logic */
3052
3053 switch (sfsi.ssi_int) {
3054
3055 case _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE..._COMMON_SIGNAL_COMMAND_LOG_LEVEL_END:
3056 manager_override_log_level(m, sfsi.ssi_int - _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE);
3057 break;
3058
3059 case COMMON_SIGNAL_COMMAND_CONSOLE:
3060 manager_override_log_target(m, LOG_TARGET_CONSOLE);
3061 break;
3062
3063 case COMMON_SIGNAL_COMMAND_JOURNAL:
3064 manager_override_log_target(m, LOG_TARGET_JOURNAL);
3065 break;
3066
3067 case COMMON_SIGNAL_COMMAND_KMSG:
3068 manager_override_log_target(m, LOG_TARGET_KMSG);
3069 break;
3070
3071 case COMMON_SIGNAL_COMMAND_NULL:
3072 manager_override_log_target(m, LOG_TARGET_NULL);
3073 break;
3074
0112c37c
LB
3075 case MANAGER_SIGNAL_COMMAND_DUMP_JOBS: {
3076 _cleanup_free_ char *dump_jobs = NULL;
3077
3078 r = manager_get_dump_jobs_string(m, /* patterns= */ NULL, " ", &dump_jobs);
3079 if (r < 0) {
3080                                                 log_warning_errno(r, "Failed to acquire manager jobs dump: %m");
3081 break;
3082 }
3083
3084 log_dump(LOG_INFO, dump_jobs);
3085 break;
3086 }
3087
29e6b0c1
LP
3088 default:
3089 generic = true;
3090 }
3091 }
3092
3093 if (generic)
3094 return sigrtmin18_handler(source, &sfsi, NULL);
3095
3096 break;
3097 }
3098
575b300b 3099 case 20:
43bba15a 3100 manager_override_show_status(m, SHOW_STATUS_YES, "signal");
a16e1123
LP
3101 break;
3102
575b300b 3103 case 21:
43bba15a 3104 manager_override_show_status(m, SHOW_STATUS_NO, "signal");
575b300b 3105 break;
7d793605 3106
575b300b 3107 case 22:
a6ecbf83 3108 manager_override_log_level(m, LOG_DEBUG);
575b300b
LP
3109 break;
3110
3111 case 23:
a6ecbf83 3112 manager_restore_original_log_level(m);
575b300b 3113 break;
0003d1ab 3114
575b300b
LP
3115 case 24:
3116 if (MANAGER_IS_USER(m)) {
af41e508 3117 m->objective = MANAGER_EXIT;
575b300b 3118 return 0;
0658666b 3119 }
9152c765 3120
575b300b
LP
3121 /* This is a nop on init */
3122 break;
3123
463aef23
FB
3124 case 25:
3125 m->objective = MANAGER_REEXECUTE;
3126 break;
3127
575b300b
LP
3128 case 26:
3129 case 29: /* compatibility: used to be mapped to LOG_TARGET_SYSLOG_OR_KMSG */
bda7d78b 3130 manager_restore_original_log_target(m);
575b300b
LP
3131 break;
3132
3133 case 27:
bda7d78b 3134 manager_override_log_target(m, LOG_TARGET_CONSOLE);
575b300b
LP
3135 break;
3136
3137 case 28:
bda7d78b 3138 manager_override_log_target(m, LOG_TARGET_KMSG);
575b300b
LP
3139 break;
3140
3141 default:
3142 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
3143 }
3144 }}
034c6ed7
LP
3145
3146 return 0;
3147}
3148
718db961 3149static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
99534007 3150 Manager *m = ASSERT_PTR(userdata);
718db961 3151 Unit *u;
034c6ed7 3152
a80c1575 3153 log_struct(LOG_DEBUG,
2b044526 3154 "MESSAGE_ID=" SD_MESSAGE_TIME_CHANGE_STR,
a1230ff9 3155 LOG_MESSAGE("Time has been changed"));
034c6ed7 3156
718db961 3157 /* Restart the watch */
7feedd18 3158 (void) manager_setup_time_change(m);
4e434314 3159
90e74a66 3160 HASHMAP_FOREACH(u, m->units)
718db961
LP
3161 if (UNIT_VTABLE(u)->time_change)
3162 UNIT_VTABLE(u)->time_change(u);
ea430986 3163
718db961
LP
3164 return 0;
3165}
ea430986 3166
bbf5fd8e
LP
3167static int manager_dispatch_timezone_change(
3168 sd_event_source *source,
3169 const struct inotify_event *e,
3170 void *userdata) {
3171
99534007 3172 Manager *m = ASSERT_PTR(userdata);
bbf5fd8e 3173 int changed;
bbf5fd8e
LP
3174 Unit *u;
3175
bbf5fd8e
LP
3176 log_debug("inotify event for /etc/localtime");
3177
3178 changed = manager_read_timezone_stat(m);
f20db199 3179 if (changed <= 0)
bbf5fd8e 3180 return changed;
bbf5fd8e
LP
3181
3182 /* Something changed, restart the watch, to ensure we watch the new /etc/localtime if it changed */
3183 (void) manager_setup_timezone_change(m);
3184
3185 /* Read the new timezone */
3186 tzset();
3187
3188 log_debug("Timezone has been changed (now: %s).", tzname[daylight]);
3189
90e74a66 3190 HASHMAP_FOREACH(u, m->units)
bbf5fd8e
LP
3191 if (UNIT_VTABLE(u)->timezone_change)
3192 UNIT_VTABLE(u)->timezone_change(u);
3193
3194 return 0;
3195}
3196
718db961 3197static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
99534007 3198 Manager *m = ASSERT_PTR(userdata);
8742514c 3199
718db961 3200 assert(m->idle_pipe[2] == fd);
8742514c 3201
8f41e6b6
ZJS
3202 /* There's at least one Type=idle child that just gave up on us waiting for the boot process to
3203 * complete. Let's now turn off any further console output if there's at least one service that needs
3204 * console access, so that from now on our own output should not spill into that service's output
3205 * anymore. After all, we support Type=idle only to beautify console output and it generally is set
3206 * on services that want to own the console exclusively without our interference. */
718db961 3207 m->no_console_output = m->n_on_console > 0;
03b717a3 3208
30fd9a2d 3209 /* Acknowledge the child's request, and let all other children know too that they shouldn't wait
8f41e6b6 3210 * any longer by closing the pipes towards them, which is what they are waiting for. */
718db961 3211 manager_close_idle_pipe(m);
03b717a3 3212
718db961
LP
3213 return 0;
3214}
31a7eb86 3215
718db961 3216static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata) {
99534007 3217 Manager *m = ASSERT_PTR(userdata);
fd08a840 3218 int r;
31a7eb86 3219
fd08a840 3220 assert(source);
9152c765 3221
718db961 3222 manager_print_jobs_in_progress(m);
fd08a840 3223
39cf0351 3224 r = sd_event_source_set_time_relative(source, JOBS_IN_PROGRESS_PERIOD_USEC);
fd08a840
ZJS
3225 if (r < 0)
3226 return r;
3227
3228 return sd_event_source_set_enabled(source, SD_EVENT_ONESHOT);
9152c765
LP
3229}
3230
3231int manager_loop(Manager *m) {
8c227e7f 3232 RateLimit rl = { .interval = 1*USEC_PER_SEC, .burst = 50000 };
9152c765 3233 int r;
9152c765
LP
3234
3235 assert(m);
1fb70e66 3236 assert(m->objective == MANAGER_OK); /* Ensure manager_startup() has been called */
9152c765 3237
b0c918b9
LP
3238 manager_check_finished(m);
3239
575b300b
LP
3240 /* There might still be some zombies hanging around from before we were exec()'ed. Let's reap them. */
3241 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
e96d6be7 3242 if (r < 0)
575b300b 3243 return log_error_errno(r, "Failed to enable SIGCHLD event source: %m");
a4312405 3244
af41e508 3245 while (m->objective == MANAGER_OK) {
9152c765 3246
ae4a0ec4 3247 (void) watchdog_ping();
e96d6be7 3248
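/* The rate limit declared above allows up to 50000 loop iterations per second; beyond that we assume
 * we are busy-looping and throttle. */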
7994ac1d 3249 if (!ratelimit_below(&rl)) {
ea430986
LP
3250 /* Yay, something is going seriously wrong, pause a little */
3251 log_warning("Looping too fast. Throttling execution a little.");
3252 sleep(1);
3253 }
3254
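/* Drain the various work queues; only once all of them are empty do we fall through to
 * sd_event_run() below and actually wait for events. */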
37a8e683 3255 if (manager_dispatch_load_queue(m) > 0)
23a177ef
LP
3256 continue;
3257
c5a97ed1
LP
3258 if (manager_dispatch_gc_job_queue(m) > 0)
3259 continue;
3260
3261 if (manager_dispatch_gc_unit_queue(m) > 0)
701cc384
LP
3262 continue;
3263
cf1265e1 3264 if (manager_dispatch_cleanup_queue(m) > 0)
c1e1601e 3265 continue;
034c6ed7 3266
91a6073e 3267 if (manager_dispatch_cgroup_realize_queue(m) > 0)
c1e1601e
LP
3268 continue;
3269
0bc488c9
LP
3270 if (manager_dispatch_start_when_upheld_queue(m) > 0)
3271 continue;
3272
56c59592
LP
3273 if (manager_dispatch_stop_when_bound_queue(m) > 0)
3274 continue;
3275
a3c1168a
LP
3276 if (manager_dispatch_stop_when_unneeded_queue(m) > 0)
3277 continue;
3278
6ac62d61
LP
3279 if (manager_dispatch_release_resources_queue(m) > 0)
3280 continue;
3281
c1e1601e 3282 if (manager_dispatch_dbus_queue(m) > 0)
ea430986 3283 continue;
ea430986 3284
c5f8a179 3285 /* Run the event loop, waking up at the latest when the watchdog needs to be pinged again */
12663295 3286 r = sd_event_run(m->event, watchdog_runtime_wait());
23bbb0de
MS
3287 if (r < 0)
3288 return log_error_errno(r, "Failed to run event loop: %m");
a16e1123 3289 }
957ca890 3290
af41e508 3291 return m->objective;
83c60c9f 3292}
ea430986 3293
718db961 3294int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u) {
ede3a796 3295 _cleanup_free_ char *n = NULL;
4b58153d 3296 sd_id128_t invocation_id;
ea430986 3297 Unit *u;
80fbf05e 3298 int r;
ea430986
LP
3299
3300 assert(m);
3301 assert(s);
3302 assert(_u);
3303
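/* Two hypothetical examples of accepted bus paths:
 *   /org/freedesktop/systemd1/unit/ssh_2eservice   -> resolved as unit name "ssh.service"
 *   /org/freedesktop/systemd1/unit/<32 hex digits> -> looked up by invocation ID
 */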
ede3a796
LP
3304 r = unit_name_from_dbus_path(s, &n);
3305 if (r < 0)
3306 return r;
ea430986 3307
da890466 3308 /* Permit addressing units by invocation ID: if the passed bus path is suffixed by a 128-bit ID then
8f41e6b6 3309 * we use it as invocation ID. */
4b58153d
LP
3310 r = sd_id128_from_string(n, &invocation_id);
3311 if (r >= 0) {
3312 u = hashmap_get(m->units_by_invocation_id, &invocation_id);
3313 if (u) {
3314 *_u = u;
3315 return 0;
3316 }
3317
930c124c
ZJS
3318 return sd_bus_error_setf(e, BUS_ERROR_NO_UNIT_FOR_INVOCATION_ID,
3319 "No unit with the specified invocation ID " SD_ID128_FORMAT_STR " known.",
3320 SD_ID128_FORMAT_VAL(invocation_id));
4b58153d
LP
3321 }
3322
00c83b43 3323 /* If this didn't work, we check if this is a unit name */
930c124c
ZJS
3324 if (!unit_name_is_valid(n, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
3325 _cleanup_free_ char *nn = NULL;
3326
3327 nn = cescape(n);
3328 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS,
3329 "Unit name %s is neither a valid invocation ID nor unit name.", strnull(nn));
3330 }
00c83b43 3331
80fbf05e 3332 r = manager_load_unit(m, n, NULL, e, &u);
80fbf05e
MS
3333 if (r < 0)
3334 return r;
ea430986
LP
3335
3336 *_u = u;
ea430986
LP
3337 return 0;
3338}
86fbf370
LP
3339
3340int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
718db961 3341 const char *p;
86fbf370 3342 unsigned id;
718db961 3343 Job *j;
86fbf370
LP
3344 int r;
3345
3346 assert(m);
3347 assert(s);
3348 assert(_j);
3349
718db961
LP
3350 p = startswith(s, "/org/freedesktop/systemd1/job/");
3351 if (!p)
86fbf370
LP
3352 return -EINVAL;
3353
718db961 3354 r = safe_atou(p, &id);
8742514c 3355 if (r < 0)
86fbf370
LP
3356 return r;
3357
8742514c
LP
3358 j = manager_get_job(m, id);
3359 if (!j)
86fbf370
LP
3360 return -ENOENT;
3361
3362 *_j = j;
3363
3364 return 0;
3365}
dfcd764e 3366
4927fcae 3367void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
e537352b 3368
349cc4a5 3369#if HAVE_AUDIT
2ba11090 3370 _cleanup_free_ char *p = NULL;
0aa281df 3371 const char *msg;
7410616c 3372 int audit_fd, r;
e537352b 3373
463d0d15 3374 if (!MANAGER_IS_SYSTEM(m))
a1a078ee
LP
3375 return;
3376
c1165f82
LP
3377 audit_fd = get_audit_fd();
3378 if (audit_fd < 0)
e537352b
LP
3379 return;
3380
bbd3a7ba
LP
3381 /* Don't generate audit events if the service was already
3382 * started and we're just deserializing */
2c289ea8 3383 if (MANAGER_IS_RELOADING(m))
bbd3a7ba
LP
3384 return;
3385
7410616c
LP
3386 r = unit_name_to_prefix_and_instance(u->id, &p);
3387 if (r < 0) {
d52b8493 3388 log_warning_errno(r, "Failed to extract prefix and instance of unit name, ignoring: %m");
e537352b
LP
3389 return;
3390 }
3391
63c372cb 3392 msg = strjoina("unit=", p);
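/* The audit record body is hence e.g. "unit=ssh.service", with the record type (such as
 * AUDIT_SERVICE_START) chosen by our caller. */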
0aa281df 3393 if (audit_log_user_comm_message(audit_fd, type, msg, "systemd", NULL, NULL, NULL, success) < 0) {
d52b8493
LP
3394 if (ERRNO_IS_PRIVILEGE(errno)) {
3395 /* We aren't allowed to send audit messages? Then let's not retry again. */
3396 log_debug_errno(errno, "Failed to send audit message, closing audit socket: %m");
c1165f82 3397 close_audit_fd();
d52b8493
LP
3398 } else
3399 log_warning_errno(errno, "Failed to send audit message, ignoring: %m");
391ade86 3400 }
4927fcae 3401#endif
e537352b 3402
e537352b
LP
3403}
3404
e983b760 3405void manager_send_unit_plymouth(Manager *m, Unit *u) {
2ba11090 3406 _cleanup_free_ char *message = NULL;
aa25e19b 3407 int c, r;
e983b760
LP
3408
3409 /* Don't generate plymouth events if the service was already
3410 * started and we're just deserializing */
2c289ea8 3411 if (MANAGER_IS_RELOADING(m))
e983b760
LP
3412 return;
3413
463d0d15 3414 if (!MANAGER_IS_SYSTEM(m))
e983b760
LP
3415 return;
3416
75f86906 3417 if (detect_container() > 0)
3772995a
LP
3418 return;
3419
b2bfd121 3420 if (!UNIT_VTABLE(u)->notify_plymouth)
e983b760
LP
3421 return;
3422
aa25e19b
LP
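/* Message format per Plymouth's boot protocol: an 'U' (update status) request whose argument is
 * length-prefixed (strlen(u->id)+1, i.e. including the trailing NUL) and NUL-terminated. */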
3423 c = asprintf(&message, "U\x02%c%s%c", (int) (strlen(u->id) + 1), u->id, '\x00');
3424 if (c < 0)
305757d8 3425 return (void) log_oom();
e983b760 3426
aa25e19b
LP
3427 /* We set SOCK_NONBLOCK here so that we'd rather drop the message than wait for plymouth */
3428 r = plymouth_send_raw(message, c, SOCK_NONBLOCK);
3429 if (r < 0)
3430 log_full_errno(ERRNO_IS_NO_PLYMOUTH(r) ? LOG_DEBUG : LOG_WARNING, r,
3431 "Failed to communicate with plymouth: %m");
e983b760
LP
3432}
3433
986935cf
FB
3434usec_t manager_get_watchdog(Manager *m, WatchdogType t) {
3435 assert(m);
3436
3437 if (MANAGER_IS_USER(m))
3438 return USEC_INFINITY;
3439
902ea119 3440 if (m->watchdog_overridden[t] != USEC_INFINITY)
986935cf
FB
3441 return m->watchdog_overridden[t];
3442
3443 return m->watchdog[t];
3444}
3445
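/* Note for the setters below: m->watchdog[] holds the value configured in system.conf
 * (RuntimeWatchdogSec= and friends), while m->watchdog_overridden[] holds a runtime override, e.g.
 * installed via the manager's D-Bus interface (USEC_INFINITY means "no override"). As shown above,
 * the override wins whenever it is set. */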
3446void manager_set_watchdog(Manager *m, WatchdogType t, usec_t timeout) {
986935cf
FB
3447
3448 assert(m);
3449
3450 if (MANAGER_IS_USER(m))
3451 return;
3452
3453 if (m->watchdog[t] == timeout)
3454 return;
3455
902ea119
CK
3456 if (m->watchdog_overridden[t] == USEC_INFINITY) {
3457 if (t == WATCHDOG_RUNTIME)
f16890f8 3458 (void) watchdog_setup(timeout);
902ea119 3459 else if (t == WATCHDOG_PRETIMEOUT)
5717062e 3460 (void) watchdog_setup_pretimeout(timeout);
902ea119 3461 }
986935cf 3462
61927b9f 3463 m->watchdog[t] = timeout;
986935cf
FB
3464}
3465
aac47032 3466void manager_override_watchdog(Manager *m, WatchdogType t, usec_t timeout) {
902ea119 3467 usec_t usec;
986935cf
FB
3468
3469 assert(m);
3470
3471 if (MANAGER_IS_USER(m))
aac47032 3472 return;
986935cf
FB
3473
3474 if (m->watchdog_overridden[t] == timeout)
aac47032 3475 return;
986935cf 3476
9b5560f3 3477 usec = timeout == USEC_INFINITY ? m->watchdog[t] : timeout;
902ea119 3478 if (t == WATCHDOG_RUNTIME)
f16890f8 3479 (void) watchdog_setup(usec);
902ea119
CK
3480 else if (t == WATCHDOG_PRETIMEOUT)
3481 (void) watchdog_setup_pretimeout(usec);
986935cf 3482
61927b9f 3483 m->watchdog_overridden[t] = timeout;
986935cf
FB
3484}
3485
aff3a9e1
LB
3486int manager_set_watchdog_pretimeout_governor(Manager *m, const char *governor) {
3487 _cleanup_free_ char *p = NULL;
3488 int r;
3489
3490 assert(m);
3491
3492 if (MANAGER_IS_USER(m))
3493 return 0;
3494
3495 if (streq_ptr(m->watchdog_pretimeout_governor, governor))
3496 return 0;
3497
3498 p = strdup(governor);
3499 if (!p)
3500 return -ENOMEM;
3501
3502 r = watchdog_setup_pretimeout_governor(governor);
3503 if (r < 0)
3504 return r;
3505
3506 return free_and_replace(m->watchdog_pretimeout_governor, p);
3507}
3508
3509int manager_override_watchdog_pretimeout_governor(Manager *m, const char *governor) {
3510 _cleanup_free_ char *p = NULL;
3511 int r;
3512
3513 assert(m);
3514
3515 if (MANAGER_IS_USER(m))
3516 return 0;
3517
3518 if (streq_ptr(m->watchdog_pretimeout_governor_overridden, governor))
3519 return 0;
3520
3521 p = strdup(governor);
3522 if (!p)
3523 return -ENOMEM;
3524
3525 r = watchdog_setup_pretimeout_governor(governor);
3526 if (r < 0)
3527 return r;
3528
3529 return free_and_replace(m->watchdog_pretimeout_governor_overridden, p);
3530}
3531
a16e1123 3532int manager_reload(Manager *m) {
d7ac0952 3533 _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL;
51d122af 3534 _cleanup_fdset_free_ FDSet *fds = NULL;
6a33af40
LP
3535 _cleanup_fclose_ FILE *f = NULL;
3536 int r;
a16e1123
LP
3537
3538 assert(m);
3539
07719a21
LP
3540 r = manager_open_serialization(m, &f);
3541 if (r < 0)
6a33af40 3542 return log_error_errno(r, "Failed to create serialization file: %m");
38c52d46 3543
07719a21 3544 fds = fdset_new();
6a33af40
LP
3545 if (!fds)
3546 return log_oom();
3547
d147e2b6
ZJS
3548 /* We are officially in reload mode from here on. */
3549 reloading = manager_reloading_start(m);
a16e1123 3550
b3680f49 3551 r = manager_serialize(m, f, fds, false);
d147e2b6 3552 if (r < 0)
d68c645b 3553 return r;
a16e1123 3554
d147e2b6
ZJS
3555 if (fseeko(f, 0, SEEK_SET) < 0)
3556 return log_error_errno(errno, "Failed to seek to beginning of serialization: %m");
a16e1123 3557
6a33af40 3558 /* 💀 This is the point of no return, from here on there is no way back. 💀 */
d147e2b6 3559 reloading = NULL;
6a33af40
LP
3560
3561 bus_manager_send_reloading(m, true);
3562
3563 /* Start by flushing out all jobs and units, all generated units, all runtime environments, all dynamic users
3564 * and everything else that is worth flushing out. We'll get it all back from the serialization — if we need
7802194a 3565 * it. */
6a33af40 3566
a16e1123 3567 manager_clear_jobs_and_units(m);
07a78643 3568 lookup_paths_flush_generator(&m->lookup_paths);
84e3543e 3569 lookup_paths_free(&m->lookup_paths);
e76506b7 3570 exec_shared_runtime_vacuum(m);
29206d46 3571 dynamic_user_vacuum(m, false);
00d9ef85
LP
3572 m->uid_refs = hashmap_free(m->uid_refs);
3573 m->gid_refs = hashmap_free(m->gid_refs);
2ded0c04 3574
4870133b 3575 r = lookup_paths_init_or_warn(&m->lookup_paths, m->runtime_scope, 0, NULL);
6a33af40 3576 if (r < 0)
99aad9a2 3577 return r;
5a1e9937 3578
6a33af40
LP
3579 (void) manager_run_environment_generators(m);
3580 (void) manager_run_generators(m);
64691d20 3581
581fef8d 3582 lookup_paths_log(&m->lookup_paths);
07719a21 3583
91e0ee5f 3584 /* We flushed out generated files, for which we don't watch mtime, so we should flush the old map. */
e8630e69 3585 manager_free_unit_name_maps(m);
a82b8b3d 3586 m->unit_file_state_outdated = false;
5a1e9937 3587
6a33af40 3588 /* First, enumerate what we can from kernel and suchlike */
3ad2afb6 3589 manager_enumerate_perpetual(m);
ba64af90 3590 manager_enumerate(m);
a16e1123
LP
3591
3592 /* Second, deserialize our stored data */
6a33af40
LP
3593 r = manager_deserialize(m, f, fds);
3594 if (r < 0)
3595 log_warning_errno(r, "Deserialization failed, proceeding anyway: %m");
a16e1123 3596
6a33af40 3597 /* We don't need the serialization anymore */
62b0cbb3 3598 f = safe_fclose(f);
a16e1123 3599
6a33af40
LP
3600 /* Re-register notify_fd as event source, and set up other sockets/communication channels we might need */
3601 (void) manager_setup_notify(m);
3602 (void) manager_setup_cgroups_agent(m);
3603 (void) manager_setup_user_lookup_fd(m);
00d9ef85 3604
a16e1123 3605 /* Third, fire things up! */
007c6337 3606 manager_coldplug(m);
a16e1123 3607
5197be06
LP
3608 /* Clean up runtime objects no longer referenced */
3609 manager_vacuum(m);
e8a565cb 3610
3deed59a
AA
3611 /* Clean up deserialized tracked clients */
3612 m->deserialized_subscribed = strv_free(m->deserialized_subscribed);
3613
6a33af40 3614 /* Consider the reload process complete now. */
31dc1ca3
LP
3615 assert(m->n_reloading > 0);
3616 m->n_reloading--;
3617
5ce5e1ad 3618 manager_ready(m);
8936a5e3 3619
71445ae7 3620 m->send_reloading_done = true;
6a33af40 3621 return 0;
a16e1123
LP
3622}
3623
fdf20a31 3624void manager_reset_failed(Manager *m) {
5632e374 3625 Unit *u;
5632e374
LP
3626
3627 assert(m);
3628
90e74a66 3629 HASHMAP_FOREACH(u, m->units)
fdf20a31 3630 unit_reset_failed(u);
5632e374
LP
3631}
3632
31afa0a4 3633bool manager_unit_inactive_or_pending(Manager *m, const char *name) {
8f6df3fa
LP
3634 Unit *u;
3635
3636 assert(m);
3637 assert(name);
3638
3639 /* Returns true if the unit is inactive or going down */
bd0af849
ZJS
3640 u = manager_get_unit(m, name);
3641 if (!u)
8f6df3fa
LP
3642 return true;
3643
31afa0a4 3644 return unit_inactive_or_pending(u);
8f6df3fa
LP
3645}
3646
d8eb10d6
ZJS
3647static void log_taint_string(Manager *m) {
3648 _cleanup_free_ char *taint = NULL;
3649
3650 assert(m);
3651
3652 if (MANAGER_IS_USER(m) || m->taint_logged)
3653 return;
3654
3655 m->taint_logged = true; /* only check for taint once */
3656
3657 taint = manager_taint_string(m);
3658 if (isempty(taint))
3659 return;
3660
3661 log_struct(LOG_NOTICE,
3662 LOG_MESSAGE("System is tainted: %s", taint),
3663 "TAINT=%s", taint,
a1230ff9 3664 "MESSAGE_ID=" SD_MESSAGE_TAINTED_STR);
d8eb10d6
ZJS
3665}
3666
56dacdbc 3667static void manager_notify_finished(Manager *m) {
915b3753 3668 usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec;
b0c918b9 3669
638cece4 3670 if (MANAGER_IS_TEST_RUN(m))
b0c918b9
LP
3671 return;
3672
463d0d15 3673 if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) {
dc3c9f5e
ZJS
3674 char buf[FORMAT_TIMESPAN_MAX + STRLEN(" (firmware) + ") + FORMAT_TIMESPAN_MAX + STRLEN(" (loader) + ")]
3675 = {};
3676 char *p = buf;
3677 size_t size = sizeof buf;
e03ae661 3678
9f9f0342
LP
3679 /* Note that MANAGER_TIMESTAMP_KERNEL's monotonic value is always at 0, and
3680 * MANAGER_TIMESTAMP_FIRMWARE's and MANAGER_TIMESTAMP_LOADER's monotonic value should be considered
915b3753
LP
3681 * negative values. */
3682
9f9f0342
LP
3683 firmware_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic - m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic;
3684 loader_usec = m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3685 userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
3686 total_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic + m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic;
18fa6b27 3687
dd1db3c2 3688 if (firmware_usec > 0)
5291f26d 3689 size = strpcpyf(&p, size, "%s (firmware) + ", FORMAT_TIMESPAN(firmware_usec, USEC_PER_MSEC));
dd1db3c2 3690 if (loader_usec > 0)
5291f26d 3691 size = strpcpyf(&p, size, "%s (loader) + ", FORMAT_TIMESPAN(loader_usec, USEC_PER_MSEC));
dd1db3c2 3692
9f9f0342 3693 if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) {
18fa6b27 3694
7802194a 3695 /* The initrd case on bare-metal */
9f9f0342
LP
3696 kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3697 initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic;
18fa6b27 3698
e12919e8 3699 log_struct(LOG_INFO,
2b044526 3700 "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
e12919e8
LP
3701 "KERNEL_USEC="USEC_FMT, kernel_usec,
3702 "INITRD_USEC="USEC_FMT, initrd_usec,
3703 "USERSPACE_USEC="USEC_FMT, userspace_usec,
dd1db3c2
YW
3704 LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (initrd) + %s (userspace) = %s.",
3705 buf,
5291f26d
ZJS
3706 FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC),
3707 FORMAT_TIMESPAN(initrd_usec, USEC_PER_MSEC),
3708 FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC),
3709 FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));
18fa6b27 3710 } else {
7802194a 3711 /* The initrd-less case on bare-metal */
9f9f0342
LP
3712
3713 kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
18fa6b27
LP
3714 initrd_usec = 0;
3715
81270860 3716 log_struct(LOG_INFO,
2b044526 3717 "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
e12919e8 3718 "KERNEL_USEC="USEC_FMT, kernel_usec,
ccd06097 3719 "USERSPACE_USEC="USEC_FMT, userspace_usec,
dd1db3c2
YW
3720 LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (userspace) = %s.",
3721 buf,
5291f26d
ZJS
3722 FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC),
3723 FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC),
3724 FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));
e12919e8
LP
3725 }
3726 } else {
4adf314b 3727 /* The container and --user case */
e12919e8 3728 firmware_usec = loader_usec = initrd_usec = kernel_usec = 0;
9f9f0342 3729 total_usec = userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
e12919e8
LP
3730
3731 log_struct(LOG_INFO,
2b044526 3732 "MESSAGE_ID=" SD_MESSAGE_USER_STARTUP_FINISHED_STR,
e12919e8 3733 "USERSPACE_USEC="USEC_FMT, userspace_usec,
e2cc6eca 3734 LOG_MESSAGE("Startup finished in %s.",
5291f26d 3735 FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));
18fa6b27 3736 }
b0c918b9 3737
718db961 3738 bus_manager_send_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec);
530345e7 3739
d8eb10d6 3740 log_taint_string(m);
b0c918b9
LP
3741}
3742
6d932659 3743static void user_manager_send_ready(Manager *m) {
4bf4f50f
ZJS
3744 int r;
3745
4adf314b
LP
3746 assert(m);
3747
3748 /* We send READY=1 on reaching basic.target only when running in --user mode. */
3749 if (!MANAGER_IS_USER(m) || m->ready_sent)
3750 return;
3751
d4341b76
ZJS
3752 r = sd_notify(false,
3753 "READY=1\n"
3754 "STATUS=Reached " SPECIAL_BASIC_TARGET ".");
4bf4f50f
ZJS
3755 if (r < 0)
3756 log_warning_errno(r, "Failed to send readiness notification, ignoring: %m");
3757
6d932659
ZJS
3758 m->ready_sent = true;
3759 m->status_ready = false;
3760}
3761
3762static void manager_send_ready(Manager *m) {
4bf4f50f
ZJS
3763 int r;
3764
6d932659
ZJS
3765 if (m->ready_sent && m->status_ready)
3766 /* Skip the notification if nothing changed. */
3767 return;
3768
028f7d3a
ZJS
3769 r = sd_notify(false,
3770 "READY=1\n"
3771 "STATUS=Ready.");
4bf4f50f
ZJS
3772 if (r < 0)
3773 log_full_errno(m->ready_sent ? LOG_DEBUG : LOG_WARNING, r,
3774 "Failed to send readiness notification, ignoring: %m");
3775
6d932659 3776 m->ready_sent = m->status_ready = true;
4adf314b
LP
3777}
3778
3779static void manager_check_basic_target(Manager *m) {
3780 Unit *u;
3781
3782 assert(m);
3783
3784 /* Small shortcut */
3785 if (m->ready_sent && m->taint_logged)
3786 return;
3787
3788 u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
3789 if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
3790 return;
3791
3792 /* For user managers, send out READY=1 as soon as we reach basic.target */
6d932659 3793 user_manager_send_ready(m);
4adf314b
LP
3794
3795 /* Log the taint string as soon as we reach basic.target */
3796 log_taint_string(m);
3797}
3798
56dacdbc 3799void manager_check_finished(Manager *m) {
56dacdbc
ZJS
3800 assert(m);
3801
2c289ea8 3802 if (MANAGER_IS_RELOADING(m))
aad1976f
LP
3803 return;
3804
4259d202
LP
3805 /* Verify that we have entered the event loop already, and not left it again. */
3806 if (!MANAGER_IS_RUNNING(m))
9771b62d
LP
3807 return;
3808
4adf314b 3809 manager_check_basic_target(m);
0c2826c6 3810
56dacdbc 3811 if (hashmap_size(m->jobs) > 0) {
56dacdbc 3812 if (m->jobs_in_progress_event_source)
2ae56591 3813 /* Ignore any failure, this is only for feedback */
1b4154a8
ZJS
3814 (void) sd_event_source_set_time(m->jobs_in_progress_event_source,
3815 manager_watch_jobs_next_time(m));
56dacdbc
ZJS
3816 return;
3817 }
3818
a4ac27c1
ZJS
3819 /* The jobs hashmap tends to grow a lot during boot, and then it's not reused until shutdown. Let's
3820 * kill the hashmap if it is relatively large. */
3821 if (hashmap_buckets(m->jobs) > hashmap_size(m->units) / 10)
3822 m->jobs = hashmap_free(m->jobs);
3823
6d932659
ZJS
3824 manager_send_ready(m);
3825
9c1b17c3
YW
3826 /* Notify Type=idle units that we are done now */
3827 manager_close_idle_pipe(m);
3828
6d932659
ZJS
3829 if (MANAGER_IS_FINISHED(m))
3830 return;
3831
7365a296 3832 manager_flip_auto_status(m, false, "boot finished");
56dacdbc 3833
56dacdbc 3834 /* Turn off confirm spawn now */
7d5ceb64 3835 m->confirm_spawn = NULL;
56dacdbc
ZJS
3836
3837 /* No need to update ask password status when we're going non-interactive */
3838 manager_close_ask_password(m);
3839
3840 /* This is no longer the first boot */
3841 manager_set_first_boot(m, false);
3842
fa5a0251 3843 dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_FINISH);
56dacdbc
ZJS
3844
3845 manager_notify_finished(m);
3846
e7ab4d1a 3847 manager_invalidate_startup_units(m);
56dacdbc
ZJS
3848}
3849
dd0ab174
LP
3850void manager_send_reloading(Manager *m) {
3851 assert(m);
3852
3853 /* Let whoever invoked us know that we are now reloading */
3854 (void) sd_notifyf(/* unset= */ false,
3855 "RELOADING=1\n"
3856 "MONOTONIC_USEC=" USEC_FMT "\n", now(CLOCK_MONOTONIC));
3857
3858 /* And ensure that we'll send READY=1 again as soon as we are ready again */
3859 m->ready_sent = false;
3860}
3861
64691d20 3862static bool generator_path_any(const char* const* paths) {
64691d20
ZJS
3863 bool found = false;
3864
3865 /* Optimize by skipping the whole process by not creating output directories
3866 * if no generators are found. */
2034c8b8 3867 STRV_FOREACH(path, paths)
64691d20
ZJS
3868 if (access(*path, F_OK) == 0)
3869 found = true;
3870 else if (errno != ENOENT)
3871 log_warning_errno(errno, "Failed to open generator directory %s: %m", *path);
3872
3873 return found;
3874}
3875
64691d20
ZJS
3876static int manager_run_environment_generators(Manager *m) {
3877 char **tmp = NULL; /* this is only used in the forked process, no cleanup here */
cccf5703 3878 _cleanup_strv_free_ char **paths = NULL;
1ad6e8b3
LP
3879 void* args[] = {
3880 [STDOUT_GENERATE] = &tmp,
3881 [STDOUT_COLLECT] = &tmp,
3882 [STDOUT_CONSUME] = &m->transient_environment,
3883 };
e3b8d063 3884 int r;
64691d20 3885
638cece4 3886 if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_ENV_GENERATORS))
e0a3da1f
ZJS
3887 return 0;
3888
361cacf4 3889 paths = env_generator_binary_paths(m->runtime_scope);
cccf5703
BB
3890 if (!paths)
3891 return log_oom();
64691d20 3892
cccf5703 3893 if (!generator_path_any((const char* const*) paths))
64691d20
ZJS
3894 return 0;
3895
2053593f 3896 WITH_UMASK(0022)
cccf5703 3897 r = execute_directories((const char* const*) paths, DEFAULT_TIMEOUT_USEC, gather_environment,
43f565c6
YW
3898 args, NULL, m->transient_environment,
3899 EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID);
e3b8d063 3900 return r;
64691d20
ZJS
3901}
3902
82c5db16
LP
3903static int build_generator_environment(Manager *m, char ***ret) {
3904 _cleanup_strv_free_ char **nl = NULL;
3905 Virtualization v;
08951245 3906 ConfidentialVirtualization cv;
82c5db16
LP
3907 int r;
3908
3909 assert(m);
3910 assert(ret);
3911
3912 /* Generators oftentimes want to know some basic facts about the environment they run in, in order to
3913 * adjust generated units to that. Let's pass down some bits of information that are easy for us to
3914 * determine (but a bit harder for generator scripts to determine), as environment variables. */
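/* A system manager on a KVM guest might thus pass something like SYSTEMD_SCOPE=system,
 * SYSTEMD_IN_INITRD=0, SYSTEMD_VIRTUALIZATION=vm:kvm, SYSTEMD_ARCHITECTURE=x86-64 (values shown for
 * illustration only). */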
3915
3916 nl = strv_copy(m->transient_environment);
3917 if (!nl)
3918 return -ENOMEM;
3919
4870133b 3920 r = strv_env_assign(&nl, "SYSTEMD_SCOPE", runtime_scope_to_string(m->runtime_scope));
82c5db16
LP
3921 if (r < 0)
3922 return r;
3923
3924 if (MANAGER_IS_SYSTEM(m)) {
3925 /* Note that $SYSTEMD_IN_INITRD may be used to override the initrd detection in much of our
3926 * codebase. This is hence more than purely informational. It will shortcut detection of the
3927 * initrd state if generators invoke our own tools. But that's OK, as it would come to the
3928 * same results (hopefully). */
3929 r = strv_env_assign(&nl, "SYSTEMD_IN_INITRD", one_zero(in_initrd()));
3930 if (r < 0)
3931 return r;
3932
3933 if (m->first_boot >= 0) {
3934 r = strv_env_assign(&nl, "SYSTEMD_FIRST_BOOT", one_zero(m->first_boot));
3935 if (r < 0)
3936 return r;
3937 }
3938 }
3939
3940 v = detect_virtualization();
3941 if (v < 0)
3942 log_debug_errno(v, "Failed to detect virtualization, ignoring: %m");
3943 else if (v > 0) {
3944 const char *s;
3945
3946 s = strjoina(VIRTUALIZATION_IS_VM(v) ? "vm:" :
3947 VIRTUALIZATION_IS_CONTAINER(v) ? "container:" : ":",
3948 virtualization_to_string(v));
3949
3950 r = strv_env_assign(&nl, "SYSTEMD_VIRTUALIZATION", s);
3951 if (r < 0)
3952 return r;
3953 }
3954
08951245
DB
3955 cv = detect_confidential_virtualization();
3956 if (cv < 0)
3957 log_debug_errno(cv, "Failed to detect confidential virtualization, ignoring: %m");
3958 else if (cv > 0) {
3959 r = strv_env_assign(&nl, "SYSTEMD_CONFIDENTIAL_VIRTUALIZATION", confidential_virtualization_to_string(cv));
3960 if (r < 0)
3961 return r;
3962 }
3963
82c5db16
LP
3964 r = strv_env_assign(&nl, "SYSTEMD_ARCHITECTURE", architecture_to_string(uname_architecture()));
3965 if (r < 0)
3966 return r;
3967
3968 *ret = TAKE_PTR(nl);
3969 return 0;
3970}
3971
ca6ce62d
ZJS
3972static int manager_execute_generators(Manager *m, char **paths, bool remount_ro) {
3973 _cleanup_strv_free_ char **ge = NULL;
3974 const char *argv[] = {
3975 NULL, /* Leave this empty, execute_directory() will fill something in */
3976 m->lookup_paths.generator,
3977 m->lookup_paths.generator_early,
3978 m->lookup_paths.generator_late,
3979 NULL,
3980 };
3981 int r;
3982
3983 r = build_generator_environment(m, &ge);
3984 if (r < 0)
3985 return log_error_errno(r, "Failed to build generator environment: %m");
3986
3987 if (remount_ro) {
3988 /* Remount most of the filesystem tree read-only. We leave /sys/ as-is, because our code
3989 * checks whether it is read-only to detect containerized execution environments. We leave
3990 * /run/ as-is too, because that's where our output goes. We also leave /proc/ and /dev/shm/
3991 * because they're API, and /tmp/ that safe_fork() mounted for us.
3992 */
3993 r = bind_remount_recursive("/", MS_RDONLY, MS_RDONLY,
3994 STRV_MAKE("/sys", "/run", "/proc", "/dev/shm", "/tmp"));
3995 if (r < 0)
3996 log_warning_errno(r, "Read-only bind remount failed, ignoring: %m");
3997 }
3998
3999 BLOCK_WITH_UMASK(0022);
4000 return execute_directories(
4001 (const char* const*) paths,
4002 DEFAULT_TIMEOUT_USEC,
4003 /* callbacks= */ NULL, /* callback_args= */ NULL,
4004 (char**) argv,
4005 ge,
4006 EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID);
4007}
4008
e801700e 4009static int manager_run_generators(Manager *m) {
b8fba0cd 4010 ForkFlags flags = FORK_RESET_SIGNALS | FORK_WAIT | FORK_NEW_MOUNTNS | FORK_MOUNTNS_SLAVE;
ca6ce62d 4011 _cleanup_strv_free_ char **paths = NULL;
07719a21 4012 int r;
5a1e9937
LP
4013
4014 assert(m);
4015
638cece4 4016 if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_GENERATORS))
e0a3da1f
ZJS
4017 return 0;
4018
4870133b 4019 paths = generator_binary_paths(m->runtime_scope);
e801700e
ZJS
4020 if (!paths)
4021 return log_oom();
5a1e9937 4022
64691d20
ZJS
4023 if (!generator_path_any((const char* const*) paths))
4024 return 0;
5a1e9937 4025
cd64fd56 4026 r = lookup_paths_mkdir_generator(&m->lookup_paths);
7eb4f326
LP
4027 if (r < 0) {
4028 log_error_errno(r, "Failed to create generator directories: %m");
07719a21 4029 goto finish;
7eb4f326 4030 }
5a1e9937 4031
ca6ce62d
ZJS
4032 /* If we are the system manager, we fork and invoke the generators in a sanitized mount namespace. If
4033 * we are the user manager, let's just execute the generators directly. We might not have the
4034 * necessary privileges, and the system manager has already mounted /tmp/ and everything else for us.
4035 */
4036 if (MANAGER_IS_USER(m)) {
4037 r = manager_execute_generators(m, paths, /* remount_ro= */ false);
82c5db16
LP
4038 goto finish;
4039 }
5a1e9937 4040
b8fba0cd
LB
4041 /* On some systems /tmp/ doesn't exist, and on some other systems we cannot create it at all. Avoid
4042 * trying to mount a private tmpfs on it as there's no one size fits all. */
4043 if (is_dir("/tmp", /* follow= */ false) > 0)
4044 flags |= FORK_PRIVATE_TMP;
4045
4046 r = safe_fork("(sd-gens)", flags, NULL);
ca6ce62d
ZJS
4047 if (r == 0) {
4048 r = manager_execute_generators(m, paths, /* remount_ro= */ true);
4049 _exit(r >= 0 ? EXIT_SUCCESS : EXIT_FAILURE);
4050 }
a2275dcb 4051 if (r < 0) {
46801870 4052 if (!ERRNO_IS_PRIVILEGE(r) && r != -EINVAL) {
a2275dcb
YW
4053 log_error_errno(r, "Failed to fork off sandboxing environment for executing generators: %m");
4054 goto finish;
4055 }
4056
4057 /* Failed to fork with new mount namespace? Maybe, running in a container environment with
46801870
YW
4058 * seccomp or without capability.
4059 *
4060 * We also allow -EINVAL to allow running without CLONE_NEWNS.
4061 *
4062 * Also, when running on non-native userland architecture via systemd-nspawn and
4063 * qemu-user-static QEMU-emulator, clone() with CLONE_NEWNS fails with EINVAL, see
4064 * https://github.com/systemd/systemd/issues/28901.
4065 */
a2275dcb
YW
4066 log_debug_errno(r,
4067 "Failed to fork off sandboxing environment for executing generators. "
4068 "Falling back to execute generators without sandboxing: %m");
4069 r = manager_execute_generators(m, paths, /* remount_ro= */ false);
4070 }
5a1e9937 4071
718db961 4072finish:
cd64fd56 4073 lookup_paths_trim_generator(&m->lookup_paths);
e801700e 4074 return r;
5a1e9937
LP
4075}
4076
1ad6e8b3
LP
4077int manager_transient_environment_add(Manager *m, char **plus) {
4078 char **a;
4079
4080 assert(m);
4081
4082 if (strv_isempty(plus))
4083 return 0;
4084
4ab3d29f 4085 a = strv_env_merge(m->transient_environment, plus);
1ad6e8b3 4086 if (!a)
2fbbbf9a 4087 return log_oom();
1ad6e8b3
LP
4088
4089 sanitize_environment(a);
4090
4091 return strv_free_and_replace(m->transient_environment, a);
4092}
4093
4094int manager_client_environment_modify(
4095 Manager *m,
4096 char **minus,
4097 char **plus) {
4098
718db961 4099 char **a = NULL, **b = NULL, **l;
1ad6e8b3 4100
97d0e5f8 4101 assert(m);
bcd8e6d1 4102
1ad6e8b3
LP
4103 if (strv_isempty(minus) && strv_isempty(plus))
4104 return 0;
4105
4106 l = m->client_environment;
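/* Apply deletions first, then additions, always working on 'l'; whichever intermediate lists we
 * allocated but did not end up keeping are freed again below. */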
bcd8e6d1 4107
718db961
LP
4108 if (!strv_isempty(minus)) {
4109 a = strv_env_delete(l, 1, minus);
4110 if (!a)
4111 return -ENOMEM;
4112
4113 l = a;
4114 }
4115
4116 if (!strv_isempty(plus)) {
4ab3d29f 4117 b = strv_env_merge(l, plus);
aa9f8a30
AH
4118 if (!b) {
4119 strv_free(a);
718db961 4120 return -ENOMEM;
aa9f8a30 4121 }
bcd8e6d1 4122
718db961
LP
4123 l = b;
4124 }
4125
1ad6e8b3
LP
4126 if (m->client_environment != l)
4127 strv_free(m->client_environment);
4128
718db961
LP
4129 if (a != l)
4130 strv_free(a);
4131 if (b != l)
4132 strv_free(b);
4133
1ad6e8b3
LP
4134 m->client_environment = sanitize_environment(l);
4135 return 0;
4136}
4137
4138int manager_get_effective_environment(Manager *m, char ***ret) {
4139 char **l;
4140
4141 assert(m);
4142 assert(ret);
4143
4ab3d29f 4144 l = strv_env_merge(m->transient_environment, m->client_environment);
1ad6e8b3
LP
4145 if (!l)
4146 return -ENOMEM;
f069efb4 4147
1ad6e8b3 4148 *ret = l;
97d0e5f8
UTL
4149 return 0;
4150}
4151
bfb27b06
LP
4152int manager_set_unit_defaults(Manager *m, const UnitDefaults *defaults) {
4153 _cleanup_free_ char *label = NULL;
4154 struct rlimit *rlimit[_RLIMIT_MAX];
4155 int r;
4156
aa5ae971 4157 assert(m);
bfb27b06 4158 assert(defaults);
aa5ae971 4159
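/* A configured default SMACK process label of "/" explicitly requests "no label", overriding the
 * compile-time default handled below. */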
bfb27b06
LP
4160 if (streq_ptr(defaults->smack_process_label, "/"))
4161 label = NULL;
4162 else {
4163 const char *l = defaults->smack_process_label;
aa5ae971 4164#ifdef SMACK_DEFAULT_PROCESS_LABEL
bfb27b06
LP
4165 if (!l)
4166 l = SMACK_DEFAULT_PROCESS_LABEL;
aa5ae971 4167#endif
bfb27b06
LP
4168 if (l) {
4169 label = strdup(l);
4170 if (!label)
4171 return -ENOMEM;
4172 } else
4173 label = NULL;
4174 }
aa5ae971 4175
bfb27b06
LP
4176 r = rlimit_copy_all(rlimit, defaults->rlimit);
4177 if (r < 0)
4178 return r;
aa5ae971 4179
bfb27b06
LP
4180 m->defaults.std_output = defaults->std_output;
4181 m->defaults.std_error = defaults->std_error;
c93ff2e9 4182
bfb27b06
LP
4183 m->defaults.restart_usec = defaults->restart_usec;
4184 m->defaults.timeout_start_usec = defaults->timeout_start_usec;
4185 m->defaults.timeout_stop_usec = defaults->timeout_stop_usec;
4186 m->defaults.timeout_abort_usec = defaults->timeout_abort_usec;
4187 m->defaults.timeout_abort_set = defaults->timeout_abort_set;
4188 m->defaults.device_timeout_usec = defaults->device_timeout_usec;
d9814c76 4189
bfb27b06
LP
4190 m->defaults.start_limit_interval = defaults->start_limit_interval;
4191 m->defaults.start_limit_burst = defaults->start_limit_burst;
c93ff2e9 4192
bfb27b06
LP
4193 m->defaults.cpu_accounting = defaults->cpu_accounting;
4194 m->defaults.memory_accounting = defaults->memory_accounting;
4195 m->defaults.io_accounting = defaults->io_accounting;
4196 m->defaults.blockio_accounting = defaults->blockio_accounting;
4197 m->defaults.tasks_accounting = defaults->tasks_accounting;
4198 m->defaults.ip_accounting = defaults->ip_accounting;
4199
4200 m->defaults.tasks_max = defaults->tasks_max;
4201 m->defaults.timer_accuracy_usec = defaults->timer_accuracy_usec;
4202
4203 m->defaults.oom_policy = defaults->oom_policy;
4204 m->defaults.oom_score_adjust = defaults->oom_score_adjust;
4205 m->defaults.oom_score_adjust_set = defaults->oom_score_adjust_set;
4206
4207 m->defaults.memory_pressure_watch = defaults->memory_pressure_watch;
4208 m->defaults.memory_pressure_threshold_usec = defaults->memory_pressure_threshold_usec;
4209
4210 free_and_replace(m->defaults.smack_process_label, label);
4211 rlimit_free_all(m->defaults.rlimit);
4212 memcpy(m->defaults.rlimit, rlimit, sizeof(struct rlimit*) * _RLIMIT_MAX);
c93ff2e9
FC
4213
4214 return 0;
4215}
4216
8559b3b7
LP
4217void manager_recheck_dbus(Manager *m) {
4218 assert(m);
4219
8f41e6b6
ZJS
4220 /* Connects to the bus if the dbus service and socket are running. If we are running in user mode
4221 * this is all it does. In system mode we'll also connect to the system bus (which will most likely
4222 * just reuse the connection of the API bus). That's because the system bus after all runs as service
4223 * of the system instance, while in the user instance we can assume it's already there. */
8559b3b7 4224
31dc1ca3
LP
4225 if (MANAGER_IS_RELOADING(m))
4226 return; /* don't check while we are reloading… */
4227
2b680534 4228 if (manager_dbus_is_running(m, false)) {
8559b3b7
LP
4229 (void) bus_init_api(m);
4230
4231 if (MANAGER_IS_SYSTEM(m))
4232 (void) bus_init_system(m);
4233 } else {
4234 (void) bus_done_api(m);
4235
4236 if (MANAGER_IS_SYSTEM(m))
4237 (void) bus_done_system(m);
4238 }
4239}
4240
d075092f 4241static bool manager_journal_is_running(Manager *m) {
f1dd0c3f
LP
4242 Unit *u;
4243
4244 assert(m);
4245
638cece4 4246 if (MANAGER_IS_TEST_RUN(m))
7d814a19
LP
4247 return false;
4248
d075092f 4249 /* If we are the user manager we can safely assume that the journal is up */
463d0d15 4250 if (!MANAGER_IS_SYSTEM(m))
d075092f 4251 return true;
f1dd0c3f 4252
d075092f 4253 /* Check that the socket is not only up, but in RUNNING state */
731a676c 4254 u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
d075092f
LP
4255 if (!u)
4256 return false;
4257 if (SOCKET(u)->state != SOCKET_RUNNING)
4258 return false;
f1dd0c3f 4259
d075092f 4260 /* Similarly, check if the daemon itself is fully up, too */
731a676c 4261 u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
d075092f
LP
4262 if (!u)
4263 return false;
217677ab 4264 if (!IN_SET(SERVICE(u)->state, SERVICE_RELOAD, SERVICE_RUNNING))
d075092f
LP
4265 return false;
4266
4267 return true;
4268}
4269
6123dfaa
ZJS
4270void disable_printk_ratelimit(void) {
4271 /* Disable kernel's printk ratelimit.
4272 *
4273 * Logging to /dev/kmsg is most useful during early boot and shutdown, where normal logging
4274 * mechanisms are not available. The semantics of this sysctl are such that any kernel command-line
4275 * setting takes precedence. */
4276 int r;
4277
4278 r = sysctl_write("kernel/printk_devkmsg", "on");
4279 if (r < 0)
4280 log_debug_errno(r, "Failed to set sysctl kernel.printk_devkmsg=on: %m");
4281}
4282
d075092f
LP
4283void manager_recheck_journal(Manager *m) {
4284
4285 assert(m);
4286
4287 /* Don't bother with this unless we are in the special situation of being PID 1 */
4288 if (getpid_cached() != 1)
731a676c 4289 return;
f1dd0c3f 4290
31dc1ca3
LP
4291 /* Don't check this while we are reloading, things might still change */
4292 if (MANAGER_IS_RELOADING(m))
4293 return;
4294
8f41e6b6
ZJS
4295 /* The journal is fully and entirely up? If so, let's permit logging to it, if that's configured. If
4296 * the journal is down, don't ever log to it, otherwise we might end up deadlocking ourselves as we
4297 * might trigger an activation ourselves we can't fulfill. */
cedf5088 4298 log_set_prohibit_ipc(!manager_journal_is_running(m));
cc2b9e6b 4299 log_open();
f1dd0c3f
LP
4300}
4301
44a41954
FB
4302static ShowStatus manager_get_show_status(Manager *m) {
4303 assert(m);
4304
4305 if (MANAGER_IS_USER(m))
4306 return _SHOW_STATUS_INVALID;
4307
4308 if (m->show_status_overridden != _SHOW_STATUS_INVALID)
4309 return m->show_status_overridden;
4310
4311 return m->show_status;
4312}
4313
4314bool manager_get_show_status_on(Manager *m) {
4315 assert(m);
4316
4317 return show_status_on(manager_get_show_status(m));
4318}
b309078a 4319
3ceb3471
FB
4320static void set_show_status_marker(bool b) {
4321 if (b)
4322 (void) touch("/run/systemd/show-status");
4323 else
4324 (void) unlink("/run/systemd/show-status");
4325}
4326
44a41954 4327void manager_set_show_status(Manager *m, ShowStatus mode, const char *reason) {
27d340c7 4328 assert(m);
44a41954 4329 assert(reason);
0d066dd1 4330 assert(mode >= 0 && mode < _SHOW_STATUS_MAX);
27d340c7 4331
44a41954 4332 if (MANAGER_IS_USER(m))
27d340c7
LP
4333 return;
4334
ef15d3e1
ZJS
4335 if (mode == m->show_status)
4336 return;
4337
44a41954
FB
4338 if (m->show_status_overridden == _SHOW_STATUS_INVALID) {
4339 bool enabled;
4340
4341 enabled = show_status_on(mode);
4342 log_debug("%s (%s) showing of status (%s).",
4343 enabled ? "Enabling" : "Disabling",
4344 strna(show_status_to_string(mode)),
4345 reason);
4346
3ceb3471 4347 set_show_status_marker(enabled);
44a41954
FB
4348 }
4349
4350 m->show_status = mode;
4351}
4352
43bba15a 4353void manager_override_show_status(Manager *m, ShowStatus mode, const char *reason) {
44a41954
FB
4354 assert(m);
4355 assert(mode < _SHOW_STATUS_MAX);
4356
4357 if (MANAGER_IS_USER(m))
4358 return;
4359
4360 if (mode == m->show_status_overridden)
4361 return;
4362
4363 m->show_status_overridden = mode;
4364
4365 if (mode == _SHOW_STATUS_INVALID)
4366 mode = m->show_status;
4367
ef15d3e1 4368 log_debug("%s (%s) showing of status (%s).",
44a41954 4369 m->show_status_overridden != _SHOW_STATUS_INVALID ? "Overriding" : "Restoring",
ef15d3e1
ZJS
4370 strna(show_status_to_string(mode)),
4371 reason);
b309078a 4372
3ceb3471 4373 set_show_status_marker(show_status_on(mode));
27d340c7
LP
4374}
4375
7d5ceb64
FB
4376const char *manager_get_confirm_spawn(Manager *m) {
4377 static int last_errno = 0;
7d5ceb64
FB
4378 struct stat st;
4379 int r;
4380
ea758432
LP
4381 assert(m);
4382
7d5ceb64
FB
4383 /* Here's the deal: we want to test the validity of the console but don't want
4384 * PID1 to go through the whole console process which might block. But we also
4385 * want to warn the user only once if something is wrong with the console so we
4386 * cannot do the sanity checks after spawning our children. So here we simply do
4387 * really basic tests to hopefully trap common errors.
4388 *
4391 * If the console suddenly disappears at the time our children actually need
4392 * it, they will simply fail to acquire it and a positive answer will be
2aed63f4 4391 * assumed. New children will fall back to /dev/console though.
7d5ceb64
FB
4392 *
4393 * Note: TTYs are devices that can come and go any time, and frequently aren't
4394 * available yet during early boot (consider a USB rs232 dongle...). If for any
2aed63f4 4395 * reason the configured console is not ready, we fall back to the default
7d5ceb64
FB
4396 * console. */
4397
ea758432
LP
4398 if (!m->confirm_spawn || path_equal(m->confirm_spawn, "/dev/console"))
4399 return m->confirm_spawn;
7d5ceb64 4400
ea758432
LP
4401 if (stat(m->confirm_spawn, &st) < 0) {
4402 r = -errno;
7d5ceb64 4403 goto fail;
ea758432 4404 }
7d5ceb64
FB
4405
4406 if (!S_ISCHR(st.st_mode)) {
ea758432 4407 r = -ENOTTY;
7d5ceb64
FB
4408 goto fail;
4409 }
4410
4411 last_errno = 0;
ea758432
LP
4412 return m->confirm_spawn;
4413
7d5ceb64 4414fail:
ea758432
LP
4415 if (last_errno != r)
4416 last_errno = log_warning_errno(r, "Failed to open %s, using default console: %m", m->confirm_spawn);
4417
7d5ceb64
FB
4418 return "/dev/console";
4419}
4420
e2680723
LP
4421void manager_set_first_boot(Manager *m, bool b) {
4422 assert(m);
4423
463d0d15 4424 if (!MANAGER_IS_SYSTEM(m))
e2680723
LP
4425 return;
4426
ae2a2c53
LP
4427 if (m->first_boot != (int) b) {
4428 if (b)
4429 (void) touch("/run/systemd/first-boot");
4430 else
4431 (void) unlink("/run/systemd/first-boot");
4432 }
e2680723 4433
ae2a2c53 4434 m->first_boot = b;
e2680723
LP
4435}
4436
b0eb2944
FB
4437void manager_disable_confirm_spawn(void) {
4438 (void) touch("/run/systemd/confirm_spawn_disabled");
4439}
4440
0d6d3cf0
FB
4441static bool manager_should_show_status(Manager *m, StatusType type) {
4442 assert(m);
4443
4444 if (!MANAGER_IS_SYSTEM(m))
4445 return false;
4446
4447 if (m->no_console_output)
4448 return false;
4449
4450 if (!IN_SET(manager_state(m), MANAGER_INITIALIZING, MANAGER_STARTING, MANAGER_STOPPING))
4451 return false;
4452
4453 /* If we cannot find out the status properly, just proceed. */
4454 if (type != STATUS_TYPE_EMERGENCY && manager_check_ask_password(m) > 0)
4455 return false;
4456
4457 if (type == STATUS_TYPE_NOTICE && m->show_status != SHOW_STATUS_NO)
4458 return true;
4459
44a41954 4460 return manager_get_show_status_on(m);
0d6d3cf0
FB
4461}
4462
127d5fd1 4463void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) {
25cee550
MS
4464 va_list ap;
4465
cb6531be
ZJS
4466 /* If m is NULL, assume we're after shutdown and let the messages through. */
4467
0d6d3cf0 4468 if (m && !manager_should_show_status(m, type))
25cee550
MS
4469 return;
4470
03b717a3
MS
4471 /* XXX We should totally drop the check for ephemeral here
4472 * and thus effectively make 'Type=idle' pointless. */
cb6531be 4473 if (type == STATUS_TYPE_EPHEMERAL && m && m->n_on_console > 0)
03b717a3
MS
4474 return;
4475
25cee550 4476 va_start(ap, format);
a885727a 4477 status_vprintf(status, SHOW_STATUS_ELLIPSIZE|(type == STATUS_TYPE_EPHEMERAL ? SHOW_STATUS_EPHEMERAL : 0), format, ap);
25cee550
MS
4478 va_end(ap);
4479}
4480
ac19bdd0 4481Set* manager_get_units_requiring_mounts_for(Manager *m, const char *path) {
a57f7e2c
LP
4482 assert(m);
4483 assert(path);
4484
ac19bdd0
ZJS
4485 if (path_equal(path, "/"))
4486 path = "";
a57f7e2c 4487
ac19bdd0 4488 return hashmap_get(m->units_requiring_mounts_for, path);
a57f7e2c 4489}
e66cf1a3 4490
5269eb6b 4491int manager_update_failed_units(Manager *m, Unit *u, bool failed) {
03455c28 4492 unsigned size;
5269eb6b 4493 int r;
03455c28
LDM
4494
4495 assert(m);
4496 assert(u->manager == m);
4497
4498 size = set_size(m->failed_units);
4499
9fff8981 4500 if (failed) {
de7fef4b 4501 r = set_ensure_put(&m->failed_units, NULL, u);
5269eb6b
LP
4502 if (r < 0)
4503 return log_oom();
9fff8981 4504 } else
5269eb6b 4505 (void) set_remove(m->failed_units, u);
03455c28
LDM
4506
4507 if (set_size(m->failed_units) != size)
4508 bus_manager_send_change_signal(m);
5269eb6b
LP
4509
4510 return 0;
03455c28
LDM
4511}
4512
f755e3b7
LP
4513ManagerState manager_state(Manager *m) {
4514 Unit *u;
4515
4516 assert(m);
4517
f9d29f6d
JB
4518 /* Is the special shutdown target active or queued? If so, we are in shutdown state */
4519 u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
4520 if (u && unit_active_or_pending(u))
4521 return MANAGER_STOPPING;
4522
f755e3b7 4523 /* Did we ever finish booting? If not then we are still starting up */
49d5666c 4524 if (!MANAGER_IS_FINISHED(m)) {
d81afec1
LP
4525
4526 u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
4527 if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
4528 return MANAGER_INITIALIZING;
4529
f755e3b7 4530 return MANAGER_STARTING;
d81afec1 4531 }
f755e3b7 4532
45a7b16b
LP
4533 if (MANAGER_IS_SYSTEM(m)) {
4534 /* Are the rescue or emergency targets active or queued? If so we are in maintenance state */
4535 u = manager_get_unit(m, SPECIAL_RESCUE_TARGET);
4536 if (u && unit_active_or_pending(u))
4537 return MANAGER_MAINTENANCE;
f755e3b7 4538
45a7b16b
LP
4539 u = manager_get_unit(m, SPECIAL_EMERGENCY_TARGET);
4540 if (u && unit_active_or_pending(u))
4541 return MANAGER_MAINTENANCE;
4542 }
f755e3b7
LP
4543
4544 /* Are there any failed units? If so, we are in degraded mode */
4545 if (set_size(m->failed_units) > 0)
4546 return MANAGER_DEGRADED;
4547
4548 return MANAGER_RUNNING;
4549}
4550
00d9ef85 4551static void manager_unref_uid_internal(
010becd9 4552 Hashmap *uid_refs,
00d9ef85
LP
4553 uid_t uid,
4554 bool destroy_now,
4555 int (*_clean_ipc)(uid_t uid)) {
4556
4557 uint32_t c, n;
4558
00d9ef85
LP
4559 assert(uid_is_valid(uid));
4560 assert(_clean_ipc);
4561
8f41e6b6
ZJS
4562 /* A generic implementation, covering both manager_unref_uid() and manager_unref_gid(), under the
4563 * assumption that uid_t and gid_t are actually defined the same way, with the same validity rules.
00d9ef85 4564 *
da890466 4565 * We store a hashmap where the key is the UID/GID and the value is a 32-bit reference counter, whose
8f41e6b6
ZJS
4566 * highest bit is used as flag for marking UIDs/GIDs whose IPC objects to remove when the last
4567 * reference to the UID/GID is dropped. The flag is set to on, once at least one reference from a
4568 * unit where RemoveIPC= is set is added on a UID/GID. It is reset when the UID's/GID's reference
4569 * counter drops to 0 again. */
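/* For example, a stored value of (DESTROY_IPC_FLAG | 3) means three units currently hold a reference
 * to the UID/GID, and its IPC objects shall be removed once the last one is dropped. */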
00d9ef85
LP
4570
4571 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4572 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4573
4574 if (uid == 0) /* We don't keep track of root, and will never destroy it */
4575 return;
4576
010becd9 4577 c = PTR_TO_UINT32(hashmap_get(uid_refs, UID_TO_PTR(uid)));
00d9ef85
LP
4578
4579 n = c & ~DESTROY_IPC_FLAG;
4580 assert(n > 0);
4581 n--;
4582
4583 if (destroy_now && n == 0) {
010becd9 4584 hashmap_remove(uid_refs, UID_TO_PTR(uid));
00d9ef85
LP
4585
4586 if (c & DESTROY_IPC_FLAG) {
4587 log_debug("%s " UID_FMT " is no longer referenced, cleaning up its IPC.",
4588 _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
4589 uid);
4590 (void) _clean_ipc(uid);
4591 }
4592 } else {
4593 c = n | (c & DESTROY_IPC_FLAG);
010becd9 4594 assert_se(hashmap_update(uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0);
00d9ef85
LP
4595 }
4596}
4597
4598void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now) {
010becd9 4599 manager_unref_uid_internal(m->uid_refs, uid, destroy_now, clean_ipc_by_uid);
00d9ef85
LP
4600}
4601
4602void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now) {
010becd9 4603 manager_unref_uid_internal(m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid);
00d9ef85
LP
4604}
4605
4606static int manager_ref_uid_internal(
00d9ef85
LP
4607 Hashmap **uid_refs,
4608 uid_t uid,
4609 bool clean_ipc) {
4610
4611 uint32_t c, n;
4612 int r;
4613
00d9ef85
LP
4614 assert(uid_refs);
4615 assert(uid_is_valid(uid));
4616
8f41e6b6
ZJS
4617 /* A generic implementation, covering both manager_ref_uid() and manager_ref_gid(), under the
4618 * assumption that uid_t and gid_t are actually defined the same way, with the same validity
4619 * rules. */
00d9ef85
LP
4620
4621 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4622 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4623
4624 if (uid == 0) /* We don't keep track of root, and will never destroy it */
4625 return 0;
4626
4627 r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
4628 if (r < 0)
4629 return r;
4630
4631 c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
4632
4633 n = c & ~DESTROY_IPC_FLAG;
4634 n++;
4635
4636 if (n & DESTROY_IPC_FLAG) /* check for overflow */
4637 return -EOVERFLOW;
4638
4639 c = n | (c & DESTROY_IPC_FLAG) | (clean_ipc ? DESTROY_IPC_FLAG : 0);
4640
4641 return hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
4642}
4643
4644int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc) {
010becd9 4645 return manager_ref_uid_internal(&m->uid_refs, uid, clean_ipc);
00d9ef85
LP
4646}
4647
4648int manager_ref_gid(Manager *m, gid_t gid, bool clean_ipc) {
010becd9 4649 return manager_ref_uid_internal(&m->gid_refs, (uid_t) gid, clean_ipc);
00d9ef85
LP
4650}
4651
4652static void manager_vacuum_uid_refs_internal(
010becd9 4653 Hashmap *uid_refs,
00d9ef85
LP
4654 int (*_clean_ipc)(uid_t uid)) {
4655
00d9ef85
LP
4656 void *p, *k;
4657
00d9ef85
LP
4658 assert(_clean_ipc);
4659
010becd9 4660 HASHMAP_FOREACH_KEY(p, k, uid_refs) {
00d9ef85
LP
4661 uint32_t c, n;
4662 uid_t uid;
4663
4664 uid = PTR_TO_UID(k);
4665 c = PTR_TO_UINT32(p);
4666
4667 n = c & ~DESTROY_IPC_FLAG;
4668 if (n > 0)
4669 continue;
4670
4671 if (c & DESTROY_IPC_FLAG) {
4672 log_debug("Found unreferenced %s " UID_FMT " after reload/reexec. Cleaning up.",
4673 _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
4674 uid);
4675 (void) _clean_ipc(uid);
4676 }
4677
010becd9 4678 assert_se(hashmap_remove(uid_refs, k) == p);
00d9ef85
LP
4679 }
4680}
4681
06a4eb07 4682static void manager_vacuum_uid_refs(Manager *m) {
010becd9 4683 manager_vacuum_uid_refs_internal(m->uid_refs, clean_ipc_by_uid);
00d9ef85
LP
4684}
4685
06a4eb07 4686static void manager_vacuum_gid_refs(Manager *m) {
010becd9 4687 manager_vacuum_uid_refs_internal(m->gid_refs, clean_ipc_by_gid);
00d9ef85
LP
4688}
4689
06a4eb07
FB
4690static void manager_vacuum(Manager *m) {
4691 assert(m);
4692
4693 /* Release any dynamic users no longer referenced */
4694 dynamic_user_vacuum(m, true);
4695
4696 /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
4697 manager_vacuum_uid_refs(m);
4698 manager_vacuum_gid_refs(m);
4699
4700 /* Release any runtimes no longer referenced */
e76506b7 4701 exec_shared_runtime_vacuum(m);
06a4eb07
FB
4702}
4703
00d9ef85
LP
4704int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
4705 struct buffer {
4706 uid_t uid;
4707 gid_t gid;
4708 char unit_name[UNIT_NAME_MAX+1];
4709 } _packed_ buffer;

        Manager *m = userdata;
        ssize_t l;
        size_t n;
        Unit *u;

        assert_se(source);
        assert_se(m);

        /* Invoked whenever a child process succeeded resolving its user/group to use and sent us the
         * resulting UID/GID in a datagram. We parse the datagram here and pass it off to the unit, so that
         * it can add a reference to the UID/GID so that it can destroy the UID/GID's IPC objects when the
         * reference counter drops to 0. */

        l = recv(fd, &buffer, sizeof(buffer), MSG_DONTWAIT);
        if (l < 0) {
                if (ERRNO_IS_TRANSIENT(errno))
                        return 0;

                return log_error_errno(errno, "Failed to read from user lookup fd: %m");
        }

        if ((size_t) l <= offsetof(struct buffer, unit_name)) {
                log_warning("Received too short user lookup message, ignoring.");
                return 0;
        }

        if ((size_t) l > offsetof(struct buffer, unit_name) + UNIT_NAME_MAX) {
                log_warning("Received too long user lookup message, ignoring.");
                return 0;
        }

        if (!uid_is_valid(buffer.uid) && !gid_is_valid(buffer.gid)) {
                log_warning("Got user lookup message with invalid UID/GID pair, ignoring.");
                return 0;
        }

        n = (size_t) l - offsetof(struct buffer, unit_name);
        if (memchr(buffer.unit_name, 0, n)) {
                log_warning("Received lookup message with embedded NUL character, ignoring.");
                return 0;
        }

        buffer.unit_name[n] = 0;
        u = manager_get_unit(m, buffer.unit_name);
        if (!u) {
                log_debug("Got user lookup message but unit doesn't exist, ignoring.");
                return 0;
        }

        log_unit_debug(u, "User lookup succeeded: uid=" UID_FMT " gid=" GID_FMT, buffer.uid, buffer.gid);

        unit_notify_user_lookup(u, buffer.uid, buffer.gid);
        return 0;
}

static int short_uid_range(const char *path) {
        _cleanup_(uid_range_freep) UidRange *p = NULL;
        int r;

        assert(path);

        /* Taint systemd if the UID range assigned to this environment doesn't at least cover 0…65534,
         * i.e. from root to nobody. */

        r = uid_range_load_userns(&p, path);
        if (ERRNO_IS_NEG_NOT_SUPPORTED(r))
                return false;
        if (r < 0)
                return log_debug_errno(r, "Failed to load %s: %m", path);

        return !uid_range_covers(p, 0, 65535);
}

char* manager_taint_string(const Manager *m) {
        /* Returns a "taint string", e.g. "local-hwclock:var-run-bad". Only things that are detected at
         * runtime should be tagged here. For stuff that is known during compilation, emit a warning in the
         * configuration phase. */

        assert(m);

        const char* stage[12] = {};
        size_t n = 0;

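        /* 'stage' collects the taint flags; since it is zero-initialized, the unused tail doubles as the
         * NULL terminator that strv_join() relies on below. */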
        _cleanup_free_ char *usrbin = NULL;
        if (readlink_malloc("/bin", &usrbin) < 0 || !PATH_IN_SET(usrbin, "usr/bin", "/usr/bin"))
                stage[n++] = "unmerged-usr";

        if (access("/proc/cgroups", F_OK) < 0)
                stage[n++] = "cgroups-missing";

        if (cg_all_unified() == 0)
                stage[n++] = "cgroupsv1";

        if (clock_is_localtime(NULL) > 0)
                stage[n++] = "local-hwclock";

        if (os_release_support_ended(NULL, /* quiet= */ true, NULL) > 0)
                stage[n++] = "support-ended";

        _cleanup_free_ char *destination = NULL;
        if (readlink_malloc("/var/run", &destination) < 0 ||
            !PATH_IN_SET(destination, "../run", "/run"))
                stage[n++] = "var-run-bad";

        _cleanup_free_ char *overflowuid = NULL, *overflowgid = NULL;
        if (read_one_line_file("/proc/sys/kernel/overflowuid", &overflowuid) >= 0 &&
            !streq(overflowuid, "65534"))
                stage[n++] = "overflowuid-not-65534";
        if (read_one_line_file("/proc/sys/kernel/overflowgid", &overflowgid) >= 0 &&
            !streq(overflowgid, "65534"))
                stage[n++] = "overflowgid-not-65534";

        struct utsname uts;
        assert_se(uname(&uts) >= 0);
        if (strverscmp_improved(uts.release, KERNEL_BASELINE_VERSION) < 0)
                stage[n++] = "old-kernel";

        if (short_uid_range("/proc/self/uid_map") > 0)
                stage[n++] = "short-uid-range";
        if (short_uid_range("/proc/self/gid_map") > 0)
                stage[n++] = "short-gid-range";

        assert(n < ELEMENTSOF(stage) - 1); /* One extra for NULL terminator */

        return strv_join((char**) stage, ":");
}

void manager_ref_console(Manager *m) {
        assert(m);

        m->n_on_console++;
}

void manager_unref_console(Manager *m) {

        assert(m->n_on_console > 0);
        m->n_on_console--;

        if (m->n_on_console == 0)
                m->no_console_output = false; /* unset no_console_output flag, since the console is definitely free now */
}

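/* Temporarily override the log level (e.g. when requested over D-Bus or via the SIGRTMIN+22/+23 signals),
 * remembering the original level so that manager_restore_original_log_level() can put it back later. */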
void manager_override_log_level(Manager *m, int level) {
        _cleanup_free_ char *s = NULL;
        assert(m);

        if (!m->log_level_overridden) {
                m->original_log_level = log_get_max_level();
                m->log_level_overridden = true;
        }

        (void) log_level_to_string_alloc(level, &s);
        log_info("Setting log level to %s.", strna(s));

        log_set_max_level(level);
}

void manager_restore_original_log_level(Manager *m) {
        _cleanup_free_ char *s = NULL;
        assert(m);

        if (!m->log_level_overridden)
                return;

        (void) log_level_to_string_alloc(m->original_log_level, &s);
        log_info("Restoring log level to original (%s).", strna(s));

        log_set_max_level(m->original_log_level);
        m->log_level_overridden = false;
}

void manager_override_log_target(Manager *m, LogTarget target) {
        assert(m);

        if (!m->log_target_overridden) {
                m->original_log_target = log_get_target();
                m->log_target_overridden = true;
        }

        log_info("Setting log target to %s.", log_target_to_string(target));
        log_set_target(target);
}

void manager_restore_original_log_target(Manager *m) {
        assert(m);

        if (!m->log_target_overridden)
                return;

        log_info("Restoring log target to original %s.", log_target_to_string(m->original_log_target));

        log_set_target(m->original_log_target);
        m->log_target_overridden = false;
}

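/* While running in the initrd, map the security/generators/units-load timestamps to their INITRD_*
 * counterparts, so that the initrd and host phases of boot are recorded separately. */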
ManagerTimestamp manager_timestamp_initrd_mangle(ManagerTimestamp s) {
        if (in_initrd() &&
            s >= MANAGER_TIMESTAMP_SECURITY_START &&
            s <= MANAGER_TIMESTAMP_UNITS_LOAD_FINISH)
                return s - MANAGER_TIMESTAMP_SECURITY_START + MANAGER_TIMESTAMP_INITRD_SECURITY_START;
        return s;
}

int manager_allocate_idle_pipe(Manager *m) {
        int r;

        assert(m);

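        /* Lazily allocate the two pipe pairs used to hold back Type=idle services until boot-up output has
         * settled. Returns 0 if the pipes already existed, 1 if they were freshly allocated. */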
        if (m->idle_pipe[0] >= 0) {
                assert(m->idle_pipe[1] >= 0);
                assert(m->idle_pipe[2] >= 0);
                assert(m->idle_pipe[3] >= 0);
                return 0;
        }

        assert(m->idle_pipe[1] < 0);
        assert(m->idle_pipe[2] < 0);
        assert(m->idle_pipe[3] < 0);

        r = RET_NERRNO(pipe2(m->idle_pipe + 0, O_NONBLOCK|O_CLOEXEC));
        if (r < 0)
                return r;

        r = RET_NERRNO(pipe2(m->idle_pipe + 2, O_NONBLOCK|O_CLOEXEC));
        if (r < 0) {
                safe_close_pair(m->idle_pipe + 0);
                return r;
        }

        return 1;
}

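/* The per-manager fallback settings applied to units that don't configure these options themselves,
 * i.e. the Default* knobs from systemd-system.conf(5) and its user-manager counterpart. */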
void unit_defaults_init(UnitDefaults *defaults, RuntimeScope scope) {
        assert(defaults);
        assert(scope >= 0);
        assert(scope < _RUNTIME_SCOPE_MAX);

        *defaults = (UnitDefaults) {
                .std_output = EXEC_OUTPUT_JOURNAL,
                .std_error = EXEC_OUTPUT_INHERIT,
                .restart_usec = DEFAULT_RESTART_USEC,
                .timeout_start_usec = manager_default_timeout(scope),
                .timeout_stop_usec = manager_default_timeout(scope),
                .timeout_abort_usec = manager_default_timeout(scope),
                .timeout_abort_set = false,
                .device_timeout_usec = manager_default_timeout(scope),
                .start_limit_interval = DEFAULT_START_LIMIT_INTERVAL,
                .start_limit_burst = DEFAULT_START_LIMIT_BURST,

                /* On 4.15+ with unified hierarchy, CPU accounting is essentially free as it doesn't require the CPU
                 * controller to be enabled, so the default is to enable it unless we got told otherwise. */
                .cpu_accounting = cpu_accounting_is_cheap(),
                .memory_accounting = MEMORY_ACCOUNTING_DEFAULT,
                .io_accounting = false,
                .blockio_accounting = false,
                .tasks_accounting = true,
                .ip_accounting = false,

                .tasks_max = DEFAULT_TASKS_MAX,
                .timer_accuracy_usec = 1 * USEC_PER_MINUTE,

                .memory_pressure_watch = CGROUP_PRESSURE_WATCH_AUTO,
                .memory_pressure_threshold_usec = MEMORY_PRESSURE_DEFAULT_THRESHOLD_USEC,

                .oom_policy = OOM_STOP,
                .oom_score_adjust_set = false,
        };
}

void unit_defaults_done(UnitDefaults *defaults) {
        assert(defaults);

        defaults->smack_process_label = mfree(defaults->smack_process_label);
        rlimit_free_all(defaults->rlimit);
}

LogTarget manager_get_executor_log_target(Manager *m) {
        assert(m);

        /* If journald is not available tell sd-executor to go to kmsg, as it might be starting journald */

        if (manager_journal_is_running(m))
                return log_get_target();

        return LOG_TARGET_KMSG;
}

static const char *const manager_state_table[_MANAGER_STATE_MAX] = {
        [MANAGER_INITIALIZING] = "initializing",
        [MANAGER_STARTING]     = "starting",
        [MANAGER_RUNNING]      = "running",
        [MANAGER_DEGRADED]     = "degraded",
        [MANAGER_MAINTENANCE]  = "maintenance",
        [MANAGER_STOPPING]     = "stopping",
};

DEFINE_STRING_TABLE_LOOKUP(manager_state, ManagerState);

static const char *const manager_timestamp_table[_MANAGER_TIMESTAMP_MAX] = {
        [MANAGER_TIMESTAMP_FIRMWARE]                 = "firmware",
        [MANAGER_TIMESTAMP_LOADER]                   = "loader",
        [MANAGER_TIMESTAMP_KERNEL]                   = "kernel",
        [MANAGER_TIMESTAMP_INITRD]                   = "initrd",
        [MANAGER_TIMESTAMP_USERSPACE]                = "userspace",
        [MANAGER_TIMESTAMP_FINISH]                   = "finish",
        [MANAGER_TIMESTAMP_SECURITY_START]           = "security-start",
        [MANAGER_TIMESTAMP_SECURITY_FINISH]          = "security-finish",
        [MANAGER_TIMESTAMP_GENERATORS_START]         = "generators-start",
        [MANAGER_TIMESTAMP_GENERATORS_FINISH]        = "generators-finish",
        [MANAGER_TIMESTAMP_UNITS_LOAD_START]         = "units-load-start",
        [MANAGER_TIMESTAMP_UNITS_LOAD_FINISH]        = "units-load-finish",
        [MANAGER_TIMESTAMP_UNITS_LOAD]               = "units-load",
        [MANAGER_TIMESTAMP_INITRD_SECURITY_START]    = "initrd-security-start",
        [MANAGER_TIMESTAMP_INITRD_SECURITY_FINISH]   = "initrd-security-finish",
        [MANAGER_TIMESTAMP_INITRD_GENERATORS_START]  = "initrd-generators-start",
        [MANAGER_TIMESTAMP_INITRD_GENERATORS_FINISH] = "initrd-generators-finish",
        [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_START]  = "initrd-units-load-start",
        [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_FINISH] = "initrd-units-load-finish",
};

DEFINE_STRING_TABLE_LOOKUP(manager_timestamp, ManagerTimestamp);

static const char* const oom_policy_table[_OOM_POLICY_MAX] = {
        [OOM_CONTINUE] = "continue",
        [OOM_STOP]     = "stop",
        [OOM_KILL]     = "kill",
};

DEFINE_STRING_TABLE_LOOKUP(oom_policy, OOMPolicy);