/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <errno.h>
#include <fcntl.h>
#include <linux/kd.h>
#include <sys/epoll.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/reboot.h>
#include <sys/timerfd.h>
#include <sys/wait.h>
#include <unistd.h>

#if HAVE_AUDIT
#include <libaudit.h>
#endif

#include "sd-daemon.h"
#include "sd-messages.h"
#include "sd-path.h"

#include "all-units.h"
#include "alloc-util.h"
#include "audit-fd.h"
#include "boot-timestamps.h"
#include "build-path.h"
#include "bus-common-errors.h"
#include "bus-error.h"
#include "bus-kernel.h"
#include "bus-util.h"
#include "clean-ipc.h"
#include "clock-util.h"
#include "common-signal.h"
#include "confidential-virt.h"
#include "constants.h"
#include "core-varlink.h"
#include "creds-util.h"
#include "daemon-util.h"
#include "dbus-job.h"
#include "dbus-manager.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dirent-util.h"
#include "env-util.h"
#include "escape.h"
#include "event-util.h"
#include "exec-util.h"
#include "execute.h"
#include "exit-status.h"
#include "fd-util.h"
#include "fileio.h"
#include "generator-setup.h"
#include "hashmap.h"
#include "initrd-util.h"
#include "inotify-util.h"
#include "install.h"
#include "io-util.h"
#include "iovec-util.h"
#include "label-util.h"
#include "load-fragment.h"
#include "locale-setup.h"
#include "log.h"
#include "macro.h"
#include "manager.h"
#include "manager-dump.h"
#include "manager-serialize.h"
#include "memory-util.h"
#include "mkdir-label.h"
#include "mount-util.h"
#include "os-util.h"
#include "parse-util.h"
#include "path-lookup.h"
#include "path-util.h"
#include "plymouth-util.h"
#include "pretty-print.h"
#include "process-util.h"
#include "psi-util.h"
#include "ratelimit.h"
#include "rlimit-util.h"
#include "rm-rf.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "special.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "strxcpyx.h"
#include "sysctl-util.h"
#include "syslog-util.h"
#include "taint.h"
#include "terminal-util.h"
#include "time-util.h"
#include "transaction.h"
#include "uid-range.h"
#include "umask-util.h"
#include "unit-name.h"
#include "user-util.h"
#include "virt.h"
#include "watchdog.h"

#define NOTIFY_RCVBUF_SIZE (8*1024*1024)
#define CGROUPS_AGENT_RCVBUF_SIZE (8*1024*1024)

/* Initial delay and the interval for printing status messages about running jobs */
#define JOBS_IN_PROGRESS_WAIT_USEC (2*USEC_PER_SEC)
#define JOBS_IN_PROGRESS_QUIET_WAIT_USEC (25*USEC_PER_SEC)
#define JOBS_IN_PROGRESS_PERIOD_USEC (USEC_PER_SEC / 3)
#define JOBS_IN_PROGRESS_PERIOD_DIVISOR 3

/* If there are more than 1K bus messages queued across our API and direct buses, then let's not add more
 * on top until the queue empties out a bit. */
#define MANAGER_BUS_BUSY_THRESHOLD 1024LU

/* How many units and jobs to process of the bus queue before returning to the event loop. */
#define MANAGER_BUS_MESSAGE_BUDGET 100U

#define DEFAULT_TASKS_MAX ((CGroupTasksMax) { 15U, 100U }) /* 15% */

static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_handoff_timestamp_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata);
static int manager_dispatch_run_queue(sd_event_source *source, void *userdata);
static int manager_dispatch_sigchld(sd_event_source *source, void *userdata);
static int manager_dispatch_timezone_change(sd_event_source *source, const struct inotify_event *event, void *userdata);
static int manager_run_environment_generators(Manager *m);
static int manager_run_generators(Manager *m);
static void manager_vacuum(Manager *m);

static usec_t manager_watch_jobs_next_time(Manager *m) {
        usec_t timeout;

        if (MANAGER_IS_USER(m))
                /* Let the user manager show status after a shorter delay, so the system manager can make
                 * use of it, if it wants to. */
                timeout = JOBS_IN_PROGRESS_WAIT_USEC * 2 / 3;
        else if (show_status_on(m->show_status))
                /* When status is on, just use the usual timeout. */
                timeout = JOBS_IN_PROGRESS_WAIT_USEC;
        else
                timeout = JOBS_IN_PROGRESS_QUIET_WAIT_USEC;

        return usec_add(now(CLOCK_MONOTONIC), timeout);
}

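/* Note: even when confirm_spawn is configured, prompting is suppressed while the flag file checked
 * below exists, i.e. confirmation can also be switched off at runtime. */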
static bool manager_is_confirm_spawn_disabled(Manager *m) {
        assert(m);

        if (!m->confirm_spawn)
                return true;

        return access("/run/systemd/confirm_spawn_disabled", F_OK) >= 0;
}

static void manager_watch_jobs_in_progress(Manager *m) {
        usec_t next;
        int r;

        assert(m);

        /* We do not want to show the cylon animation if the user needs to confirm service executions,
         * as the confirmation messages would otherwise be garbled by the cylon animation. */
        if (!manager_is_confirm_spawn_disabled(m))
                return;

        if (m->jobs_in_progress_event_source)
                return;

        next = manager_watch_jobs_next_time(m);
        r = sd_event_add_time(
                        m->event,
                        &m->jobs_in_progress_event_source,
                        CLOCK_MONOTONIC,
                        next, 0,
                        manager_dispatch_jobs_in_progress, m);
        if (r < 0)
                return;

        (void) sd_event_source_set_description(m->jobs_in_progress_event_source, "manager-jobs-in-progress");
}

static void manager_flip_auto_status(Manager *m, bool enable, const char *reason) {
        assert(m);

        if (enable) {
                if (m->show_status == SHOW_STATUS_AUTO)
                        manager_set_show_status(m, SHOW_STATUS_TEMPORARY, reason);
        } else {
                if (m->show_status == SHOW_STATUS_TEMPORARY)
                        manager_set_show_status(m, SHOW_STATUS_AUTO, reason);
        }
}

static void manager_print_jobs_in_progress(Manager *m) {
        Job *j;
        unsigned counter = 0, print_nr;
        char cylon[6 + CYLON_BUFFER_EXTRA + 1];
        unsigned cylon_pos;
        uint64_t timeout = 0;

        assert(m);
        assert(m->n_running_jobs > 0);

        manager_flip_auto_status(m, true, "delay");

        print_nr = (m->jobs_in_progress_iteration / JOBS_IN_PROGRESS_PERIOD_DIVISOR) % m->n_running_jobs;

        HASHMAP_FOREACH(j, m->jobs)
                if (j->state == JOB_RUNNING && counter++ == print_nr)
                        break;

        /* m->n_running_jobs must be consistent with the contents of m->jobs,
         * so the above loop must have succeeded in finding j. */
        assert(counter == print_nr + 1);
        assert(j);

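        /* Bounce the beam position back and forth: iterations 0..13 map to positions 0..7 and then
         * back down 6..1, producing the left-right "cylon" sweep. */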
        cylon_pos = m->jobs_in_progress_iteration % 14;
        if (cylon_pos >= 8)
                cylon_pos = 14 - cylon_pos;
        draw_cylon(cylon, sizeof(cylon), 6, cylon_pos);

        m->jobs_in_progress_iteration++;

        char job_of_n[STRLEN("( of ) ") + DECIMAL_STR_MAX(unsigned)*2] = "";
        if (m->n_running_jobs > 1)
                xsprintf(job_of_n, "(%u of %u) ", counter, m->n_running_jobs);

        (void) job_get_timeout(j, &timeout);

        /* We want to use enough information for the user to identify previous lines talking about the same
         * unit, but keep the message as short as possible. So if 'Starting foo.service' or 'Starting
         * foo.service - Description' were used, 'foo.service' is enough here. On the other hand, if we used
         * 'Starting Description' before, then we shall also use 'Description' here. So we pass NULL as the
         * second argument to unit_status_string(). */
        const char *ident = unit_status_string(j->unit, NULL);

        const char *time = FORMAT_TIMESPAN(now(CLOCK_MONOTONIC) - j->begin_usec, 1*USEC_PER_SEC);
        const char *limit = timeout > 0 ? FORMAT_TIMESPAN(timeout - j->begin_usec, 1*USEC_PER_SEC) : "no limit";

        if (m->status_unit_format == STATUS_UNIT_FORMAT_DESCRIPTION)
                /* When using 'Description', we effectively don't have enough space to show the nested status
                 * without ellipsization, so let's not even try. */
                manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
                                      "%sA %s job is running for %s (%s / %s)",
                                      job_of_n,
                                      job_type_to_string(j->type),
                                      ident,
                                      time, limit);
        else {
                const char *status_text = unit_status_text(j->unit);

                manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
                                      "%sJob %s/%s running (%s / %s)%s%s",
                                      job_of_n,
                                      ident,
                                      job_type_to_string(j->type),
                                      time, limit,
                                      status_text ? ": " : "",
                                      strempty(status_text));
        }

        (void) sd_notifyf(/* unset_environment= */ false,
                          "STATUS=%sUser job %s/%s running (%s / %s)...",
                          job_of_n,
                          ident, job_type_to_string(j->type),
                          time, limit);
        m->status_ready = false;
}

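/* Returns true if at least one password query ("ask.*" file) is currently pending in
 * /run/systemd/ask-password, false if none is (or the directory does not exist yet), and a negative
 * errno on error. */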
static int have_ask_password(void) {
        _cleanup_closedir_ DIR *dir = NULL;

        dir = opendir("/run/systemd/ask-password");
        if (!dir) {
                if (errno == ENOENT)
                        return false;
                else
                        return -errno;
        }

        FOREACH_DIRENT_ALL(de, dir, return -errno)
                if (startswith(de->d_name, "ask."))
                        return true;
        return false;
}

static int manager_dispatch_ask_password_fd(sd_event_source *source,
                                            int fd, uint32_t revents, void *userdata) {
        Manager *m = ASSERT_PTR(userdata);

        (void) flush_fd(fd);

        m->have_ask_password = have_ask_password();
        if (m->have_ask_password < 0)
                /* Log error but continue. Negative have_ask_password
                 * is treated as unknown status. */
                log_error_errno(m->have_ask_password, "Failed to list /run/systemd/ask-password: %m");

        return 0;
}

static void manager_close_ask_password(Manager *m) {
        assert(m);

        m->ask_password_event_source = sd_event_source_disable_unref(m->ask_password_event_source);
        m->ask_password_inotify_fd = safe_close(m->ask_password_inotify_fd);
        m->have_ask_password = -EINVAL;
}

static int manager_check_ask_password(Manager *m) {
        int r;

        assert(m);

        if (!m->ask_password_event_source) {
                assert(m->ask_password_inotify_fd < 0);

                (void) mkdir_p_label("/run/systemd/ask-password", 0755);

                m->ask_password_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->ask_password_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create inotify object: %m");

                r = inotify_add_watch_and_warn(m->ask_password_inotify_fd,
                                               "/run/systemd/ask-password",
                                               IN_CREATE|IN_DELETE|IN_MOVE);
                if (r < 0) {
                        manager_close_ask_password(m);
                        return r;
                }

                r = sd_event_add_io(m->event, &m->ask_password_event_source,
                                    m->ask_password_inotify_fd, EPOLLIN,
                                    manager_dispatch_ask_password_fd, m);
                if (r < 0) {
                        log_error_errno(r, "Failed to add event source for /run/systemd/ask-password: %m");
                        manager_close_ask_password(m);
                        return r;
                }

                (void) sd_event_source_set_description(m->ask_password_event_source, "manager-ask-password");

                /* Queries might have been added meanwhile... */
                manager_dispatch_ask_password_fd(m->ask_password_event_source,
                                                 m->ask_password_inotify_fd, EPOLLIN, m);
        }

        return m->have_ask_password;
}

static int manager_watch_idle_pipe(Manager *m) {
        int r;

        assert(m);

        if (m->idle_pipe_event_source)
                return 0;

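        /* idle_pipe[] holds two pipe pairs (see manager_close_idle_pipe() below); [2] is the read side
         * of the second pair, which is what we watch here. */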
        if (m->idle_pipe[2] < 0)
                return 0;

        r = sd_event_add_io(m->event, &m->idle_pipe_event_source, m->idle_pipe[2], EPOLLIN, manager_dispatch_idle_pipe_fd, m);
        if (r < 0)
                return log_error_errno(r, "Failed to watch idle pipe: %m");

        (void) sd_event_source_set_description(m->idle_pipe_event_source, "manager-idle-pipe");

        return 0;
}

static void manager_close_idle_pipe(Manager *m) {
        assert(m);

        m->idle_pipe_event_source = sd_event_source_disable_unref(m->idle_pipe_event_source);

        safe_close_pair(m->idle_pipe);
        safe_close_pair(m->idle_pipe + 2);
}

static int manager_setup_time_change(Manager *m) {
        int r;

        assert(m);

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        m->time_change_event_source = sd_event_source_disable_unref(m->time_change_event_source);

        r = event_add_time_change(m->event, &m->time_change_event_source, manager_dispatch_time_change_fd, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create time change event source: %m");

        /* Schedule this slightly earlier than the .timer event sources */
        r = sd_event_source_set_priority(m->time_change_event_source, EVENT_PRIORITY_TIME_CHANGE);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of time change event sources: %m");

        log_debug("Set up TFD_TIMER_CANCEL_ON_SET timerfd.");

        return 0;
}

static int manager_read_timezone_stat(Manager *m) {
        struct stat st;
        bool changed;

        assert(m);

        /* Read the current stat() data of /etc/localtime so that we detect changes */
        if (lstat("/etc/localtime", &st) < 0) {
                log_debug_errno(errno, "Failed to stat /etc/localtime, ignoring: %m");
                changed = m->etc_localtime_accessible;
                m->etc_localtime_accessible = false;
        } else {
                usec_t k;

                k = timespec_load(&st.st_mtim);
                changed = !m->etc_localtime_accessible || k != m->etc_localtime_mtime;

                m->etc_localtime_mtime = k;
                m->etc_localtime_accessible = true;
        }

        return changed;
}

static int manager_setup_timezone_change(Manager *m) {
        _cleanup_(sd_event_source_unrefp) sd_event_source *new_event = NULL;
        int r;

        assert(m);

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        /* We watch /etc/localtime for three events: change of the link count (which might mean removal from /etc even
         * though another link might be kept), renames, and file close operations after writing. Note we don't bother
         * with IN_DELETE_SELF, as that would just report when the inode is removed entirely, i.e. after the link count
         * went to zero and all fds to it are closed.
         *
         * Note that we never follow symlinks here. This is a simplification, but should cover almost all cases
         * correctly.
         *
         * Note that we create the new event source first here, before releasing the old one. This should optimize
         * behaviour as this way sd-event can reuse the old watch in case the inode didn't change. */

        r = sd_event_add_inotify(m->event, &new_event, "/etc/localtime",
                                 IN_ATTRIB|IN_MOVE_SELF|IN_CLOSE_WRITE|IN_DONT_FOLLOW, manager_dispatch_timezone_change, m);
        if (r == -ENOENT) {
                /* If the file doesn't exist yet, subscribe to /etc instead, and wait until it is created
                 * either with O_CREAT or by rename() */

                log_debug_errno(r, "/etc/localtime doesn't exist yet, watching /etc instead.");
                r = sd_event_add_inotify(m->event, &new_event, "/etc",
                                         IN_CREATE|IN_MOVED_TO|IN_ONLYDIR, manager_dispatch_timezone_change, m);
        }
        if (r < 0)
                return log_error_errno(r, "Failed to create timezone change event source: %m");

        /* Schedule this slightly earlier than the .timer event sources */
        r = sd_event_source_set_priority(new_event, EVENT_PRIORITY_TIME_ZONE);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of timezone change event sources: %m");

        sd_event_source_unref(m->timezone_change_event_source);
        m->timezone_change_event_source = TAKE_PTR(new_event);

        return 0;
}

static int enable_special_signals(Manager *m) {
        _cleanup_close_ int fd = -EBADF;

        assert(m);

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        /* Enable that we get SIGINT on control-alt-del. In containers this will fail with EPERM (older) or
         * EINVAL (newer), so ignore that. */
        if (reboot(RB_DISABLE_CAD) < 0 && !IN_SET(errno, EPERM, EINVAL))
                log_warning_errno(errno, "Failed to enable ctrl-alt-del handling, ignoring: %m");

        fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
        if (fd < 0)
                /* Support systems without virtual console (ENOENT) gracefully */
                log_full_errno(fd == -ENOENT ? LOG_DEBUG : LOG_WARNING, fd, "Failed to open /dev/tty0, ignoring: %m");
        else {
                /* Enable that we get SIGWINCH on kbrequest */
                if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
                        log_warning_errno(errno, "Failed to enable kbrequest handling, ignoring: %m");
        }

        return 0;
}

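/* Evaluates to the given signal number if it lies within the realtime signal range available on this
 * architecture, or to -1 otherwise (see the hppa note below for why the range may be smaller). */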
#define RTSIG_IF_AVAILABLE(signum) (signum <= SIGRTMAX ? signum : -1)

static int manager_setup_signals(Manager *m) {
        struct sigaction sa = {
                .sa_handler = SIG_DFL,
                .sa_flags = SA_NOCLDSTOP|SA_RESTART,
        };
        sigset_t mask;
        int r;

        assert(m);

        assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);

        /* We make liberal use of realtime signals here. On
         * Linux/glibc we have 30 of them (with the exception of Linux
         * on hppa, see below), between SIGRTMIN+0 ... SIGRTMIN+30
         * (aka SIGRTMAX). */

        assert_se(sigemptyset(&mask) == 0);
        sigset_add_many(&mask,
                        SIGCHLD,     /* Child died */
                        SIGTERM,     /* Reexecute daemon */
                        SIGHUP,      /* Reload configuration */
                        SIGUSR1,     /* systemd: reconnect to D-Bus */
                        SIGUSR2,     /* systemd: dump status */
                        SIGINT,      /* Kernel sends us this on control-alt-del */
                        SIGWINCH,    /* Kernel sends us this on kbrequest (alt-arrowup) */
                        SIGPWR,      /* Some kernel drivers and upsd send us this on power failure */

                        SIGRTMIN+0,  /* systemd: start default.target */
                        SIGRTMIN+1,  /* systemd: isolate rescue.target */
                        SIGRTMIN+2,  /* systemd: isolate emergency.target */
                        SIGRTMIN+3,  /* systemd: start halt.target */
                        SIGRTMIN+4,  /* systemd: start poweroff.target */
                        SIGRTMIN+5,  /* systemd: start reboot.target */
                        SIGRTMIN+6,  /* systemd: start kexec.target */
                        SIGRTMIN+7,  /* systemd: start soft-reboot.target */

                        /* ... space for more special targets ... */

                        SIGRTMIN+13, /* systemd: Immediate halt */
                        SIGRTMIN+14, /* systemd: Immediate poweroff */
                        SIGRTMIN+15, /* systemd: Immediate reboot */
                        SIGRTMIN+16, /* systemd: Immediate kexec */
                        SIGRTMIN+17, /* systemd: Immediate soft-reboot */
                        SIGRTMIN+18, /* systemd: control command */

                        /* ... space ... */

                        SIGRTMIN+20, /* systemd: enable status messages */
                        SIGRTMIN+21, /* systemd: disable status messages */
                        SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
                        SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
                        SIGRTMIN+24, /* systemd: Immediate exit (--user only) */
                        SIGRTMIN+25, /* systemd: reexecute manager */

                        /* Apparently Linux on hppa had fewer RT signals until v3.18,
                         * SIGRTMAX was SIGRTMIN+25, and then SIGRTMIN was lowered,
                         * see commit v3.17-7614-g1f25df2eff.
                         *
                         * We cannot unconditionally make use of those signals here,
                         * so let's use a runtime check. Since these commands are
                         * accessible by different means and only really a safety
                         * net, the missing functionality on hppa shouldn't matter.
                         */

                        RTSIG_IF_AVAILABLE(SIGRTMIN+26), /* systemd: set log target to journal-or-kmsg */
                        RTSIG_IF_AVAILABLE(SIGRTMIN+27), /* systemd: set log target to console */
                        RTSIG_IF_AVAILABLE(SIGRTMIN+28), /* systemd: set log target to kmsg */
                        RTSIG_IF_AVAILABLE(SIGRTMIN+29), /* systemd: set log target to syslog-or-kmsg (obsolete) */

                        /* ... one free signal here SIGRTMIN+30 ... */
                        -1);
        assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);

        m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        if (m->signal_fd < 0)
                return -errno;

        r = sd_event_add_io(m->event, &m->signal_event_source, m->signal_fd, EPOLLIN, manager_dispatch_signal_fd, m);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->signal_event_source, "manager-signal");

        /* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
         * notify processing can still figure out to which process/service a message belongs, before we reap the
         * process. Also, process this before handling cgroup notifications, so that we always collect child exit
         * status information before detecting that there's no process in a cgroup. */
        r = sd_event_source_set_priority(m->signal_event_source, EVENT_PRIORITY_SIGNALS);
        if (r < 0)
                return r;

        /* Report to supervisor that we now process the above signals. We report this as level "2", to
         * indicate that we support more than sysvinit's signals (of course, sysvinit never sent this
         * message, but conceptually it makes sense to consider level "1" to be equivalent to sysvinit's
         * signal handling). Also, by setting this to "2" people looking for this hopefully won't
         * misunderstand this as a boolean concept. Signal level 2 shall refer to the signals PID 1
         * understands at the time of release of systemd v256, i.e. including basic SIGRTMIN+18 handling for
         * memory pressure and stuff. When more signals are hooked up (or more SIGRTMIN+18 multiplex
         * operations are added), this level should be increased. */
        (void) sd_notify(/* unset_environment= */ false,
                         "X_SYSTEMD_SIGNALS_LEVEL=2");

        if (MANAGER_IS_SYSTEM(m))
                return enable_special_signals(m);

        return 0;
}

static char** sanitize_environment(char **l) {

        /* Let's remove some environment variables that we need ourselves to communicate with our clients */
        strv_env_unset_many(
                        l,
                        "CACHE_DIRECTORY",
                        "CONFIGURATION_DIRECTORY",
                        "CREDENTIALS_DIRECTORY",
                        "EXIT_CODE",
                        "EXIT_STATUS",
                        "INVOCATION_ID",
                        "JOURNAL_STREAM",
                        "LISTEN_FDNAMES",
                        "LISTEN_FDS",
                        "LISTEN_PID",
                        "LOGS_DIRECTORY",
                        "LOG_NAMESPACE",
                        "MAINPID",
                        "MANAGERPID",
                        "MEMORY_PRESSURE_WATCH",
                        "MEMORY_PRESSURE_WRITE",
                        "MONITOR_EXIT_CODE",
                        "MONITOR_EXIT_STATUS",
                        "MONITOR_INVOCATION_ID",
                        "MONITOR_SERVICE_RESULT",
                        "MONITOR_UNIT",
                        "NOTIFY_SOCKET",
                        "PIDFILE",
                        "REMOTE_ADDR",
                        "REMOTE_PORT",
                        "RUNTIME_DIRECTORY",
                        "SERVICE_RESULT",
                        "STATE_DIRECTORY",
                        "SYSTEMD_EXEC_PID",
                        "TRIGGER_PATH",
                        "TRIGGER_TIMER_MONOTONIC_USEC",
                        "TRIGGER_TIMER_REALTIME_USEC",
                        "TRIGGER_UNIT",
                        "WATCHDOG_PID",
                        "WATCHDOG_USEC");

        /* Let's order the environment alphabetically, just to make it pretty */
        return strv_sort(l);
}

int manager_default_environment(Manager *m) {
        assert(m);

        m->transient_environment = strv_free(m->transient_environment);

        if (MANAGER_IS_SYSTEM(m)) {
                /* The system manager always starts with a clean environment for its children. It does not
                 * import the kernel's or the parents' exported variables.
                 *
                 * The initial passed environment is untouched to keep /proc/self/environ valid; it is used
                 * for tagging the init process inside containers. */
                char *path = strjoin("PATH=", default_PATH());
                if (!path)
                        return log_oom();

                if (strv_consume(&m->transient_environment, path) < 0)
                        return log_oom();

                /* Import locale variables LC_*= from configuration */
                (void) locale_setup(&m->transient_environment);
        } else {
                /* The user manager passes its own environment along to its children, except for $PATH and
                 * session envs. */

                m->transient_environment = strv_copy(environ);
                if (!m->transient_environment)
                        return log_oom();

                char *path = strjoin("PATH=", default_user_PATH());
                if (!path)
                        return log_oom();

                if (strv_env_replace_consume(&m->transient_environment, path) < 0)
                        return log_oom();

                /* Envvars set for our 'manager' class session are private and should not be propagated
                 * to children. Also it's likely that the graphical session will set these on their own. */
                strv_env_unset_many(m->transient_environment,
                                    "XDG_SESSION_ID",
                                    "XDG_SESSION_CLASS",
                                    "XDG_SESSION_TYPE",
                                    "XDG_SESSION_DESKTOP",
                                    "XDG_SEAT",
                                    "XDG_VTNR");
        }

        sanitize_environment(m->transient_environment);
        return 0;
}

static int manager_setup_prefix(Manager *m) {
        struct table_entry {
                uint64_t type;
                const char *suffix;
        };

        static const struct table_entry paths_system[_EXEC_DIRECTORY_TYPE_MAX] = {
                [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_SYSTEM_RUNTIME, NULL },
                [EXEC_DIRECTORY_STATE] = { SD_PATH_SYSTEM_STATE_PRIVATE, NULL },
                [EXEC_DIRECTORY_CACHE] = { SD_PATH_SYSTEM_STATE_CACHE, NULL },
                [EXEC_DIRECTORY_LOGS] = { SD_PATH_SYSTEM_STATE_LOGS, NULL },
                [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_SYSTEM_CONFIGURATION, NULL },
        };

        static const struct table_entry paths_user[_EXEC_DIRECTORY_TYPE_MAX] = {
                [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_USER_RUNTIME, NULL },
                [EXEC_DIRECTORY_STATE] = { SD_PATH_USER_STATE_PRIVATE, NULL },
                [EXEC_DIRECTORY_CACHE] = { SD_PATH_USER_STATE_CACHE, NULL },
                [EXEC_DIRECTORY_LOGS] = { SD_PATH_USER_STATE_PRIVATE, "log" },
                [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_USER_CONFIGURATION, NULL },
        };

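        /* Note that for the user instance the log directory is derived from the private state
         * directory, with a "log" suffix appended. */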
        assert(m);

        const struct table_entry *p = MANAGER_IS_SYSTEM(m) ? paths_system : paths_user;
        int r;

        for (ExecDirectoryType i = 0; i < _EXEC_DIRECTORY_TYPE_MAX; i++) {
                r = sd_path_lookup(p[i].type, p[i].suffix, &m->prefix[i]);
                if (r < 0)
                        return log_warning_errno(r, "Failed to lookup %s path: %m",
                                                 exec_directory_type_to_string(i));
        }

        return 0;
}

static void manager_free_unit_name_maps(Manager *m) {
        m->unit_id_map = hashmap_free(m->unit_id_map);
        m->unit_name_map = hashmap_free(m->unit_name_map);
        m->unit_path_cache = set_free(m->unit_path_cache);
        m->unit_cache_timestamp_hash = 0;
}

static int manager_setup_run_queue(Manager *m) {
        int r;

        assert(m);
        assert(!m->run_queue_event_source);

        r = sd_event_add_defer(m->event, &m->run_queue_event_source, manager_dispatch_run_queue, m);
        if (r < 0)
                return r;

        r = sd_event_source_set_priority(m->run_queue_event_source, EVENT_PRIORITY_RUN_QUEUE);
        if (r < 0)
                return r;

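        /* Start out disabled; the defer source is presumably re-enabled whenever jobs are added to the
         * run queue, so that it only fires while there is work to dispatch. */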
        r = sd_event_source_set_enabled(m->run_queue_event_source, SD_EVENT_OFF);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->run_queue_event_source, "manager-run-queue");

        return 0;
}

static int manager_setup_sigchld_event_source(Manager *m) {
        int r;

        assert(m);
        assert(!m->sigchld_event_source);

        r = sd_event_add_defer(m->event, &m->sigchld_event_source, manager_dispatch_sigchld, m);
        if (r < 0)
                return r;

        r = sd_event_source_set_priority(m->sigchld_event_source, EVENT_PRIORITY_SIGCHLD);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->sigchld_event_source, "manager-sigchld");

        return 0;
}

int manager_setup_memory_pressure_event_source(Manager *m) {
        int r;

        assert(m);

        m->memory_pressure_event_source = sd_event_source_disable_unref(m->memory_pressure_event_source);

        r = sd_event_add_memory_pressure(m->event, &m->memory_pressure_event_source, NULL, NULL);
        if (r < 0)
                log_full_errno(ERRNO_IS_NOT_SUPPORTED(r) || ERRNO_IS_PRIVILEGE(r) || (r == -EHOSTDOWN) ? LOG_DEBUG : LOG_NOTICE, r,
                               "Failed to establish memory pressure event source, ignoring: %m");
        else if (m->defaults.memory_pressure_threshold_usec != USEC_INFINITY) {

                /* If there's a default memory pressure threshold set, also apply it to the service manager itself */
                r = sd_event_source_set_memory_pressure_period(
                                m->memory_pressure_event_source,
                                m->defaults.memory_pressure_threshold_usec,
                                MEMORY_PRESSURE_DEFAULT_WINDOW_USEC);
                if (r < 0)
                        log_warning_errno(r, "Failed to adjust memory pressure threshold, ignoring: %m");
        }

        return 0;
}

static int manager_find_credentials_dirs(Manager *m) {
        const char *e;
        int r;

        assert(m);

        r = get_credentials_dir(&e);
        if (r < 0) {
                if (r != -ENXIO)
                        log_debug_errno(r, "Failed to determine credentials directory, ignoring: %m");
        } else {
                m->received_credentials_directory = strdup(e);
                if (!m->received_credentials_directory)
                        return -ENOMEM;
        }

        r = get_encrypted_credentials_dir(&e);
        if (r < 0) {
                if (r != -ENXIO)
                        log_debug_errno(r, "Failed to determine encrypted credentials directory, ignoring: %m");
        } else {
                m->received_encrypted_credentials_directory = strdup(e);
                if (!m->received_encrypted_credentials_directory)
                        return -ENOMEM;
        }

        return 0;
}

void manager_set_switching_root(Manager *m, bool switching_root) {
        assert(m);

        m->switching_root = MANAGER_IS_SYSTEM(m) && switching_root;
}

double manager_get_progress(Manager *m) {
        assert(m);

        if (MANAGER_IS_FINISHED(m) || m->n_installed_jobs == 0)
                return 1.0;

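        /* Progress is the fraction of the initially installed jobs that have left the job table,
         * i.e. 1 - remaining/installed. */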
        return 1.0 - ((double) hashmap_size(m->jobs) / (double) m->n_installed_jobs);
}

static int compare_job_priority(const void *a, const void *b) {
        const Job *x = a, *y = b;

        return unit_compare_priority(x->unit, y->unit);
}

int manager_new(RuntimeScope runtime_scope, ManagerTestRunFlags test_run_flags, Manager **ret) {
        _cleanup_(manager_freep) Manager *m = NULL;
        int r;

        assert(IN_SET(runtime_scope, RUNTIME_SCOPE_SYSTEM, RUNTIME_SCOPE_USER));
        assert(ret);

        m = new(Manager, 1);
        if (!m)
                return -ENOMEM;

        *m = (Manager) {
                .runtime_scope = runtime_scope,
                .objective = _MANAGER_OBJECTIVE_INVALID,
                .previous_objective = _MANAGER_OBJECTIVE_INVALID,

                .status_unit_format = STATUS_UNIT_FORMAT_DEFAULT,

                .original_log_level = -1,
                .original_log_target = _LOG_TARGET_INVALID,

                .watchdog_overridden[WATCHDOG_RUNTIME] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_REBOOT] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_KEXEC] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_PRETIMEOUT] = USEC_INFINITY,

                .show_status_overridden = _SHOW_STATUS_INVALID,

                .notify_fd = -EBADF,
                .cgroups_agent_fd = -EBADF,
                .signal_fd = -EBADF,
                .user_lookup_fds = EBADF_PAIR,
                .handoff_timestamp_fds = EBADF_PAIR,
                .private_listen_fd = -EBADF,
                .dev_autofs_fd = -EBADF,
                .cgroup_inotify_fd = -EBADF,
                .pin_cgroupfs_fd = -EBADF,
                .ask_password_inotify_fd = -EBADF,
                .idle_pipe = { -EBADF, -EBADF, -EBADF, -EBADF },

                /* start as id #1, so that we can leave #0 around as "null-like" value */
                .current_job_id = 1,

                .have_ask_password = -EINVAL, /* we don't know */
                .first_boot = -1,
                .test_run_flags = test_run_flags,

                .dump_ratelimit = (const RateLimit) { .interval = 10 * USEC_PER_MINUTE, .burst = 10 },

                .executor_fd = -EBADF,
        };

        unit_defaults_init(&m->defaults, runtime_scope);

#if ENABLE_EFI
        if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0)
                boot_timestamps(m->timestamps + MANAGER_TIMESTAMP_USERSPACE,
                                m->timestamps + MANAGER_TIMESTAMP_FIRMWARE,
                                m->timestamps + MANAGER_TIMESTAMP_LOADER);
#endif

        /* Prepare log fields we can use for structured logging */
        if (MANAGER_IS_SYSTEM(m)) {
                m->unit_log_field = "UNIT=";
                m->unit_log_format_string = "UNIT=%s";

                m->invocation_log_field = "INVOCATION_ID=";
                m->invocation_log_format_string = "INVOCATION_ID=%s";
        } else {
                m->unit_log_field = "USER_UNIT=";
                m->unit_log_format_string = "USER_UNIT=%s";

                m->invocation_log_field = "USER_INVOCATION_ID=";
                m->invocation_log_format_string = "USER_INVOCATION_ID=%s";
        }

        /* Reboot immediately if the user hits C-A-D more often than 7x per 2s */
        m->ctrl_alt_del_ratelimit = (const RateLimit) { .interval = 2 * USEC_PER_SEC, .burst = 7 };

        r = manager_default_environment(m);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->units, &string_hash_ops);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->cgroup_unit, &path_hash_ops);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->watch_bus, &string_hash_ops);
        if (r < 0)
                return r;

        r = prioq_ensure_allocated(&m->run_queue, compare_job_priority);
        if (r < 0)
                return r;

        r = manager_setup_prefix(m);
        if (r < 0)
                return r;

        r = manager_find_credentials_dirs(m);
        if (r < 0)
                return r;

        r = sd_event_default(&m->event);
        if (r < 0)
                return r;

        r = manager_setup_run_queue(m);
        if (r < 0)
                return r;

        if (FLAGS_SET(test_run_flags, MANAGER_TEST_RUN_MINIMAL)) {
                m->cgroup_root = strdup("");
                if (!m->cgroup_root)
                        return -ENOMEM;
        } else {
                r = manager_setup_signals(m);
                if (r < 0)
                        return r;

                r = manager_setup_cgroup(m);
                if (r < 0)
                        return r;

                r = manager_setup_time_change(m);
                if (r < 0)
                        return r;

                r = manager_read_timezone_stat(m);
                if (r < 0)
                        return r;

                (void) manager_setup_timezone_change(m);

                r = manager_setup_sigchld_event_source(m);
                if (r < 0)
                        return r;

                r = manager_setup_memory_pressure_event_source(m);
                if (r < 0)
                        return r;

#if HAVE_LIBBPF
                if (MANAGER_IS_SYSTEM(m) && bpf_restrict_fs_supported(/* initialize = */ true)) {
                        r = bpf_restrict_fs_setup(m);
                        if (r < 0)
                                log_warning_errno(r, "Failed to setup LSM BPF, ignoring: %m");
                }
#endif
        }

        if (test_run_flags == 0) {
                if (MANAGER_IS_SYSTEM(m))
                        r = mkdir_label("/run/systemd/units", 0755);
                else {
                        _cleanup_free_ char *units_path = NULL;
                        r = xdg_user_runtime_dir(&units_path, "/systemd/units");
                        if (r < 0)
                                return r;
                        r = mkdir_p_label(units_path, 0755);
                }

                if (r < 0 && r != -EEXIST)
                        return r;
        }

        if (!FLAGS_SET(test_run_flags, MANAGER_TEST_DONT_OPEN_EXECUTOR)) {
                m->executor_fd = pin_callout_binary(SYSTEMD_EXECUTOR_BINARY_PATH);
                if (m->executor_fd < 0)
                        return log_debug_errno(m->executor_fd, "Failed to pin executor binary: %m");

                _cleanup_free_ char *executor_path = NULL;
                r = fd_get_path(m->executor_fd, &executor_path);
                if (r < 0)
                        return r;

                log_debug("Using systemd-executor binary from '%s'.", executor_path);
        }

        /* Note that we do not set up the notify fd here. We do that after deserialization,
         * since it might have gotten serialized across the reexec. */

        *ret = TAKE_PTR(m);

        return 0;
}

static int manager_setup_notify(Manager *m) {
        int r;

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        if (m->notify_fd < 0) {
                _cleanup_close_ int fd = -EBADF;
                union sockaddr_union sa;
                socklen_t sa_len;

                /* First free all secondary fields */
                m->notify_socket = mfree(m->notify_socket);
                m->notify_event_source = sd_event_source_disable_unref(m->notify_event_source);

                fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
                if (fd < 0)
                        return log_error_errno(errno, "Failed to allocate notification socket: %m");

                fd_increase_rxbuf(fd, NOTIFY_RCVBUF_SIZE);

                m->notify_socket = path_join(m->prefix[EXEC_DIRECTORY_RUNTIME], "systemd/notify");
                if (!m->notify_socket)
                        return log_oom();

                r = sockaddr_un_set_path(&sa.un, m->notify_socket);
                if (r < 0)
                        return log_error_errno(r, "Notify socket '%s' not valid for AF_UNIX socket address, refusing.",
                                               m->notify_socket);
                sa_len = r;

                (void) mkdir_parents_label(m->notify_socket, 0755);
                (void) sockaddr_un_unlink(&sa.un);

                r = mac_selinux_bind(fd, &sa.sa, sa_len);
                if (r < 0)
                        return log_error_errno(r, "bind(%s) failed: %m", m->notify_socket);

                r = setsockopt_int(fd, SOL_SOCKET, SO_PASSCRED, true);
                if (r < 0)
                        return log_error_errno(r, "SO_PASSCRED failed: %m");

                m->notify_fd = TAKE_FD(fd);

                log_debug("Using notification socket %s", m->notify_socket);
        }

        if (!m->notify_event_source) {
                r = sd_event_add_io(m->event, &m->notify_event_source, m->notify_fd, EPOLLIN, manager_dispatch_notify_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate notify event source: %m");

                /* Process notification messages a bit earlier than SIGCHLD, so that we can still identify to which
                 * service an exit message belongs. */
                r = sd_event_source_set_priority(m->notify_event_source, EVENT_PRIORITY_NOTIFY);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of notify event source: %m");

                (void) sd_event_source_set_description(m->notify_event_source, "manager-notify");
        }

        return 0;
}

static int manager_setup_cgroups_agent(Manager *m) {

        static const union sockaddr_union sa = {
                .un.sun_family = AF_UNIX,
                .un.sun_path = "/run/systemd/cgroups-agent",
        };
        int r;

        /* This creates a listening socket we receive cgroups agent messages on. We do not use D-Bus for delivering
         * these messages from the cgroups agent binary to PID 1, as the cgroups agent binary is very short-living, and
         * each instance of it needs a new D-Bus connection. Since D-Bus connections are SOCK_STREAM/AF_UNIX, on
         * overloaded systems the backlog of the D-Bus socket becomes relevant, as no more than the configured
         * number of D-Bus connections may be queued before the kernel starts dropping further incoming connections,
         * possibly resulting in lost cgroups agent messages. To avoid this, we'll use a private SOCK_DGRAM/AF_UNIX
         * socket, where no backlog is relevant as communication may take place without an actual connect() cycle, and
         * we thus won't lose messages.
         *
         * Note that PID 1 will forward the agent message to system bus, so that the user systemd instance may listen
         * to it. The system instance hence listens on this special socket, but the user instances listen on the system
         * bus for these messages. */

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        if (!MANAGER_IS_SYSTEM(m))
                return 0;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether unified cgroups hierarchy is used: %m");
        if (r > 0) /* We don't need this anymore on the unified hierarchy */
                return 0;

        if (m->cgroups_agent_fd < 0) {
                _cleanup_close_ int fd = -EBADF;

                /* First free all secondary fields */
                m->cgroups_agent_event_source = sd_event_source_disable_unref(m->cgroups_agent_event_source);

                fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
                if (fd < 0)
                        return log_error_errno(errno, "Failed to allocate cgroups agent socket: %m");

                fd_increase_rxbuf(fd, CGROUPS_AGENT_RCVBUF_SIZE);

                (void) sockaddr_un_unlink(&sa.un);

                /* Only allow root to connect to this socket */
                WITH_UMASK(0077)
                        r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
                if (r < 0)
                        return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);

                m->cgroups_agent_fd = TAKE_FD(fd);
        }

        if (!m->cgroups_agent_event_source) {
                r = sd_event_add_io(m->event, &m->cgroups_agent_event_source, m->cgroups_agent_fd, EPOLLIN, manager_dispatch_cgroups_agent_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate cgroups agent event source: %m");

                /* Process cgroups notifications early. Note that when the agent notification is received
                 * we'll just enqueue the unit in the cgroup empty queue, hence pick a higher priority than
                 * that. Also see handling of cgroup inotify for the unified cgroup stuff. */
                r = sd_event_source_set_priority(m->cgroups_agent_event_source, EVENT_PRIORITY_CGROUP_AGENT);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of cgroups agent event source: %m");

                (void) sd_event_source_set_description(m->cgroups_agent_event_source, "manager-cgroups-agent");
        }

        return 0;
}

static int manager_setup_user_lookup_fd(Manager *m) {
        int r;

        assert(m);

        /* Set up the socket pair used for passing UID/GID resolution results from forked off processes to PID
         * 1. Background: we can't do name lookups (NSS) from PID 1, since it might involve IPC and thus activation,
         * and we might hence deadlock on ourselves. Hence we do all user/group lookups asynchronously from the forked
         * off processes right before executing the binaries to start. In order to be able to clean up any IPC objects
         * created by a unit (see RemoveIPC=) we need to know in PID 1 the used UID/GID of the executed processes,
         * hence we establish this communication channel so that forked off processes can pass their UID/GID
         * information back to PID 1. The forked off processes send their resolved UID/GID to PID 1 in a simple
         * datagram, along with their unit name, so that we can share one communication socket pair among all units for
         * this purpose.
         *
         * You might wonder why we need a communication channel for this that is independent of the usual notification
         * socket scheme (i.e. $NOTIFY_SOCKET). The primary difference is about trust: data sent via the $NOTIFY_SOCKET
         * channel is only accepted if it originates from the right unit and if reception was enabled for it. The user
         * lookup socket OTOH is only accessible by PID 1 and its children until they exec(), and always available.
         *
         * Note that this function is called under two circumstances: when we first initialize (in which case we
         * allocate both the socket pair and the event source to listen on it), and when we deserialize after a reload
         * (in which case the socket pair already exists but we still need to allocate the event source for it). */

        if (m->user_lookup_fds[0] < 0) {

                /* Free all secondary fields */
                safe_close_pair(m->user_lookup_fds);
                m->user_lookup_event_source = sd_event_source_disable_unref(m->user_lookup_event_source);

                if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->user_lookup_fds) < 0)
                        return log_error_errno(errno, "Failed to allocate user lookup socket: %m");

                (void) fd_increase_rxbuf(m->user_lookup_fds[0], NOTIFY_RCVBUF_SIZE);
        }

        if (!m->user_lookup_event_source) {
                r = sd_event_add_io(m->event, &m->user_lookup_event_source, m->user_lookup_fds[0], EPOLLIN, manager_dispatch_user_lookup_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate user lookup event source: %m");

                /* Process even earlier than the notify event source, so that we always know first about valid UID/GID
                 * resolutions */
                r = sd_event_source_set_priority(m->user_lookup_event_source, EVENT_PRIORITY_USER_LOOKUP);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of user lookup event source: %m");

                (void) sd_event_source_set_description(m->user_lookup_event_source, "user-lookup");
        }

        return 0;
}

static int manager_setup_handoff_timestamp_fd(Manager *m) {
        int r;

        assert(m);

        /* Set up the socket pair used for passing timestamps back when the executor processes we fork
         * off invoke execve(), i.e. when we hand off control to our payload processes. */

        if (m->handoff_timestamp_fds[0] < 0) {
                m->handoff_timestamp_event_source = sd_event_source_disable_unref(m->handoff_timestamp_event_source);
                safe_close_pair(m->handoff_timestamp_fds);

                if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->handoff_timestamp_fds) < 0)
                        return log_error_errno(errno, "Failed to allocate handoff timestamp socket: %m");

                /* Make sure children never have to block */
                (void) fd_increase_rxbuf(m->handoff_timestamp_fds[0], NOTIFY_RCVBUF_SIZE);

                r = setsockopt_int(m->handoff_timestamp_fds[0], SOL_SOCKET, SO_PASSCRED, true);
                if (r < 0)
                        return log_error_errno(r, "SO_PASSCRED failed: %m");

                /* Mark the receiving socket as O_NONBLOCK (but leave sending side as-is) */
                r = fd_nonblock(m->handoff_timestamp_fds[0], true);
                if (r < 0)
                        return log_error_errno(r, "Failed to make handoff timestamp socket O_NONBLOCK: %m");
        }

        if (!m->handoff_timestamp_event_source) {
                r = sd_event_add_io(m->event, &m->handoff_timestamp_event_source, m->handoff_timestamp_fds[0], EPOLLIN, manager_dispatch_handoff_timestamp_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate handoff timestamp event source: %m");

                r = sd_event_source_set_priority(m->handoff_timestamp_event_source, EVENT_PRIORITY_HANDOFF_TIMESTAMP);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of handoff timestamp event source: %m");

                (void) sd_event_source_set_description(m->handoff_timestamp_event_source, "handoff-timestamp");
        }

        return 0;
}

static unsigned manager_dispatch_cleanup_queue(Manager *m) {
        Unit *u;
        unsigned n = 0;

        assert(m);

        while ((u = m->cleanup_queue)) {
                assert(u->in_cleanup_queue);

                unit_free(u);
                n++;
        }

        return n;
}

static unsigned manager_dispatch_release_resources_queue(Manager *m) {
        unsigned n = 0;
        Unit *u;

        assert(m);

        while ((u = LIST_POP(release_resources_queue, m->release_resources_queue))) {
                assert(u->in_release_resources_queue);
                u->in_release_resources_queue = false;

                n++;

                unit_release_resources(u);
        }

        return n;
}

enum {
        GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
        GC_OFFSET_UNSURE,  /* No clue */
        GC_OFFSET_GOOD,    /* We still need this unit */
        GC_OFFSET_BAD,     /* We don't need this unit anymore */
        _GC_OFFSET_MAX
};

static void unit_gc_mark_good(Unit *u, unsigned gc_marker) {
        Unit *other;

        u->gc_marker = gc_marker + GC_OFFSET_GOOD;

        /* Recursively mark referenced units as GOOD as well */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCES)
                if (other->gc_marker == gc_marker + GC_OFFSET_UNSURE)
                        unit_gc_mark_good(other, gc_marker);
}

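/* Depth-first sweep over the reference graph: a unit ends up GOOD if anything referencing it is GOOD,
 * BAD if everything referencing it is BAD (or nothing references it at all), and UNSURE otherwise, in
 * which case it is re-queued for a later pass. The IN_PATH marker breaks cycles during the walk. */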
1355 | static void unit_gc_sweep(Unit *u, unsigned gc_marker) { | |
1356 | Unit *other; | |
1357 | bool is_bad; | |
1358 | ||
1359 | assert(u); | |
1360 | ||
1361 | if (IN_SET(u->gc_marker - gc_marker, | |
1362 | GC_OFFSET_GOOD, GC_OFFSET_BAD, GC_OFFSET_UNSURE, GC_OFFSET_IN_PATH)) | |
1363 | return; | |
1364 | ||
1365 | if (u->in_cleanup_queue) | |
1366 | goto bad; | |
1367 | ||
1368 | if (!unit_may_gc(u)) | |
1369 | goto good; | |
1370 | ||
1371 | u->gc_marker = gc_marker + GC_OFFSET_IN_PATH; | |
1372 | ||
1373 | is_bad = true; | |
1374 | ||
1375 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCED_BY) { | |
1376 | unit_gc_sweep(other, gc_marker); | |
1377 | ||
1378 | if (other->gc_marker == gc_marker + GC_OFFSET_GOOD) | |
1379 | goto good; | |
1380 | ||
1381 | if (other->gc_marker != gc_marker + GC_OFFSET_BAD) | |
1382 | is_bad = false; | |
1383 | } | |
1384 | ||
1385 | LIST_FOREACH(refs_by_target, ref, u->refs_by_target) { | |
1386 | unit_gc_sweep(ref->source, gc_marker); | |
1387 | ||
1388 | if (ref->source->gc_marker == gc_marker + GC_OFFSET_GOOD) | |
1389 | goto good; | |
1390 | ||
1391 | if (ref->source->gc_marker != gc_marker + GC_OFFSET_BAD) | |
1392 | is_bad = false; | |
1393 | } | |
1394 | ||
1395 | if (is_bad) | |
1396 | goto bad; | |
1397 | ||
1398 | /* We were unable to find anything out about this entry, so | |
1399 | * let's investigate it later */ | |
1400 | u->gc_marker = gc_marker + GC_OFFSET_UNSURE; | |
1401 | unit_add_to_gc_queue(u); | |
1402 | return; | |
1403 | ||
1404 | bad: | |
1405 | /* We definitely know that this one is not useful anymore, so | |
1406 | * let's mark it for deletion */ | |
1407 | u->gc_marker = gc_marker + GC_OFFSET_BAD; | |
1408 | unit_add_to_cleanup_queue(u); | |
1409 | return; | |
1410 | ||
1411 | good: | |
1412 | unit_gc_mark_good(u, gc_marker); | |
1413 | } | |
1414 | ||
1415 | static unsigned manager_dispatch_gc_unit_queue(Manager *m) { | |
1416 | unsigned n = 0, gc_marker; | |
1417 | Unit *u; | |
1418 | ||
1419 | assert(m); | |
1420 | ||
1421 | /* log_debug("Running GC..."); */ | |
1422 | ||
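 | /* The check below guards against unsigned wraparound: once adding another _GC_OFFSET_MAX would | |
 | * overflow the counter, it is restarted at 1, keeping gc_marker + offset sums well-ordered | |
 | * within a sweep. */ | |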
1423 | m->gc_marker += _GC_OFFSET_MAX; | |
1424 | if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX) | |
1425 | m->gc_marker = 1; | |
1426 | ||
1427 | gc_marker = m->gc_marker; | |
1428 | ||
1429 | while ((u = LIST_POP(gc_queue, m->gc_unit_queue))) { | |
1430 | assert(u->in_gc_queue); | |
1431 | ||
1432 | unit_gc_sweep(u, gc_marker); | |
1433 | ||
1434 | u->in_gc_queue = false; | |
1435 | ||
1436 | n++; | |
1437 | ||
1438 | if (IN_SET(u->gc_marker - gc_marker, | |
1439 | GC_OFFSET_BAD, GC_OFFSET_UNSURE)) { | |
1440 | if (u->id) | |
1441 | log_unit_debug(u, "Collecting."); | |
1442 | u->gc_marker = gc_marker + GC_OFFSET_BAD; | |
1443 | unit_add_to_cleanup_queue(u); | |
1444 | } | |
1445 | } | |
1446 | ||
1447 | return n; | |
1448 | } | |
1449 | ||
1450 | static unsigned manager_dispatch_gc_job_queue(Manager *m) { | |
1451 | unsigned n = 0; | |
1452 | Job *j; | |
1453 | ||
1454 | assert(m); | |
1455 | ||
1456 | while ((j = LIST_POP(gc_queue, m->gc_job_queue))) { | |
1457 | assert(j->in_gc_queue); | |
1458 | j->in_gc_queue = false; | |
1459 | ||
1460 | n++; | |
1461 | ||
1462 | if (!job_may_gc(j)) | |
1463 | continue; | |
1464 | ||
1465 | log_unit_debug(j->unit, "Collecting job."); | |
1466 | (void) job_finish_and_invalidate(j, JOB_COLLECTED, false, false); | |
1467 | } | |
1468 | ||
1469 | return n; | |
1470 | } | |
1471 | ||
1472 | static int manager_ratelimit_requeue(sd_event_source *s, uint64_t usec, void *userdata) { | |
1473 | Unit *u = userdata; | |
1474 | ||
1475 | assert(u); | |
1476 | assert(s == u->auto_start_stop_event_source); | |
1477 | ||
1478 | u->auto_start_stop_event_source = sd_event_source_unref(u->auto_start_stop_event_source); | |
1479 | ||
1480 | /* Re-queue to all queues: if the rate limit was hit we might have been throttled on any of them. */ | |
1481 | unit_submit_to_stop_when_unneeded_queue(u); | |
1482 | unit_submit_to_start_when_upheld_queue(u); | |
1483 | unit_submit_to_stop_when_bound_queue(u); | |
1484 | ||
1485 | return 0; | |
1486 | } | |
1487 | ||
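 | /* Summary of the helper below: returns 1 if the unit's auto start/stop rate limit still has | |
 | * budget, i.e. the caller may act right away. Returns 0 if the limit was hit; in that case a | |
 | * one-shot timer is armed (unless one is pending already) that re-submits the unit to the three | |
 | * auto start/stop queues once the rate-limit interval ends. Negative means the timer could not | |
 | * be set up. */ | |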
1488 | static int manager_ratelimit_check_and_queue(Unit *u) { | |
1489 | int r; | |
1490 | ||
1491 | assert(u); | |
1492 | ||
1493 | if (ratelimit_below(&u->auto_start_stop_ratelimit)) | |
1494 | return 1; | |
1495 | ||
1496 | /* Already queued, no need to requeue */ | |
1497 | if (u->auto_start_stop_event_source) | |
1498 | return 0; | |
1499 | ||
1500 | r = sd_event_add_time( | |
1501 | u->manager->event, | |
1502 | &u->auto_start_stop_event_source, | |
1503 | CLOCK_MONOTONIC, | |
1504 | ratelimit_end(&u->auto_start_stop_ratelimit), | |
1505 | 0, | |
1506 | manager_ratelimit_requeue, | |
1507 | u); | |
1508 | if (r < 0) | |
1509 | return log_unit_error_errno(u, r, "Failed to queue timer on event loop: %m"); | |
1510 | ||
1511 | return 0; | |
1512 | } | |
1513 | ||
1514 | static unsigned manager_dispatch_stop_when_unneeded_queue(Manager *m) { | |
1515 | unsigned n = 0; | |
1516 | Unit *u; | |
1517 | int r; | |
1518 | ||
1519 | assert(m); | |
1520 | ||
1521 | while ((u = LIST_POP(stop_when_unneeded_queue, m->stop_when_unneeded_queue))) { | |
1522 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
1523 | ||
1524 | assert(u->in_stop_when_unneeded_queue); | |
1525 | u->in_stop_when_unneeded_queue = false; | |
1526 | ||
1527 | n++; | |
1528 | ||
1529 | if (!unit_is_unneeded(u)) | |
1530 | continue; | |
1531 | ||
1532 | log_unit_debug(u, "Unit is not needed anymore."); | |
1533 | ||
1534 | /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the | |
1535 | * service being unnecessary after a while. */ | |
1536 | ||
1537 | r = manager_ratelimit_check_and_queue(u); | |
1538 | if (r <= 0) { | |
1539 | log_unit_warning(u, | |
1540 | "Unit not needed anymore, but not stopping since we tried this too often recently.%s", | |
1541 | r == 0 ? " Will retry later." : ""); | |
1542 | continue; | |
1543 | } | |
1544 | ||
1545 | /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */ | |
1546 | r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL); | |
1547 | if (r < 0) | |
1548 | log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r)); | |
1549 | } | |
1550 | ||
1551 | return n; | |
1552 | } | |
1553 | ||
1554 | static unsigned manager_dispatch_start_when_upheld_queue(Manager *m) { | |
1555 | unsigned n = 0; | |
1556 | Unit *u; | |
1557 | int r; | |
1558 | ||
1559 | assert(m); | |
1560 | ||
1561 | while ((u = LIST_POP(start_when_upheld_queue, m->start_when_upheld_queue))) { | |
1562 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
1563 | Unit *culprit = NULL; | |
1564 | ||
1565 | assert(u->in_start_when_upheld_queue); | |
1566 | u->in_start_when_upheld_queue = false; | |
1567 | ||
1568 | n++; | |
1569 | ||
1570 | if (!unit_is_upheld_by_active(u, &culprit)) | |
1571 | continue; | |
1572 | ||
1573 | log_unit_debug(u, "Unit is started because upheld by active unit %s.", culprit->id); | |
1574 | ||
1575 | /* If starting a unit fails continuously we might enter a start loop here, hence stop acting on the | |
1576 | * uphold request after a while. */ | |
1577 | ||
1578 | r = manager_ratelimit_check_and_queue(u); | |
1579 | if (r <= 0) { | |
1580 | log_unit_warning(u, | |
1581 | "Unit needs to be started because active unit %s upholds it, but not starting since we tried this too often recently.%s", | |
1582 | culprit->id, | |
1583 | r == 0 ? " Will retry later." : ""); | |
1584 | continue; | |
1585 | } | |
1586 | ||
1587 | r = manager_add_job(u->manager, JOB_START, u, JOB_FAIL, NULL, &error, NULL); | |
1588 | if (r < 0) | |
1589 | log_unit_warning_errno(u, r, "Failed to enqueue start job, ignoring: %s", bus_error_message(&error, r)); | |
1590 | } | |
1591 | ||
1592 | return n; | |
1593 | } | |
1594 | ||
1595 | static unsigned manager_dispatch_stop_when_bound_queue(Manager *m) { | |
1596 | unsigned n = 0; | |
1597 | Unit *u; | |
1598 | int r; | |
1599 | ||
1600 | assert(m); | |
1601 | ||
1602 | while ((u = LIST_POP(stop_when_bound_queue, m->stop_when_bound_queue))) { | |
1603 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
1604 | Unit *culprit = NULL; | |
1605 | ||
1606 | assert(u->in_stop_when_bound_queue); | |
1607 | u->in_stop_when_bound_queue = false; | |
1608 | ||
1609 | n++; | |
1610 | ||
1611 | if (!unit_is_bound_by_inactive(u, &culprit)) | |
1612 | continue; | |
1613 | ||
1614 | log_unit_debug(u, "Unit is stopped because bound to inactive unit %s.", culprit->id); | |
1615 | ||
1616 | /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the | |
1617 | * unit being bound to an inactive unit after a while. */ | |
1618 | ||
1619 | r = manager_ratelimit_check_and_queue(u); | |
1620 | if (r <= 0) { | |
1621 | log_unit_warning(u, | |
1622 | "Unit needs to be stopped because it is bound to inactive unit %s it, but not stopping since we tried this too often recently.%s", | |
1623 | culprit->id, | |
1624 | r == 0 ? " Will retry later." : ""); | |
1625 | continue; | |
1626 | } | |
1627 | ||
1628 | r = manager_add_job(u->manager, JOB_STOP, u, JOB_REPLACE, NULL, &error, NULL); | |
1629 | if (r < 0) | |
1630 | log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r)); | |
1631 | } | |
1632 | ||
1633 | return n; | |
1634 | } | |
1635 | ||
1636 | static void manager_clear_jobs_and_units(Manager *m) { | |
1637 | Unit *u; | |
1638 | ||
1639 | assert(m); | |
1640 | ||
1641 | while ((u = hashmap_first(m->units))) | |
1642 | unit_free(u); | |
1643 | ||
1644 | manager_dispatch_cleanup_queue(m); | |
1645 | ||
1646 | assert(!m->load_queue); | |
1647 | assert(prioq_isempty(m->run_queue)); | |
1648 | assert(!m->dbus_unit_queue); | |
1649 | assert(!m->dbus_job_queue); | |
1650 | assert(!m->cleanup_queue); | |
1651 | assert(!m->gc_unit_queue); | |
1652 | assert(!m->gc_job_queue); | |
1653 | assert(!m->cgroup_realize_queue); | |
1654 | assert(!m->cgroup_empty_queue); | |
1655 | assert(!m->cgroup_oom_queue); | |
1656 | assert(!m->target_deps_queue); | |
1657 | assert(!m->stop_when_unneeded_queue); | |
1658 | assert(!m->start_when_upheld_queue); | |
1659 | assert(!m->stop_when_bound_queue); | |
1660 | assert(!m->release_resources_queue); | |
1661 | ||
1662 | assert(hashmap_isempty(m->jobs)); | |
1663 | assert(hashmap_isempty(m->units)); | |
1664 | ||
1665 | m->n_on_console = 0; | |
1666 | m->n_running_jobs = 0; | |
1667 | m->n_installed_jobs = 0; | |
1668 | m->n_failed_jobs = 0; | |
1669 | } | |
1670 | ||
1671 | Manager* manager_free(Manager *m) { | |
1672 | if (!m) | |
1673 | return NULL; | |
1674 | ||
1675 | manager_clear_jobs_and_units(m); | |
1676 | ||
1677 | for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) | |
1678 | if (unit_vtable[c]->shutdown) | |
1679 | unit_vtable[c]->shutdown(m); | |
1680 | ||
1681 | /* Keep the cgroup hierarchy in place except when we know we are going down for good */ | |
1682 | manager_shutdown_cgroup(m, /* delete= */ IN_SET(m->objective, MANAGER_EXIT, MANAGER_REBOOT, MANAGER_POWEROFF, MANAGER_HALT, MANAGER_KEXEC)); | |
1683 | ||
1684 | lookup_paths_flush_generator(&m->lookup_paths); | |
1685 | ||
1686 | bus_done(m); | |
1687 | manager_varlink_done(m); | |
1688 | ||
1689 | exec_shared_runtime_vacuum(m); | |
1690 | hashmap_free(m->exec_shared_runtime_by_id); | |
1691 | ||
1692 | dynamic_user_vacuum(m, false); | |
1693 | hashmap_free(m->dynamic_users); | |
1694 | ||
1695 | hashmap_free(m->units); | |
1696 | hashmap_free(m->units_by_invocation_id); | |
1697 | hashmap_free(m->jobs); | |
1698 | hashmap_free(m->watch_pids); | |
1699 | hashmap_free(m->watch_pids_more); | |
1700 | hashmap_free(m->watch_bus); | |
1701 | ||
1702 | prioq_free(m->run_queue); | |
1703 | ||
1704 | set_free(m->startup_units); | |
1705 | set_free(m->failed_units); | |
1706 | ||
1707 | sd_event_source_unref(m->signal_event_source); | |
1708 | sd_event_source_unref(m->sigchld_event_source); | |
1709 | sd_event_source_unref(m->notify_event_source); | |
1710 | sd_event_source_unref(m->cgroups_agent_event_source); | |
1711 | sd_event_source_unref(m->time_change_event_source); | |
1712 | sd_event_source_unref(m->timezone_change_event_source); | |
1713 | sd_event_source_unref(m->jobs_in_progress_event_source); | |
1714 | sd_event_source_unref(m->run_queue_event_source); | |
1715 | sd_event_source_unref(m->user_lookup_event_source); | |
1716 | sd_event_source_unref(m->handoff_timestamp_event_source); | |
1717 | sd_event_source_unref(m->memory_pressure_event_source); | |
1718 | ||
1719 | safe_close(m->signal_fd); | |
1720 | safe_close(m->notify_fd); | |
1721 | safe_close(m->cgroups_agent_fd); | |
1722 | safe_close_pair(m->user_lookup_fds); | |
1723 | safe_close_pair(m->handoff_timestamp_fds); | |
1724 | ||
1725 | manager_close_ask_password(m); | |
1726 | ||
1727 | manager_close_idle_pipe(m); | |
1728 | ||
1729 | sd_event_unref(m->event); | |
1730 | ||
1731 | free(m->notify_socket); | |
1732 | ||
1733 | lookup_paths_done(&m->lookup_paths); | |
1734 | strv_free(m->transient_environment); | |
1735 | strv_free(m->client_environment); | |
1736 | ||
1737 | hashmap_free(m->cgroup_unit); | |
1738 | manager_free_unit_name_maps(m); | |
1739 | ||
1740 | free(m->switch_root); | |
1741 | free(m->switch_root_init); | |
1742 | ||
1743 | unit_defaults_done(&m->defaults); | |
1744 | ||
1745 | FOREACH_ARRAY(map, m->units_needing_mounts_for, _UNIT_MOUNT_DEPENDENCY_TYPE_MAX) { | |
1746 | assert(hashmap_isempty(*map)); | |
1747 | hashmap_free(*map); | |
1748 | } | |
1749 | ||
1750 | hashmap_free(m->uid_refs); | |
1751 | hashmap_free(m->gid_refs); | |
1752 | ||
1753 | for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) | |
1754 | m->prefix[dt] = mfree(m->prefix[dt]); | |
1755 | free(m->received_credentials_directory); | |
1756 | free(m->received_encrypted_credentials_directory); | |
1757 | ||
1758 | free(m->watchdog_pretimeout_governor); | |
1759 | free(m->watchdog_pretimeout_governor_overridden); | |
1760 | ||
1761 | m->fw_ctx = fw_ctx_free(m->fw_ctx); | |
1762 | ||
1763 | #if BPF_FRAMEWORK | |
1764 | bpf_restrict_fs_destroy(m->restrict_fs); | |
1765 | #endif | |
1766 | ||
1767 | safe_close(m->executor_fd); | |
1768 | ||
1769 | return mfree(m); | |
1770 | } | |
1771 | ||
1772 | static void manager_enumerate_perpetual(Manager *m) { | |
1773 | assert(m); | |
1774 | ||
1775 | if (FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL)) | |
1776 | return; | |
1777 | ||
1778 | /* Let's ask every type to add the perpetual units it knows */ | |
1779 | for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) { | |
1780 | if (!unit_type_supported(c)) { | |
1781 | log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c)); | |
1782 | continue; | |
1783 | } | |
1784 | ||
1785 | if (unit_vtable[c]->enumerate_perpetual) | |
1786 | unit_vtable[c]->enumerate_perpetual(m); | |
1787 | } | |
1788 | } | |
1789 | ||
1790 | static void manager_enumerate(Manager *m) { | |
1791 | assert(m); | |
1792 | ||
1793 | if (FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL)) | |
1794 | return; | |
1795 | ||
1796 | /* Let's ask every type to load all units from disk/kernel that it might know */ | |
1797 | for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) { | |
1798 | if (!unit_type_supported(c)) { | |
1799 | log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c)); | |
1800 | continue; | |
1801 | } | |
1802 | ||
1803 | if (unit_vtable[c]->enumerate) | |
1804 | unit_vtable[c]->enumerate(m); | |
1805 | } | |
1806 | ||
1807 | manager_dispatch_load_queue(m); | |
1808 | } | |
1809 | ||
1810 | static void manager_coldplug(Manager *m) { | |
1811 | Unit *u; | |
1812 | char *k; | |
1813 | int r; | |
1814 | ||
1815 | assert(m); | |
1816 | ||
1817 | log_debug("Invoking unit coldplug() handlers%s", special_glyph(SPECIAL_GLYPH_ELLIPSIS)); | |
1818 | ||
1819 | /* Let's place the units back into their deserialized state */ | |
1820 | HASHMAP_FOREACH_KEY(u, k, m->units) { | |
1821 | ||
1822 | /* ignore aliases */ | |
1823 | if (u->id != k) | |
1824 | continue; | |
1825 | ||
1826 | r = unit_coldplug(u); | |
1827 | if (r < 0) | |
1828 | log_warning_errno(r, "We couldn't coldplug %s, proceeding anyway: %m", u->id); | |
1829 | } | |
1830 | } | |
1831 | ||
1832 | static void manager_catchup(Manager *m) { | |
1833 | Unit *u; | |
1834 | char *k; | |
1835 | ||
1836 | assert(m); | |
1837 | ||
1838 | log_debug("Invoking unit catchup() handlers%s", special_glyph(SPECIAL_GLYPH_ELLIPSIS)); | |
1839 | ||
1840 | /* Let's catch up on any state changes that happened while we were reloading/reexecing */ | |
1841 | HASHMAP_FOREACH_KEY(u, k, m->units) { | |
1842 | ||
1843 | /* ignore aliases */ | |
1844 | if (u->id != k) | |
1845 | continue; | |
1846 | ||
1847 | unit_catchup(u); | |
1848 | } | |
1849 | } | |
1850 | ||
1851 | static void manager_distribute_fds(Manager *m, FDSet *fds) { | |
1852 | Unit *u; | |
1853 | ||
1854 | assert(m); | |
1855 | ||
1856 | HASHMAP_FOREACH(u, m->units) { | |
1857 | ||
1858 | if (fdset_isempty(fds)) | |
1859 | break; | |
1860 | ||
1861 | if (!UNIT_VTABLE(u)->distribute_fds) | |
1862 | continue; | |
1863 | ||
1864 | UNIT_VTABLE(u)->distribute_fds(u, fds); | |
1865 | } | |
1866 | } | |
1867 | ||
1868 | static bool manager_dbus_is_running(Manager *m, bool deserialized) { | |
1869 | Unit *u; | |
1870 | ||
1871 | assert(m); | |
1872 | ||
1873 | /* This checks whether the dbus instance we are supposed to expose our APIs on is up. We check both the socket | |
1874 | * and the service unit. If the 'deserialized' parameter is true we'll check the deserialized state of the unit | |
1875 | * rather than the current one. */ | |
1876 | ||
1877 | if (MANAGER_IS_TEST_RUN(m)) | |
1878 | return false; | |
1879 | ||
1880 | u = manager_get_unit(m, SPECIAL_DBUS_SOCKET); | |
1881 | if (!u) | |
1882 | return false; | |
1883 | if ((deserialized ? SOCKET(u)->deserialized_state : SOCKET(u)->state) != SOCKET_RUNNING) | |
1884 | return false; | |
1885 | ||
1886 | u = manager_get_unit(m, SPECIAL_DBUS_SERVICE); | |
1887 | if (!u) | |
1888 | return false; | |
1889 | if (!IN_SET((deserialized ? SERVICE(u)->deserialized_state : SERVICE(u)->state), | |
1890 | SERVICE_RUNNING, | |
1891 | SERVICE_RELOAD, | |
1892 | SERVICE_RELOAD_NOTIFY, | |
1893 | SERVICE_RELOAD_SIGNAL)) | |
1894 | return false; | |
1895 | ||
1896 | return true; | |
1897 | } | |
1898 | ||
1899 | static void manager_setup_bus(Manager *m) { | |
1900 | assert(m); | |
1901 | ||
1902 | /* Let's set up our private bus connection now, unconditionally */ | |
1903 | (void) bus_init_private(m); | |
1904 | ||
1905 | /* If we are in --user mode also connect to the system bus now */ | |
1906 | if (MANAGER_IS_USER(m)) | |
1907 | (void) bus_init_system(m); | |
1908 | ||
1909 | /* Let's connect to the bus now, but only if the unit is supposed to be up */ | |
1910 | if (manager_dbus_is_running(m, MANAGER_IS_RELOADING(m))) { | |
1911 | (void) bus_init_api(m); | |
1912 | ||
1913 | if (MANAGER_IS_SYSTEM(m)) | |
1914 | (void) bus_init_system(m); | |
1915 | } | |
1916 | } | |
1917 | ||
1918 | static void manager_preset_all(Manager *m) { | |
1919 | int r; | |
1920 | ||
1921 | assert(m); | |
1922 | ||
1923 | if (m->first_boot <= 0) | |
1924 | return; | |
1925 | ||
1926 | if (!MANAGER_IS_SYSTEM(m)) | |
1927 | return; | |
1928 | ||
1929 | if (MANAGER_IS_TEST_RUN(m)) | |
1930 | return; | |
1931 | ||
1932 | /* If this is the first boot, and we are in the host system, then preset everything */ | |
1933 | UnitFilePresetMode mode = | |
1934 | ENABLE_FIRST_BOOT_FULL_PRESET ? UNIT_FILE_PRESET_FULL : UNIT_FILE_PRESET_ENABLE_ONLY; | |
1935 | ||
1936 | r = unit_file_preset_all(RUNTIME_SCOPE_SYSTEM, 0, NULL, mode, NULL, 0); | |
1937 | if (r < 0) | |
1938 | log_full_errno(r == -EEXIST ? LOG_NOTICE : LOG_WARNING, r, | |
1939 | "Failed to populate /etc with preset unit settings, ignoring: %m"); | |
1940 | else | |
1941 | log_info("Populated /etc with preset unit settings."); | |
1942 | } | |
1943 | ||
1944 | static void manager_ready(Manager *m) { | |
1945 | assert(m); | |
1946 | ||
1947 | /* After having loaded everything, do the final round of catching up with what might have changed */ | |
1948 | ||
1949 | m->objective = MANAGER_OK; /* Tell everyone we are up now */ | |
1950 | ||
1951 | /* It might be safe to log to the journal now and connect to dbus */ | |
1952 | manager_recheck_journal(m); | |
1953 | manager_recheck_dbus(m); | |
1954 | ||
1955 | /* Let's finally catch up with any changes that took place while we were reloading/reexecing */ | |
1956 | manager_catchup(m); | |
1957 | ||
1958 | /* Create a file which will indicate when the manager started loading units the last time. */ | |
1959 | if (MANAGER_IS_SYSTEM(m)) | |
1960 | (void) touch_file("/run/systemd/systemd-units-load", false, | |
1961 | m->timestamps[MANAGER_TIMESTAMP_UNITS_LOAD].realtime ?: now(CLOCK_REALTIME), | |
1962 | UID_INVALID, GID_INVALID, 0444); | |
1963 | } | |
1964 | ||
1965 | Manager* manager_reloading_start(Manager *m) { | |
1966 | m->n_reloading++; | |
1967 | dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD); | |
1968 | return m; | |
1969 | } | |
1970 | ||
1971 | void manager_reloading_stopp(Manager **m) { | |
1972 | if (*m) { | |
1973 | assert((*m)->n_reloading > 0); | |
1974 | (*m)->n_reloading--; | |
1975 | } | |
1976 | } | |
1977 | ||
1978 | int manager_startup(Manager *m, FILE *serialization, FDSet *fds, const char *root) { | |
1979 | int r; | |
1980 | ||
1981 | assert(m); | |
1982 | ||
1983 | /* If we are running in test mode, we still want to run the generators, | |
1984 | * but we should not touch the real generator directories. */ | |
1985 | r = lookup_paths_init_or_warn(&m->lookup_paths, m->runtime_scope, | |
1986 | MANAGER_IS_TEST_RUN(m) ? LOOKUP_PATHS_TEMPORARY_GENERATED : 0, | |
1987 | root); | |
1988 | if (r < 0) | |
1989 | return r; | |
1990 | ||
1991 | dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_START)); | |
1992 | r = manager_run_environment_generators(m); | |
1993 | if (r >= 0) | |
1994 | r = manager_run_generators(m); | |
1995 | dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_FINISH)); | |
1996 | if (r < 0) | |
1997 | return r; | |
1998 | ||
1999 | manager_preset_all(m); | |
2000 | ||
2001 | lookup_paths_log(&m->lookup_paths); | |
2002 | ||
2003 | { | |
2004 | /* This block is (optionally) done with the reloading counter bumped */ | |
2005 | _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL; | |
2006 | ||
2007 | /* Make sure we don't have a left-over from a previous run */ | |
2008 | if (!serialization) | |
2009 | (void) rm_rf(m->lookup_paths.transient, 0); | |
2010 | ||
2011 | /* If we will deserialize, make sure that during enumeration this is already known, so we increase the | |
2012 | * reloading counter here already */ | |
2013 | if (serialization) | |
2014 | reloading = manager_reloading_start(m); | |
2015 | ||
2016 | /* First, enumerate what we can from all config files */ | |
2017 | dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_START)); | |
2018 | manager_enumerate_perpetual(m); | |
2019 | manager_enumerate(m); | |
2020 | dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_FINISH)); | |
2021 | ||
2022 | /* Second, deserialize if there is something to deserialize */ | |
2023 | if (serialization) { | |
2024 | r = manager_deserialize(m, serialization, fds); | |
2025 | if (r < 0) | |
2026 | return log_error_errno(r, "Deserialization failed: %m"); | |
2027 | } | |
2028 | ||
2029 | if (m->previous_objective >= 0) { | |
2030 | if (IN_SET(m->previous_objective, MANAGER_REEXECUTE, MANAGER_SOFT_REBOOT, MANAGER_SWITCH_ROOT)) | |
2031 | log_debug("Launching as effect of a '%s' operation.", | |
2032 | manager_objective_to_string(m->previous_objective)); | |
2033 | else | |
2034 | log_warning("Got unexpected previous objective '%s', ignoring.", | |
2035 | manager_objective_to_string(m->previous_objective)); | |
2036 | } | |
2037 | ||
2038 | /* If we are in a new soft-reboot iteration bump the counter now before starting units, so | |
2039 | * that they can reliably read it. We get the previous objective from serialized state. */ | |
2040 | if (m->previous_objective == MANAGER_SOFT_REBOOT) | |
2041 | m->soft_reboots_count++; | |
2042 | ||
2043 | /* Any fds left? Find some unit which wants them. This is useful to allow container managers to pass | |
2044 | * some file descriptors to us pre-initialized. This enables socket-based activation of entire | |
2045 | * containers. */ | |
2046 | manager_distribute_fds(m, fds); | |
2047 | ||
2048 | /* We might have deserialized the notify fd, but if we didn't then let's create the socket now */ | |
2049 | r = manager_setup_notify(m); | |
2050 | if (r < 0) | |
2051 | /* No sense to continue without notifications, our children would fail anyway. */ | |
2052 | return r; | |
2053 | ||
2054 | r = manager_setup_cgroups_agent(m); | |
2055 | if (r < 0) | |
2056 | /* Likewise, no sense to continue without empty cgroup notifications. */ | |
2057 | return r; | |
2058 | ||
2059 | r = manager_setup_user_lookup_fd(m); | |
2060 | if (r < 0) | |
2061 | /* This shouldn't fail, except if things are really broken. */ | |
2062 | return r; | |
2063 | ||
2064 | r = manager_setup_handoff_timestamp_fd(m); | |
2065 | if (r < 0) | |
2066 | /* This shouldn't fail, except if things are really broken. */ | |
2067 | return r; | |
2068 | ||
2069 | /* Connect to the bus if we are good for it */ | |
2070 | manager_setup_bus(m); | |
2071 | ||
2072 | /* Now that we are connected to all possible buses, let's deserialize who is tracking us. */ | |
2073 | r = bus_track_coldplug(m, &m->subscribed, false, m->deserialized_subscribed); | |
2074 | if (r < 0) | |
2075 | log_warning_errno(r, "Failed to deserialize tracked clients, ignoring: %m"); | |
2076 | m->deserialized_subscribed = strv_free(m->deserialized_subscribed); | |
2077 | ||
2078 | r = manager_varlink_init(m); | |
2079 | if (r < 0) | |
2080 | log_warning_errno(r, "Failed to set up Varlink, ignoring: %m"); | |
2081 | ||
2082 | /* Third, fire things up! */ | |
2083 | manager_coldplug(m); | |
2084 | ||
2085 | /* Clean up runtime objects */ | |
2086 | manager_vacuum(m); | |
2087 | ||
2088 | if (serialization) | |
2089 | /* Let's wait for the UnitNew/JobNew messages to be sent, before we notify that the | |
2090 | * reload is finished */ | |
2091 | m->send_reloading_done = true; | |
2092 | } | |
2093 | ||
2094 | manager_ready(m); | |
2095 | ||
2096 | manager_set_switching_root(m, false); | |
2097 | ||
2098 | return 0; | |
2099 | } | |
2100 | ||
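 | /* Overview: manager_add_job() turns a (type, unit, mode) request into a transaction: the job | |
 | * type is first collapsed into one applicable to the unit, the job plus its dependencies are | |
 | * added, isolate/triggering modes contribute their extra jobs, and transaction_activate() | |
 | * finally merges the result into the installed job set. A typical call, as seen earlier in | |
 | * this file: | |
 | * | |
 | *     r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL); | |
 | */ | |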
2101 | int manager_add_job( | |
2102 | Manager *m, | |
2103 | JobType type, | |
2104 | Unit *unit, | |
2105 | JobMode mode, | |
2106 | Set *affected_jobs, | |
2107 | sd_bus_error *error, | |
2108 | Job **ret) { | |
2109 | ||
2110 | _cleanup_(transaction_abort_and_freep) Transaction *tr = NULL; | |
2111 | int r; | |
2112 | ||
2113 | assert(m); | |
2114 | assert(type < _JOB_TYPE_MAX); | |
2115 | assert(unit); | |
2116 | assert(mode < _JOB_MODE_MAX); | |
2117 | ||
2118 | if (mode == JOB_ISOLATE && type != JOB_START) | |
2119 | return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start."); | |
2120 | ||
2121 | if (mode == JOB_ISOLATE && !unit->allow_isolate) | |
2122 | return sd_bus_error_set(error, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated."); | |
2123 | ||
2124 | if (mode == JOB_TRIGGERING && type != JOB_STOP) | |
2125 | return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=triggering is only valid for stop."); | |
2126 | ||
2127 | if (mode == JOB_RESTART_DEPENDENCIES && type != JOB_START) | |
2128 | return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=restart-dependencies is only valid for start."); | |
2129 | ||
2130 | log_unit_debug(unit, "Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode)); | |
2131 | ||
2132 | type = job_type_collapse(type, unit); | |
2133 | ||
2134 | tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY); | |
2135 | if (!tr) | |
2136 | return -ENOMEM; | |
2137 | ||
2138 | r = transaction_add_job_and_dependencies( | |
2139 | tr, | |
2140 | type, | |
2141 | unit, | |
2142 | /* by= */ NULL, | |
2143 | TRANSACTION_MATTERS | | |
2144 | (IN_SET(mode, JOB_IGNORE_DEPENDENCIES, JOB_IGNORE_REQUIREMENTS) ? TRANSACTION_IGNORE_REQUIREMENTS : 0) | | |
2145 | (mode == JOB_IGNORE_DEPENDENCIES ? TRANSACTION_IGNORE_ORDER : 0) | | |
2146 | (mode == JOB_RESTART_DEPENDENCIES ? TRANSACTION_PROPAGATE_START_AS_RESTART : 0), | |
2147 | error); | |
2148 | if (r < 0) | |
2149 | return r; | |
2150 | ||
2151 | if (mode == JOB_ISOLATE) { | |
2152 | r = transaction_add_isolate_jobs(tr, m); | |
2153 | if (r < 0) | |
2154 | return r; | |
2155 | } | |
2156 | ||
2157 | if (mode == JOB_TRIGGERING) { | |
2158 | r = transaction_add_triggering_jobs(tr, unit); | |
2159 | if (r < 0) | |
2160 | return r; | |
2161 | } | |
2162 | ||
2163 | r = transaction_activate(tr, m, mode, affected_jobs, error); | |
2164 | if (r < 0) | |
2165 | return r; | |
2166 | ||
2167 | log_unit_debug(unit, | |
2168 | "Enqueued job %s/%s as %u", unit->id, | |
2169 | job_type_to_string(type), (unsigned) tr->anchor_job->id); | |
2170 | ||
2171 | if (ret) | |
2172 | *ret = tr->anchor_job; | |
2173 | ||
2174 | tr = transaction_free(tr); | |
2175 | return 0; | |
2176 | } | |
2177 | ||
2178 | int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, sd_bus_error *e, Job **ret) { | |
2179 | Unit *unit = NULL; /* just to appease gcc, initialization is not really necessary */ | |
2180 | int r; | |
2181 | ||
2182 | assert(m); | |
2183 | assert(type < _JOB_TYPE_MAX); | |
2184 | assert(name); | |
2185 | assert(mode < _JOB_MODE_MAX); | |
2186 | ||
2187 | r = manager_load_unit(m, name, NULL, NULL, &unit); | |
2188 | if (r < 0) | |
2189 | return r; | |
2190 | assert(unit); | |
2191 | ||
2192 | return manager_add_job(m, type, unit, mode, affected_jobs, e, ret); | |
2193 | } | |
2194 | ||
2195 | int manager_add_job_by_name_and_warn(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, Job **ret) { | |
2196 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
2197 | int r; | |
2198 | ||
2199 | assert(m); | |
2200 | assert(type < _JOB_TYPE_MAX); | |
2201 | assert(name); | |
2202 | assert(mode < _JOB_MODE_MAX); | |
2203 | ||
2204 | r = manager_add_job_by_name(m, type, name, mode, affected_jobs, &error, ret); | |
2205 | if (r < 0) | |
2206 | return log_warning_errno(r, "Failed to enqueue %s job for %s: %s", job_mode_to_string(mode), name, bus_error_message(&error, r)); | |
2207 | ||
2208 | return r; | |
2209 | } | |
2210 | ||
2211 | int manager_propagate_reload(Manager *m, Unit *unit, JobMode mode, sd_bus_error *e) { | |
2212 | int r; | |
2213 | _cleanup_(transaction_abort_and_freep) Transaction *tr = NULL; | |
2214 | ||
2215 | assert(m); | |
2216 | assert(unit); | |
2217 | assert(mode < _JOB_MODE_MAX); | |
2218 | assert(mode != JOB_ISOLATE); /* Isolate is only valid for start */ | |
2219 | ||
2220 | tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY); | |
2221 | if (!tr) | |
2222 | return -ENOMEM; | |
2223 | ||
2224 | /* We need an anchor job */ | |
2225 | r = transaction_add_job_and_dependencies(tr, JOB_NOP, unit, NULL, TRANSACTION_IGNORE_REQUIREMENTS|TRANSACTION_IGNORE_ORDER, e); | |
2226 | if (r < 0) | |
2227 | return r; | |
2228 | ||
2229 | /* Failure in adding individual dependencies is ignored, so this always succeeds. */ | |
2230 | transaction_add_propagate_reload_jobs( | |
2231 | tr, | |
2232 | unit, | |
2233 | tr->anchor_job, | |
2234 | mode == JOB_IGNORE_DEPENDENCIES ? TRANSACTION_IGNORE_ORDER : 0); | |
2235 | ||
2236 | r = transaction_activate(tr, m, mode, NULL, e); | |
2237 | if (r < 0) | |
2238 | return r; | |
2239 | ||
2240 | tr = transaction_free(tr); | |
2241 | return 0; | |
2242 | } | |
2243 | ||
2244 | Job *manager_get_job(Manager *m, uint32_t id) { | |
2245 | assert(m); | |
2246 | ||
2247 | return hashmap_get(m->jobs, UINT32_TO_PTR(id)); | |
2248 | } | |
2249 | ||
2250 | Unit *manager_get_unit(Manager *m, const char *name) { | |
2251 | assert(m); | |
2252 | assert(name); | |
2253 | ||
2254 | return hashmap_get(m->units, name); | |
2255 | } | |
2256 | ||
2257 | static int manager_dispatch_target_deps_queue(Manager *m) { | |
2258 | Unit *u; | |
2259 | int r = 0; | |
2260 | ||
2261 | assert(m); | |
2262 | ||
2263 | while ((u = LIST_POP(target_deps_queue, m->target_deps_queue))) { | |
2264 | _cleanup_free_ Unit **targets = NULL; | |
2265 | int n_targets; | |
2266 | ||
2267 | assert(u->in_target_deps_queue); | |
2268 | ||
2269 | u->in_target_deps_queue = false; | |
2270 | ||
2271 | /* Take an "atomic" snapshot of dependencies here, as the call below will likely modify the | |
2272 | * dependencies, and we can't allow the hash tables we iterate through to be modified while | |
2273 | * we are iterating through them. */ | |
2274 | n_targets = unit_get_dependency_array(u, UNIT_ATOM_DEFAULT_TARGET_DEPENDENCIES, &targets); | |
2275 | if (n_targets < 0) | |
2276 | return n_targets; | |
2277 | ||
2278 | FOREACH_ARRAY(i, targets, n_targets) { | |
2279 | r = unit_add_default_target_dependency(u, *i); | |
2280 | if (r < 0) | |
2281 | return r; | |
2282 | } | |
2283 | } | |
2284 | ||
2285 | return r; | |
2286 | } | |
2287 | ||
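 | /* Note: unit_load() may itself enqueue further units onto the load queue (e.g. for newly | |
 | * discovered dependencies), hence the loop below drains until the queue is empty, while the | |
 | * dispatching_load_queue flag prevents re-entrant dispatching from within unit_load(). */ | |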
2288 | unsigned manager_dispatch_load_queue(Manager *m) { | |
2289 | Unit *u; | |
2290 | unsigned n = 0; | |
2291 | ||
2292 | assert(m); | |
2293 | ||
2294 | /* Make sure we are not run recursively */ | |
2295 | if (m->dispatching_load_queue) | |
2296 | return 0; | |
2297 | ||
2298 | m->dispatching_load_queue = true; | |
2299 | ||
2300 | /* Dispatches the load queue. Takes a unit from the queue and | |
2301 | * tries to load its data until the queue is empty */ | |
2302 | ||
2303 | while ((u = m->load_queue)) { | |
2304 | assert(u->in_load_queue); | |
2305 | ||
2306 | unit_load(u); | |
2307 | n++; | |
2308 | } | |
2309 | ||
2310 | m->dispatching_load_queue = false; | |
2311 | ||
2312 | /* Dispatch the units waiting for their target dependencies to be added now, as all targets that we know about | |
2313 | * should be loaded and have aliases resolved */ | |
2314 | (void) manager_dispatch_target_deps_queue(m); | |
2315 | ||
2316 | return n; | |
2317 | } | |
2318 | ||
2319 | bool manager_unit_cache_should_retry_load(Unit *u) { | |
2320 | assert(u); | |
2321 | ||
2322 | /* Automatic reloading from disk only applies to units which were not found sometime in the past, and | |
2323 | * the not-found stub is kept pinned in the unit graph by dependencies. For units that were | |
2324 | * previously loaded, we don't do automatic reloading, and daemon-reload is necessary to update. */ | |
2325 | if (u->load_state != UNIT_NOT_FOUND) | |
2326 | return false; | |
2327 | ||
2328 | /* The cache has been updated since the last time we tried to load the unit. There might be new | |
2329 | * fragment paths to read. */ | |
2330 | if (u->manager->unit_cache_timestamp_hash != u->fragment_not_found_timestamp_hash) | |
2331 | return true; | |
2332 | ||
2333 | /* The cache needs to be updated because there are modifications on disk. */ | |
2334 | return !lookup_paths_timestamp_hash_same(&u->manager->lookup_paths, u->manager->unit_cache_timestamp_hash, NULL); | |
2335 | } | |
2336 | ||
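 | /* Summary of the helper below: resolves or allocates the Unit object and queues it for loading, | |
 | * without reading anything from disk yet. Returns 0 if the unit was already fully loaded (and | |
 | * nothing was queued), and 1 if it was (re-)added to the load queue, which manager_load_unit() | |
 | * then drains. */ | |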
2337 | int manager_load_unit_prepare( | |
2338 | Manager *m, | |
2339 | const char *name, | |
2340 | const char *path, | |
2341 | sd_bus_error *e, | |
2342 | Unit **ret) { | |
2343 | ||
2344 | _cleanup_(unit_freep) Unit *cleanup_unit = NULL; | |
2345 | _cleanup_free_ char *nbuf = NULL; | |
2346 | int r; | |
2347 | ||
2348 | assert(m); | |
2349 | assert(ret); | |
2350 | assert(name || path); | |
2351 | ||
2352 | /* This will prepare the unit for loading, but not actually load anything from disk. */ | |
2353 | ||
2354 | if (path && !path_is_absolute(path)) | |
2355 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path %s is not absolute.", path); | |
2356 | ||
2357 | if (!name) { | |
2358 | r = path_extract_filename(path, &nbuf); | |
2359 | if (r < 0) | |
2360 | return r; | |
2361 | if (r == O_DIRECTORY) | |
2362 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path '%s' refers to a directory, refusing.", path); | |
2363 | ||
2364 | name = nbuf; | |
2365 | } | |
2366 | ||
2367 | UnitType t = unit_name_to_type(name); | |
2368 | ||
2369 | if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) { | |
2370 | if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) | |
2371 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is missing the instance name.", name); | |
2372 | ||
2373 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is not valid.", name); | |
2374 | } | |
2375 | ||
2376 | Unit *unit = manager_get_unit(m, name); | |
2377 | if (unit) { | |
2378 | /* The time-based cache allows new units to be started without daemon-reload, | |
2379 | * but if they are already referenced (because of dependencies or ordering) | |
2380 | * then we have to force a load of the fragment. As an optimization, check | |
2381 | * first if anything in the usual paths was modified since the last time | |
2382 | * the cache was loaded. Also check if the last time an attempt to load the | |
2383 | * unit was made was before the most recent cache refresh, so that we know | |
2384 | * we need to try again — even if the cache is current, it might have been | |
2385 | * updated in a different context before we had a chance to retry loading | |
2386 | * this particular unit. */ | |
2387 | if (manager_unit_cache_should_retry_load(unit)) | |
2388 | unit->load_state = UNIT_STUB; | |
2389 | else { | |
2390 | *ret = unit; | |
2391 | return 0; /* The unit was already loaded */ | |
2392 | } | |
2393 | } else { | |
2394 | unit = cleanup_unit = unit_new(m, unit_vtable[t]->object_size); | |
2395 | if (!unit) | |
2396 | return -ENOMEM; | |
2397 | } | |
2398 | ||
2399 | if (path) { | |
2400 | r = free_and_strdup(&unit->fragment_path, path); | |
2401 | if (r < 0) | |
2402 | return r; | |
2403 | } | |
2404 | ||
2405 | r = unit_add_name(unit, name); | |
2406 | if (r < 0) | |
2407 | return r; | |
2408 | ||
2409 | unit_add_to_load_queue(unit); | |
2410 | unit_add_to_dbus_queue(unit); | |
2411 | unit_add_to_gc_queue(unit); | |
2412 | ||
2413 | *ret = unit; | |
2414 | TAKE_PTR(cleanup_unit); | |
2415 | ||
2416 | return 1; /* The unit was added to the load queue */ | |
2417 | } | |
2418 | ||
2419 | int manager_load_unit( | |
2420 | Manager *m, | |
2421 | const char *name, | |
2422 | const char *path, | |
2423 | sd_bus_error *e, | |
2424 | Unit **ret) { | |
2425 | int r; | |
2426 | ||
2427 | assert(m); | |
2428 | assert(ret); | |
2429 | ||
2430 | /* This will load the unit config, but not actually start any services or anything. */ | |
2431 | ||
2432 | r = manager_load_unit_prepare(m, name, path, e, ret); | |
2433 | if (r <= 0) | |
2434 | return r; | |
2435 | ||
2436 | /* Unit was newly loaded */ | |
2437 | manager_dispatch_load_queue(m); | |
2438 | *ret = unit_follow_merge(*ret); | |
2439 | return 0; | |
2440 | } | |
2441 | ||
2442 | int manager_load_startable_unit_or_warn( | |
2443 | Manager *m, | |
2444 | const char *name, | |
2445 | const char *path, | |
2446 | Unit **ret) { | |
2447 | ||
2448 | /* Load a unit, make sure it loaded fully and is not masked. */ | |
2449 | ||
2450 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
2451 | Unit *unit; | |
2452 | int r; | |
2453 | ||
2454 | r = manager_load_unit(m, name, path, &error, &unit); | |
2455 | if (r < 0) | |
2456 | return log_error_errno(r, "Failed to load %s %s: %s", | |
2457 | name ? "unit" : "unit file", name ?: path, | |
2458 | bus_error_message(&error, r)); | |
2459 | ||
2460 | r = bus_unit_validate_load_state(unit, &error); | |
2461 | if (r < 0) | |
2462 | return log_error_errno(r, "%s", bus_error_message(&error, r)); | |
2463 | ||
2464 | *ret = unit; | |
2465 | return 0; | |
2466 | } | |
2467 | ||
2468 | void manager_clear_jobs(Manager *m) { | |
2469 | Job *j; | |
2470 | ||
2471 | assert(m); | |
2472 | ||
2473 | while ((j = hashmap_first(m->jobs))) | |
2474 | /* No need to recurse. We're cancelling all jobs. */ | |
2475 | job_finish_and_invalidate(j, JOB_CANCELED, false, false); | |
2476 | } | |
2477 | ||
2478 | void manager_unwatch_pidref(Manager *m, const PidRef *pid) { | |
2479 | assert(m); | |
2480 | ||
2481 | for (;;) { | |
2482 | Unit *u; | |
2483 | ||
2484 | u = manager_get_unit_by_pidref_watching(m, pid); | |
2485 | if (!u) | |
2486 | break; | |
2487 | ||
2488 | unit_unwatch_pidref(u, pid); | |
2489 | } | |
2490 | } | |
2491 | ||
2492 | static int manager_dispatch_run_queue(sd_event_source *source, void *userdata) { | |
2493 | Manager *m = ASSERT_PTR(userdata); | |
2494 | Job *j; | |
2495 | ||
2496 | assert(source); | |
2497 | ||
2498 | while ((j = prioq_peek(m->run_queue))) { | |
2499 | assert(j->installed); | |
2500 | assert(j->in_run_queue); | |
2501 | ||
2502 | (void) job_run_and_invalidate(j); | |
2503 | } | |
2504 | ||
2505 | if (m->n_running_jobs > 0) | |
2506 | manager_watch_jobs_in_progress(m); | |
2507 | ||
2508 | if (m->n_on_console > 0) | |
2509 | manager_watch_idle_pipe(m); | |
2510 | ||
2511 | return 1; | |
2512 | } | |
2513 | ||
2514 | void manager_trigger_run_queue(Manager *m) { | |
2515 | int r; | |
2516 | ||
2517 | assert(m); | |
2518 | ||
2519 | r = sd_event_source_set_enabled( | |
2520 | m->run_queue_event_source, | |
2521 | prioq_isempty(m->run_queue) ? SD_EVENT_OFF : SD_EVENT_ONESHOT); | |
2522 | if (r < 0) | |
2523 | log_warning_errno(r, "Failed to enable job run queue event source, ignoring: %m"); | |
2524 | } | |
2525 | ||
2526 | static unsigned manager_dispatch_dbus_queue(Manager *m) { | |
2527 | unsigned n = 0, budget; | |
2528 | Unit *u; | |
2529 | Job *j; | |
2530 | ||
2531 | assert(m); | |
2532 | ||
2533 | /* When we are reloading, let's not wait with generating signals, since we need to exit the manager as quickly | |
2534 | * as we can. There's no point in throttling generation of signals in that case. */ | |
2535 | if (MANAGER_IS_RELOADING(m) || m->send_reloading_done || m->pending_reload_message) | |
2536 | budget = UINT_MAX; /* infinite budget in this case */ | |
2537 | else { | |
2538 | /* Anything to do at all? */ | |
2539 | if (!m->dbus_unit_queue && !m->dbus_job_queue) | |
2540 | return 0; | |
2541 | ||
2542 | /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's | |
2543 | * sit this cycle out, and process things in a later cycle when the queues got a bit emptier. */ | |
2544 | if (manager_bus_n_queued_write(m) > MANAGER_BUS_BUSY_THRESHOLD) | |
2545 | return 0; | |
2546 | ||
2547 | /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't | |
2548 | * overly full before this call we shouldn't increase it in size too wildly in one step, and we | |
2549 | * shouldn't monopolize CPU time with generating these messages. Note the difference in counting of | |
2550 | * this "budget" and the "threshold" above: the "budget" is decreased only once per generated message, | |
2551 | * regardless how many buses/direct connections it is enqueued on, while the "threshold" is applied to | |
2552 | * each queued instance of bus message, i.e. if the same message is enqueued to five buses/direct | |
2553 | * connections it will be counted five times. This difference in counting ("references" | |
2554 | * vs. "instances") is primarily a result of the fact that it's easier to implement it this way, | |
2555 | * however it also reflects the thinking that the "threshold" should put a limit on used queue memory, | |
2556 | * i.e. space, while the "budget" should put a limit on time. Also note that the "threshold" is | |
2557 | * currently chosen much higher than the "budget". */ | |
2558 | budget = MANAGER_BUS_MESSAGE_BUDGET; | |
2559 | } | |
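 | /* Worked example: if one unit change signal is generated while an API bus and four direct peer | |
 | * connections are attached, five message instances are enqueued. That counts 5 against the | |
 | * "threshold" (per instance, bounding queue memory) but only 1 against the "budget" (per | |
 | * generated message, bounding time spent in this iteration). */ | |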
2560 | ||
2561 | while (budget != 0 && (u = m->dbus_unit_queue)) { | |
2562 | ||
2563 | assert(u->in_dbus_queue); | |
2564 | ||
2565 | bus_unit_send_change_signal(u); | |
2566 | n++; | |
2567 | ||
2568 | if (budget != UINT_MAX) | |
2569 | budget--; | |
2570 | } | |
2571 | ||
2572 | while (budget != 0 && (j = m->dbus_job_queue)) { | |
2573 | assert(j->in_dbus_queue); | |
2574 | ||
2575 | bus_job_send_change_signal(j); | |
2576 | n++; | |
2577 | ||
2578 | if (budget != UINT_MAX) | |
2579 | budget--; | |
2580 | } | |
2581 | ||
2582 | if (m->send_reloading_done) { | |
2583 | m->send_reloading_done = false; | |
2584 | bus_manager_send_reloading(m, false); | |
2585 | n++; | |
2586 | } | |
2587 | ||
2588 | if (m->pending_reload_message) { | |
2589 | bus_send_pending_reload_message(m); | |
2590 | n++; | |
2591 | } | |
2592 | ||
2593 | return n; | |
2594 | } | |
2595 | ||
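 | /* Context (background, not from this file): on systems where the kernel cannot notify us | |
 | * directly about empty cgroups, the systemd-cgroups-agent helper sends the path of a released | |
 | * cgroup as a single datagram, e.g. "/system.slice/foo.service" (a hypothetical unit name), | |
 | * which this handler parses and forwards. */ | |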
2596 | static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
2597 | Manager *m = ASSERT_PTR(userdata); | |
2598 | char buf[PATH_MAX]; | |
2599 | ssize_t n; | |
2600 | ||
2601 | n = recv(fd, buf, sizeof(buf), 0); | |
2602 | if (n < 0) | |
2603 | return log_error_errno(errno, "Failed to read cgroups agent message: %m"); | |
2604 | if (n == 0) { | |
2605 | log_error("Got zero-length cgroups agent message, ignoring."); | |
2606 | return 0; | |
2607 | } | |
2608 | if ((size_t) n >= sizeof(buf)) { | |
2609 | log_error("Got overly long cgroups agent message, ignoring."); | |
2610 | return 0; | |
2611 | } | |
2612 | ||
2613 | if (memchr(buf, 0, n)) { | |
2614 | log_error("Got cgroups agent message with embedded NUL byte, ignoring."); | |
2615 | return 0; | |
2616 | } | |
2617 | buf[n] = 0; | |
2618 | ||
2619 | manager_notify_cgroup_empty(m, buf); | |
2620 | (void) bus_forward_agent_released(m, buf); | |
2621 | ||
2622 | return 0; | |
2623 | } | |
2624 | ||
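 | /* Background: with BARRIER=1 a client synchronizes against us. The client (e.g. via | |
 | * sd_notify_barrier() in libsystemd) sends a message consisting solely of "BARRIER=1" plus the | |
 | * write end of a pipe, then waits for POLLHUP on the read end. We never use the fd ourselves: | |
 | * it is closed when the containing FDSet is freed after the message is dropped, and that close | |
 | * is the acknowledgment the client waits for. */ | |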
2625 | static bool manager_process_barrier_fd(char * const *tags, FDSet *fds) { | |
2626 | ||
2627 | /* Nothing else must be sent when using BARRIER=1 */ | |
2628 | if (strv_contains(tags, "BARRIER=1")) { | |
2629 | if (strv_length(tags) != 1) | |
2630 | log_warning("Extra notification messages sent with BARRIER=1, ignoring everything."); | |
2631 | else if (fdset_size(fds) != 1) | |
2632 | log_warning("Got incorrect number of fds with BARRIER=1, closing them."); | |
2633 | ||
2634 | /* Drop the message if BARRIER=1 was found */ | |
2635 | return true; | |
2636 | } | |
2637 | ||
2638 | return false; | |
2639 | } | |
2640 | ||
2641 | static void manager_invoke_notify_message( | |
2642 | Manager *m, | |
2643 | Unit *u, | |
2644 | const struct ucred *ucred, | |
2645 | char * const *tags, | |
2646 | FDSet *fds) { | |
2647 | ||
2648 | assert(m); | |
2649 | assert(u); | |
2650 | assert(ucred); | |
2651 | assert(tags); | |
2652 | ||
2653 | if (u->notifygen == m->notifygen) /* Already invoked on this same unit in this same iteration? */ | |
2654 | return; | |
2655 | u->notifygen = m->notifygen; | |
2656 | ||
2657 | if (UNIT_VTABLE(u)->notify_message) | |
2658 | UNIT_VTABLE(u)->notify_message(u, ucred, tags, fds); | |
2659 | ||
2660 | else if (DEBUG_LOGGING) { | |
2661 | _cleanup_free_ char *joined = strv_join(tags, ", "); | |
2662 | char buf[CELLESCAPE_DEFAULT_LENGTH]; | |
2663 | ||
2664 | log_unit_debug(u, "Got notification message from unexpected unit type, ignoring: %s", | |
2665 | joined ? cellescape(buf, sizeof(buf), joined) : "(null)"); | |
2666 | } | |
2667 | } | |
2668 | ||
2669 | static int manager_get_units_for_pidref(Manager *m, const PidRef *pidref, Unit ***ret_units) { | |
2670 | /* Determine array of every unit that is interested in the specified process */ | |
2671 | ||
2672 | assert(m); | |
2673 | assert(pidref_is_set(pidref)); | |
2674 | ||
2675 | Unit *u1, *u2, **array; | |
2676 | u1 = manager_get_unit_by_pidref_cgroup(m, pidref); | |
2677 | u2 = hashmap_get(m->watch_pids, pidref); | |
2678 | array = hashmap_get(m->watch_pids_more, pidref); | |
2679 | ||
2680 | size_t n = 0; | |
2681 | if (u1) | |
2682 | n++; | |
2683 | if (u2) | |
2684 | n++; | |
2685 | if (array) | |
2686 | for (size_t j = 0; array[j]; j++) | |
2687 | n++; | |
2688 | ||
2689 | assert(n <= INT_MAX); /* Make sure we can reasonably return the counter as "int" */ | |
2690 | ||
2691 | if (ret_units) { | |
2692 | _cleanup_free_ Unit **units = NULL; | |
2693 | ||
2694 | if (n > 0) { | |
2695 | units = new(Unit*, n + 1); | |
2696 | if (!units) | |
2697 | return -ENOMEM; | |
2698 | ||
2699 | /* We return a dense array, and put the "main" unit first, i.e. unit in whose cgroup | |
2700 | * the process currently is. Note that we do not bother with filtering duplicates | |
2701 | * here. */ | |
2702 | ||
2703 | size_t i = 0; | |
2704 | if (u1) | |
2705 | units[i++] = u1; | |
2706 | if (u2) | |
2707 | units[i++] = u2; | |
2708 | if (array) | |
2709 | for (size_t j = 0; array[j]; j++) | |
2710 | units[i++] = array[j]; | |
2711 | assert(i == n); | |
2712 | ||
2713 | units[i] = NULL; /* end array in an extra NULL */ | |
2714 | } | |
2715 | ||
2716 | *ret_units = TAKE_PTR(units); | |
2717 | } | |
2718 | ||
2719 | return (int) n; | |
2720 | } | |
2721 | ||
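 | /* Format note: notification datagrams follow the sd_notify(3) protocol, i.e. newline-separated | |
 | * "KEY=VALUE" assignments. A service might send, for instance: | |
 | * | |
 | *     sd_notify(0, "READY=1\nSTATUS=Processing requests"); | |
 | * | |
 | * optionally with file descriptors attached (FDSTORE=1), and with SCM_CREDENTIALS attached by | |
 | * the kernel, which we require below in order to attribute the message to a unit. */ | |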
2722 | static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
2723 | Manager *m = ASSERT_PTR(userdata); | |
2724 | _cleanup_fdset_free_ FDSet *fds = NULL; | |
2725 | char buf[NOTIFY_BUFFER_MAX+1]; | |
2726 | struct iovec iovec = { | |
2727 | .iov_base = buf, | |
2728 | .iov_len = sizeof(buf)-1, | |
2729 | }; | |
2730 | CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred)) + | |
2731 | CMSG_SPACE(sizeof(int) * NOTIFY_FD_MAX)) control; | |
2732 | struct msghdr msghdr = { | |
2733 | .msg_iov = &iovec, | |
2734 | .msg_iovlen = 1, | |
2735 | .msg_control = &control, | |
2736 | .msg_controllen = sizeof(control), | |
2737 | }; | |
2738 | ||
2739 | struct cmsghdr *cmsg; | |
2740 | struct ucred *ucred = NULL; | |
2741 | _cleanup_strv_free_ char **tags = NULL; | |
2742 | int r, *fd_array = NULL; | |
2743 | size_t n_fds = 0; | |
2744 | ssize_t n; | |
2745 | ||
2746 | assert(m->notify_fd == fd); | |
2747 | ||
2748 | if (revents != EPOLLIN) { | |
2749 | log_warning("Got unexpected poll event for notify fd."); | |
2750 | return 0; | |
2751 | } | |
2752 | ||
2753 | n = recvmsg_safe(m->notify_fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC|MSG_TRUNC); | |
2754 | if (ERRNO_IS_NEG_TRANSIENT(n)) | |
2755 | return 0; /* Spurious wakeup, try again */ | |
2756 | if (n == -EXFULL) { | |
2757 | log_warning("Got message with truncated control data (too many fds sent?), ignoring."); | |
2758 | return 0; | |
2759 | } | |
2760 | if (n < 0) | |
2761 | /* If this is any other, real error, then stop processing this socket. This of course means | |
2762 | * we won't take notification messages anymore, but that's still better than busy looping: | |
2763 | * being woken up over and over again, but being unable to actually read the message from the | |
2764 | * socket. */ | |
2765 | return log_error_errno(n, "Failed to receive notification message: %m"); | |
2766 | ||
2767 | CMSG_FOREACH(cmsg, &msghdr) | |
2768 | if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { | |
2769 | ||
2770 | assert(!fd_array); | |
2771 | fd_array = CMSG_TYPED_DATA(cmsg, int); | |
2772 | n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int); | |
2773 | ||
2774 | } else if (cmsg->cmsg_level == SOL_SOCKET && | |
2775 | cmsg->cmsg_type == SCM_CREDENTIALS && | |
2776 | cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) { | |
2777 | ||
2778 | assert(!ucred); | |
2779 | ucred = CMSG_TYPED_DATA(cmsg, struct ucred); | |
2780 | } | |
2781 | ||
2782 | if (n_fds > 0) { | |
2783 | assert(fd_array); | |
2784 | ||
2785 | r = fdset_new_array(&fds, fd_array, n_fds); | |
2786 | if (r < 0) { | |
2787 | close_many(fd_array, n_fds); | |
2788 | log_oom(); | |
2789 | return 0; | |
2790 | } | |
2791 | } | |
2792 | ||
2793 | if (!ucred || !pid_is_valid(ucred->pid)) { | |
2794 | log_warning("Received notify message without valid credentials. Ignoring."); | |
2795 | return 0; | |
2796 | } | |
2797 | ||
2798 | if ((size_t) n >= sizeof(buf) || (msghdr.msg_flags & MSG_TRUNC)) { | |
2799 | log_warning("Received notify message exceeded maximum size. Ignoring."); | |
2800 | return 0; | |
2801 | } | |
2802 | ||
2803 | /* As extra safety check, let's make sure the string we get doesn't contain embedded NUL bytes. | |
2804 | * We permit one trailing NUL byte in the message, but don't expect it. */ | |
2805 | if (n > 1 && memchr(buf, 0, n-1)) { | |
2806 | log_warning("Received notify message with embedded NUL bytes. Ignoring."); | |
2807 | return 0; | |
2808 | } | |
2809 | ||
2810 | /* Make sure it's NUL-terminated, then parse it to obtain the tags list. */ | |
2811 | buf[n] = 0; | |
2812 | tags = strv_split_newlines(buf); | |
2813 | if (!tags) { | |
2814 | log_oom(); | |
2815 | return 0; | |
2816 | } | |
2817 | ||
2818 | /* Possibly a barrier fd, let's see. */ | |
2819 | if (manager_process_barrier_fd(tags, fds)) { | |
2820 | log_debug("Received barrier notification message from PID " PID_FMT ".", ucred->pid); | |
2821 | return 0; | |
2822 | } | |
2823 | ||
2824 | /* Increase the generation counter used for filtering out duplicate unit invocations. */ | |
2825 | m->notifygen++; | |
2826 | ||
2827 | /* Generate lookup key from the PID (we have no pidfd here, after all) */ | |
2828 | PidRef pidref = PIDREF_MAKE_FROM_PID(ucred->pid); | |
2829 | ||
2830 | /* Notify every unit that might be interested, which might be multiple. */ | |
2831 | _cleanup_free_ Unit **array = NULL; | |
2832 | ||
2833 | int n_array = manager_get_units_for_pidref(m, &pidref, &array); | |
2834 | if (n_array < 0) { | |
2835 | log_warning_errno(n_array, "Failed to determine units for PID " PID_FMT ", ignoring: %m", ucred->pid); | |
2836 | return 0; | |
2837 | } | |
2838 | if (n_array == 0) | |
2839 | log_debug("Cannot find unit for notify message of PID "PID_FMT", ignoring.", ucred->pid); | |
2840 | else | |
2841 | /* And now invoke the per-unit callbacks. Note that manager_invoke_notify_message() will handle | |
2842 | * duplicate units – making sure we only invoke each unit's handler once. */ | |
2843 | FOREACH_ARRAY(u, array, n_array) | |
2844 | manager_invoke_notify_message(m, *u, ucred, tags, fds); | |
2845 | ||
2846 | if (!fdset_isempty(fds)) | |
2847 | log_warning("Got extra auxiliary fds with notification message, closing them."); | |
2848 | ||
2849 | return 0; | |
2850 | } | |
2851 | ||
2852 | static void manager_invoke_sigchld_event( | |
2853 | Manager *m, | |
2854 | Unit *u, | |
2855 | const siginfo_t *si) { | |
2856 | ||
2857 | assert(m); | |
2858 | assert(u); | |
2859 | assert(si); | |
2860 | ||
2861 | /* Already invoked the handler of this unit in this iteration? Then don't process this again */ | |
2862 | if (u->sigchldgen == m->sigchldgen) | |
2863 | return; | |
2864 | u->sigchldgen = m->sigchldgen; | |
2865 | ||
2866 | log_unit_debug(u, "Child "PID_FMT" belongs to %s.", si->si_pid, u->id); | |
2867 | unit_unwatch_pid(u, si->si_pid); | |
2868 | ||
2869 | if (UNIT_VTABLE(u)->sigchld_event) | |
2870 | UNIT_VTABLE(u)->sigchld_event(u, si->si_pid, si->si_code, si->si_status); | |
2871 | } | |
2872 | ||
2873 | static int manager_dispatch_sigchld(sd_event_source *source, void *userdata) { | |
2874 | Manager *m = ASSERT_PTR(userdata); | |
2875 | siginfo_t si = {}; | |
2876 | int r; | |
2877 | ||
2878 | assert(source); | |
2879 | ||
2880 | /* First we call waitid() for a PID and do not reap the zombie. That way we can still access | |
2881 | * /proc/$PID for it while it is a zombie. */ | |
2882 | ||
2883 | if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) { | |
2884 | ||
2885 | if (errno != ECHILD) | |
2886 | log_error_errno(errno, "Failed to peek for child with waitid(), ignoring: %m"); | |
2887 | ||
2888 | goto turn_off; | |
2889 | } | |
2890 | ||
2891 | if (si.si_pid <= 0) | |
2892 | goto turn_off; | |
2893 | ||
2894 | if (IN_SET(si.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED)) { | |
2895 | _cleanup_free_ char *name = NULL; | |
2896 | (void) pid_get_comm(si.si_pid, &name); | |
2897 | ||
2898 | log_debug("Child "PID_FMT" (%s) died (code=%s, status=%i/%s)", | |
2899 | si.si_pid, strna(name), | |
2900 | sigchld_code_to_string(si.si_code), | |
2901 | si.si_status, | |
2902 | strna(si.si_code == CLD_EXITED | |
2903 | ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL) | |
2904 | : signal_to_string(si.si_status))); | |
2905 | ||
2906 | /* Increase the generation counter used for filtering out duplicate unit invocations */ | |
2907 | m->sigchldgen++; | |
2908 | ||
2909 | /* We look this up by a PidRef that only consists of the PID. After all we couldn't create a | |
2910 | * pidfd here anymore even if we wanted to (since the process just exited). */ | |
2911 | PidRef pidref = PIDREF_MAKE_FROM_PID(si.si_pid); | |
2912 | ||
2913 | /* And now figure out the units this belongs to, there might be multiple... */ | |
2914 | _cleanup_free_ Unit **array = NULL; | |
2915 | int n_array = manager_get_units_for_pidref(m, &pidref, &array); | |
2916 | if (n_array < 0) | |
2917 | log_warning_errno(n_array, "Failed to get units for process " PID_FMT ", ignoring: %m", si.si_pid); | |
2918 | else if (n_array == 0) | |
2919 | log_debug("Got SIGCHLD for process " PID_FMT " we weren't interested in, ignoring.", si.si_pid); | |
2920 | else { | |
2921 | /* We check for an OOM condition, in case we got SIGCHLD before the OOM notification. | |
2922 | * We only do this for the cgroup the PID belonged to, which is the first unit in the array. */ | |
2923 | (void) unit_check_oom(array[0]); | |
2924 | ||
2925 | /* We check if systemd-oomd performed a kill so that we log and notify appropriately */ | |
2926 | (void) unit_check_oomd_kill(array[0]); | |
2927 | ||
2928 | /* Finally, execute them all. Note that the array might contain duplicates, but that's fine, | |
2929 | * manager_invoke_sigchld_event() will ensure we only invoke the handlers once for each | |
2930 | * iteration. */ | |
2931 | FOREACH_ARRAY(u, array, n_array) | |
2932 | manager_invoke_sigchld_event(m, *u, &si); | |
2933 | } | |
2934 | } | |
2935 | ||
2936 | /* And now, we actually reap the zombie. */ | |
2937 | if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) { | |
2938 | log_error_errno(errno, "Failed to dequeue child, ignoring: %m"); | |
2939 | return 0; | |
2940 | } | |
2941 | ||
2942 | return 0; | |
2943 | ||
2944 | turn_off: | |
2945 | /* All children processed for now, turn off event source */ | |
2946 | ||
2947 | r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF); | |
2948 | if (r < 0) | |
2949 | return log_error_errno(r, "Failed to disable SIGCHLD event source: %m"); | |
2950 | ||
2951 | return 0; | |
2952 | } | |
2953 | ||
2954 | static void manager_start_special(Manager *m, const char *name, JobMode mode) { | |
2955 | Job *job; | |
2956 | ||
2957 | if (manager_add_job_by_name_and_warn(m, JOB_START, name, mode, NULL, &job) < 0) | |
2958 | return; | |
2959 | ||
2960 | const char *s = unit_status_string(job->unit, NULL); | |
2961 | ||
2962 | log_info("Activating special unit %s...", s); | |
2963 | ||
2964 | (void) sd_notifyf(/* unset_environment= */ false, | |
2965 | "STATUS=Activating special unit %s...", s); | |
2966 | m->status_ready = false; | |
2967 | } | |
2968 | ||
2969 | static void manager_handle_ctrl_alt_del(Manager *m) { | |
2970 | /* If the user presses C-A-D more than | |
2971 | * 7 times within 2s, we reboot/shutdown immediately, | |
2972 | * unless it was disabled in system.conf */ | |
2973 | ||
2974 | if (ratelimit_below(&m->ctrl_alt_del_ratelimit) || m->cad_burst_action == EMERGENCY_ACTION_NONE) | |
2975 | manager_start_special(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE_IRREVERSIBLY); | |
2976 | else | |
2977 | emergency_action(m, m->cad_burst_action, EMERGENCY_ACTION_WARN, NULL, -1, | |
2978 | "Ctrl-Alt-Del was pressed more than 7 times within 2s"); | |
2979 | } | |
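
/* For reference, a minimal sketch of the RateLimit idiom used above, assuming the
 * interval/burst fields from ratelimit.h (the C-A-D limit is 7 presses within 2s):
 *
 *     RateLimit rl = { .interval = 2*USEC_PER_SEC, .burst = 7 };
 *     if (ratelimit_below(&rl))
 *             ... still within budget, take the normal action ...
 *     else
 *             ... burst exceeded, escalate ...
 */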
2980 | ||
2981 | static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
2982 | Manager *m = ASSERT_PTR(userdata); | |
2983 | ssize_t n; | |
2984 | struct signalfd_siginfo sfsi; | |
2985 | int r; | |
2986 | ||
2987 | assert(m->signal_fd == fd); | |
2988 | ||
2989 | if (revents != EPOLLIN) { | |
2990 | log_warning("Got unexpected events from signal file descriptor."); | |
2991 | return 0; | |
2992 | } | |
2993 | ||
2994 | n = read(m->signal_fd, &sfsi, sizeof(sfsi)); | |
2995 | if (n < 0) { | |
2996 | if (ERRNO_IS_TRANSIENT(errno)) | |
2997 | return 0; | |
2998 | ||
2999 | /* We return an error here, which will kill this handler, | |
3000 | * to avoid a busy loop on read error. */ | |
3001 | return log_error_errno(errno, "Reading from signal fd failed: %m"); | |
3002 | } | |
3003 | if (n != sizeof(sfsi)) { | |
3004 | log_warning("Truncated read from signal fd (%zi bytes), ignoring!", n); | |
3005 | return 0; | |
3006 | } | |
3007 | ||
3008 | log_received_signal(sfsi.ssi_signo == SIGCHLD || | |
3009 | (sfsi.ssi_signo == SIGTERM && MANAGER_IS_USER(m)) | |
3010 | ? LOG_DEBUG : LOG_INFO, | |
3011 | &sfsi); | |
3012 | ||
3013 | switch (sfsi.ssi_signo) { | |
3014 | ||
3015 | case SIGCHLD: | |
3016 | r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON); | |
3017 | if (r < 0) | |
3018 | log_warning_errno(r, "Failed to enable SIGCHLD event source, ignoring: %m"); | |
3019 | ||
3020 | break; | |
3021 | ||
3022 | case SIGTERM: | |
3023 | if (MANAGER_IS_SYSTEM(m)) { | |
3024 | /* This is for compatibility with the original sysvinit */ | |
3025 | if (verify_run_space_and_log("Refusing to reexecute") < 0) | |
3026 | break; | |
3027 | ||
3028 | m->objective = MANAGER_REEXECUTE; | |
3029 | break; | |
3030 | } | |
3031 | ||
3032 | _fallthrough_; | |
3033 | case SIGINT: | |
3034 | if (MANAGER_IS_SYSTEM(m)) | |
3035 | manager_handle_ctrl_alt_del(m); | |
3036 | else | |
3037 | manager_start_special(m, SPECIAL_EXIT_TARGET, JOB_REPLACE_IRREVERSIBLY); | |
3038 | break; | |
3039 | ||
3040 | case SIGWINCH: | |
3041 | /* This is a nop on non-init */ | |
3042 | if (MANAGER_IS_SYSTEM(m)) | |
3043 | manager_start_special(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE); | |
3044 | ||
3045 | break; | |
3046 | ||
3047 | case SIGPWR: | |
3048 | /* This is a nop on non-init */ | |
3049 | if (MANAGER_IS_SYSTEM(m)) | |
3050 | manager_start_special(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE); | |
3051 | ||
3052 | break; | |
3053 | ||
3054 | case SIGUSR1: | |
3055 | if (manager_dbus_is_running(m, false)) { | |
3056 | log_info("Trying to reconnect to bus..."); | |
3057 | ||
3058 | (void) bus_init_api(m); | |
3059 | ||
3060 | if (MANAGER_IS_SYSTEM(m)) | |
3061 | (void) bus_init_system(m); | |
3062 | } else | |
3063 | manager_start_special(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE); | |
3064 | ||
3065 | break; | |
3066 | ||
3067 | case SIGUSR2: { | |
3068 | _cleanup_free_ char *dump = NULL; | |
3069 | ||
3070 | r = manager_get_dump_string(m, /* patterns= */ NULL, &dump); | |
3071 | if (r < 0) { | |
3072 | log_warning_errno(r, "Failed to acquire manager dump: %m"); | |
3073 | break; | |
3074 | } | |
3075 | ||
3076 | log_dump(LOG_INFO, dump); | |
3077 | break; | |
3078 | } | |
3079 | ||
3080 | case SIGHUP: | |
3081 | if (verify_run_space_and_log("Refusing to reload") < 0) | |
3082 | break; | |
3083 | ||
3084 | m->objective = MANAGER_RELOAD; | |
3085 | break; | |
3086 | ||
3087 | default: { | |
3088 | ||
3089 | /* Starting SIGRTMIN+0 */ | |
3090 | static const struct { | |
3091 | const char *target; | |
3092 | JobMode mode; | |
3093 | } target_table[] = { | |
3094 | [0] = { SPECIAL_DEFAULT_TARGET, JOB_ISOLATE }, | |
3095 | [1] = { SPECIAL_RESCUE_TARGET, JOB_ISOLATE }, | |
3096 | [2] = { SPECIAL_EMERGENCY_TARGET, JOB_ISOLATE }, | |
3097 | [3] = { SPECIAL_HALT_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3098 | [4] = { SPECIAL_POWEROFF_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3099 | [5] = { SPECIAL_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3100 | [6] = { SPECIAL_KEXEC_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3101 | [7] = { SPECIAL_SOFT_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3102 | }; | |
3103 | ||
3104 | /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */ | |
3105 | static const ManagerObjective objective_table[] = { | |
3106 | [0] = MANAGER_HALT, | |
3107 | [1] = MANAGER_POWEROFF, | |
3108 | [2] = MANAGER_REBOOT, | |
3109 | [3] = MANAGER_KEXEC, | |
3110 | [4] = MANAGER_SOFT_REBOOT, | |
3111 | }; | |
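
                /* Illustratively (per systemd(1)): "kill -s SIGRTMIN+5 1" enqueues reboot.target
                 * via target_table above, while "kill -s SIGRTMIN+15 1" sets the objective to
                 * MANAGER_REBOOT directly, bypassing the job queue. */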
3112 | ||
3113 | if ((int) sfsi.ssi_signo >= SIGRTMIN+0 && | |
3114 | (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) { | |
3115 | int idx = (int) sfsi.ssi_signo - SIGRTMIN; | |
3116 | manager_start_special(m, target_table[idx].target, target_table[idx].mode); | |
3117 | break; | |
3118 | } | |
3119 | ||
3120 | if ((int) sfsi.ssi_signo >= SIGRTMIN+13 && | |
3121 | (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(objective_table)) { | |
3122 | m->objective = objective_table[sfsi.ssi_signo - SIGRTMIN - 13]; | |
3123 | break; | |
3124 | } | |
3125 | ||
3126 | switch (sfsi.ssi_signo - SIGRTMIN) { | |
3127 | ||
3128 | case 18: { | |
3129 | bool generic = false; | |
3130 | ||
3131 | if (sfsi.ssi_code != SI_QUEUE) | |
3132 | generic = true; | |
3133 | else { | |
3134 | /* Override a few select commands by our own PID1-specific logic */ | |
3135 | ||
3136 | switch (sfsi.ssi_int) { | |
3137 | ||
3138 | case _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE..._COMMON_SIGNAL_COMMAND_LOG_LEVEL_END: | |
3139 | manager_override_log_level(m, sfsi.ssi_int - _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE); | |
3140 | break; | |
3141 | ||
3142 | case COMMON_SIGNAL_COMMAND_CONSOLE: | |
3143 | manager_override_log_target(m, LOG_TARGET_CONSOLE); | |
3144 | break; | |
3145 | ||
3146 | case COMMON_SIGNAL_COMMAND_JOURNAL: | |
3147 | manager_override_log_target(m, LOG_TARGET_JOURNAL); | |
3148 | break; | |
3149 | ||
3150 | case COMMON_SIGNAL_COMMAND_KMSG: | |
3151 | manager_override_log_target(m, LOG_TARGET_KMSG); | |
3152 | break; | |
3153 | ||
3154 | case COMMON_SIGNAL_COMMAND_NULL: | |
3155 | manager_override_log_target(m, LOG_TARGET_NULL); | |
3156 | break; | |
3157 | ||
3158 | case MANAGER_SIGNAL_COMMAND_DUMP_JOBS: { | |
3159 | _cleanup_free_ char *dump_jobs = NULL; | |
3160 | ||
3161 | r = manager_get_dump_jobs_string(m, /* patterns= */ NULL, " ", &dump_jobs); | |
3162 | if (r < 0) { | |
3163 | log_warning_errno(r, "Failed to acquire manager jobs dump: %m"); | |
3164 | break; | |
3165 | } | |
3166 | ||
3167 | log_dump(LOG_INFO, dump_jobs); | |
3168 | break; | |
3169 | } | |
3170 | ||
3171 | default: | |
3172 | generic = true; | |
3173 | } | |
3174 | } | |
3175 | ||
3176 | if (generic) | |
3177 | return sigrtmin18_handler(source, &sfsi, NULL); | |
3178 | ||
3179 | break; | |
3180 | } | |
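
                /* The commands handled above arrive via sigqueue(3), which is what makes
                 * ssi_code == SI_QUEUE and carries the command in ssi_int; illustratively:
                 *
                 *     union sigval v = { .sival_int = COMMON_SIGNAL_COMMAND_KMSG };
                 *     (void) sigqueue(1, SIGRTMIN+18, v);
                 */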
3181 | ||
3182 | case 20: | |
3183 | manager_override_show_status(m, SHOW_STATUS_YES, "signal"); | |
3184 | break; | |
3185 | ||
3186 | case 21: | |
3187 | manager_override_show_status(m, SHOW_STATUS_NO, "signal"); | |
3188 | break; | |
3189 | ||
3190 | case 22: | |
3191 | manager_override_log_level(m, LOG_DEBUG); | |
3192 | break; | |
3193 | ||
3194 | case 23: | |
3195 | manager_restore_original_log_level(m); | |
3196 | break; | |
3197 | ||
3198 | case 24: | |
3199 | if (MANAGER_IS_USER(m)) { | |
3200 | m->objective = MANAGER_EXIT; | |
3201 | return 0; | |
3202 | } | |
3203 | ||
3204 | /* This is a nop on init */ | |
3205 | break; | |
3206 | ||
3207 | case 25: | |
3208 | m->objective = MANAGER_REEXECUTE; | |
3209 | break; | |
3210 | ||
3211 | case 26: | |
3212 | case 29: /* compatibility: used to be mapped to LOG_TARGET_SYSLOG_OR_KMSG */ | |
3213 | manager_restore_original_log_target(m); | |
3214 | break; | |
3215 | ||
3216 | case 27: | |
3217 | manager_override_log_target(m, LOG_TARGET_CONSOLE); | |
3218 | break; | |
3219 | ||
3220 | case 28: | |
3221 | manager_override_log_target(m, LOG_TARGET_KMSG); | |
3222 | break; | |
3223 | ||
3224 | default: | |
3225 | log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo)); | |
3226 | } | |
3227 | }} | |
3228 | ||
3229 | return 0; | |
3230 | } | |
3231 | ||
3232 | static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
3233 | Manager *m = ASSERT_PTR(userdata); | |
3234 | Unit *u; | |
3235 | ||
3236 | log_struct(LOG_DEBUG, | |
3237 | "MESSAGE_ID=" SD_MESSAGE_TIME_CHANGE_STR, | |
3238 | LOG_MESSAGE("Time has been changed")); | |
3239 | ||
3240 | /* Restart the watch */ | |
3241 | (void) manager_setup_time_change(m); | |
3242 | ||
3243 | HASHMAP_FOREACH(u, m->units) | |
3244 | if (UNIT_VTABLE(u)->time_change) | |
3245 | UNIT_VTABLE(u)->time_change(u); | |
3246 | ||
3247 | return 0; | |
3248 | } | |
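
/* A sketch of how such a time-change watch is typically armed (assumed to match
 * manager_setup_time_change(); see timerfd_create(2)): a CLOCK_REALTIME timer set to
 * the distant future with TFD_TIMER_CANCEL_ON_SET becomes readable (the read fails
 * with ECANCELED) whenever the realtime clock is set discontinuously:
 *
 *     int fd = timerfd_create(CLOCK_REALTIME, TFD_NONBLOCK|TFD_CLOEXEC);
 *     struct itimerspec its = { .it_value.tv_sec = TIME_T_MAX };
 *     (void) timerfd_settime(fd, TFD_TIMER_ABSTIME|TFD_TIMER_CANCEL_ON_SET, &its, NULL);
 */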
3249 | ||
3250 | static int manager_dispatch_timezone_change( | |
3251 | sd_event_source *source, | |
3252 | const struct inotify_event *e, | |
3253 | void *userdata) { | |
3254 | ||
3255 | Manager *m = ASSERT_PTR(userdata); | |
3256 | int changed; | |
3257 | Unit *u; | |
3258 | ||
3259 | log_debug("inotify event for /etc/localtime"); | |
3260 | ||
3261 | changed = manager_read_timezone_stat(m); | |
3262 | if (changed <= 0) | |
3263 | return changed; | |
3264 | ||
3265 | /* Something changed, restart the watch, to ensure we watch the new /etc/localtime if it changed */ | |
3266 | (void) manager_setup_timezone_change(m); | |
3267 | ||
3268 | /* Read the new timezone */ | |
3269 | tzset(); | |
3270 | ||
3271 | log_debug("Timezone has been changed (now: %s).", tzname[daylight]); | |
3272 | ||
3273 | HASHMAP_FOREACH(u, m->units) | |
3274 | if (UNIT_VTABLE(u)->timezone_change) | |
3275 | UNIT_VTABLE(u)->timezone_change(u); | |
3276 | ||
3277 | return 0; | |
3278 | } | |
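
/* The watch re-established above is an inotify watch covering /etc/localtime; since
 * that symlink is usually replaced atomically via rename(2), the parent directory
 * needs watching too. A hypothetical sketch of the primitive (not necessarily what
 * manager_setup_timezone_change() does exactly):
 *
 *     int ifd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
 *     (void) inotify_add_watch(ifd, "/etc", IN_CREATE|IN_MOVED_TO);
 */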
3279 | ||
3280 | static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
3281 | Manager *m = ASSERT_PTR(userdata); | |
3282 | ||
3283 | assert(m->idle_pipe[2] == fd); | |
3284 | ||
3285 | /* There's at least one Type=idle child that just gave up on us waiting for the boot process to | |
3286 | * complete. Let's now turn off any further console output if there's at least one service that needs | |
3287 | * console access, so that from now on our own output should not spill into that service's output | |
3288 | * anymore. After all, we support Type=idle only to beautify console output and it generally is set | |
3289 | * on services that want to own the console exclusively without our interference. */ | |
3290 | m->no_console_output = m->n_on_console > 0; | |
3291 | ||
3292 | /* Acknowledge the child's request, and let all other children know too that they shouldn't wait | |
3293 | * any longer by closing the pipes towards them, which is what they are waiting for. */ | |
3294 | manager_close_idle_pipe(m); | |
3295 | ||
3296 | return 0; | |
3297 | } | |
3298 | ||
3299 | static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata) { | |
3300 | Manager *m = ASSERT_PTR(userdata); | |
3301 | int r; | |
3302 | ||
3303 | assert(source); | |
3304 | ||
3305 | manager_print_jobs_in_progress(m); | |
3306 | ||
3307 | r = sd_event_source_set_time_relative(source, JOBS_IN_PROGRESS_PERIOD_USEC); | |
3308 | if (r < 0) | |
3309 | return r; | |
3310 | ||
3311 | return sd_event_source_set_enabled(source, SD_EVENT_ONESHOT); | |
3312 | } | |
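
/* Note: sd-event time sources do not repeat on their own; pushing the deadline
 * forward with sd_event_source_set_time_relative() and re-enabling the source as
 * SD_EVENT_ONESHOT, as done above, is the idiomatic way to get a periodic timer. */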
3313 | ||
3314 | int manager_loop(Manager *m) { | |
3315 | RateLimit rl = { .interval = 1*USEC_PER_SEC, .burst = 50000 }; | |
3316 | int r; | |
3317 | ||
3318 | assert(m); | |
3319 | assert(m->objective == MANAGER_OK); /* Ensure manager_startup() has been called */ | |
3320 | ||
3321 | manager_check_finished(m); | |
3322 | ||
3323 | /* There might still be some zombies hanging around from before we were exec()'ed. Let's reap them. */ | |
3324 | r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON); | |
3325 | if (r < 0) | |
3326 | return log_error_errno(r, "Failed to enable SIGCHLD event source: %m"); | |
3327 | ||
3328 | while (m->objective == MANAGER_OK) { | |
3329 | ||
3330 | (void) watchdog_ping(); | |
3331 | ||
3332 | if (!ratelimit_below(&rl)) { | |
3333 | /* Yay, something is going seriously wrong, pause a little */ | |
3334 | log_warning("Looping too fast. Throttling execution a little."); | |
3335 | sleep(1); | |
3336 | } | |
3337 | ||
3338 | if (manager_dispatch_load_queue(m) > 0) | |
3339 | continue; | |
3340 | ||
3341 | if (manager_dispatch_gc_job_queue(m) > 0) | |
3342 | continue; | |
3343 | ||
3344 | if (manager_dispatch_gc_unit_queue(m) > 0) | |
3345 | continue; | |
3346 | ||
3347 | if (manager_dispatch_cleanup_queue(m) > 0) | |
3348 | continue; | |
3349 | ||
3350 | if (manager_dispatch_cgroup_realize_queue(m) > 0) | |
3351 | continue; | |
3352 | ||
3353 | if (manager_dispatch_start_when_upheld_queue(m) > 0) | |
3354 | continue; | |
3355 | ||
3356 | if (manager_dispatch_stop_when_bound_queue(m) > 0) | |
3357 | continue; | |
3358 | ||
3359 | if (manager_dispatch_stop_when_unneeded_queue(m) > 0) | |
3360 | continue; | |
3361 | ||
3362 | if (manager_dispatch_release_resources_queue(m) > 0) | |
3363 | continue; | |
3364 | ||
3365 | if (manager_dispatch_dbus_queue(m) > 0) | |
3366 | continue; | |
3367 | ||
3368 | /* Sleep for watchdog runtime wait time */ | |
3369 | r = sd_event_run(m->event, watchdog_runtime_wait()); | |
3370 | if (r < 0) | |
3371 | return log_error_errno(r, "Failed to run event loop: %m"); | |
3372 | } | |
3373 | ||
3374 | return m->objective; | |
3375 | } | |
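
/* Note on manager_loop(): the manager-internal work queues are drained ahead of
 * sd_event_run(), so a burst of queued work is fully processed before we block on
 * external events; each dispatch helper returns > 0 if it did something, which
 * restarts the cascade from the top. */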
3376 | ||
3377 | int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u) { | |
3378 | _cleanup_free_ char *n = NULL; | |
3379 | sd_id128_t invocation_id; | |
3380 | Unit *u; | |
3381 | int r; | |
3382 | ||
3383 | assert(m); | |
3384 | assert(s); | |
3385 | assert(_u); | |
3386 | ||
3387 | r = unit_name_from_dbus_path(s, &n); | |
3388 | if (r < 0) | |
3389 | return r; | |
3390 | ||
3391 | /* Permit addressing units by invocation ID: if the passed bus path is suffixed by a 128-bit ID then | |
3392 | * we use it as invocation ID. */ | |
3393 | r = sd_id128_from_string(n, &invocation_id); | |
3394 | if (r >= 0) { | |
3395 | u = hashmap_get(m->units_by_invocation_id, &invocation_id); | |
3396 | if (u) { | |
3397 | *_u = u; | |
3398 | return 0; | |
3399 | } | |
3400 | ||
3401 | return sd_bus_error_setf(e, BUS_ERROR_NO_UNIT_FOR_INVOCATION_ID, | |
3402 | "No unit with the specified invocation ID " SD_ID128_FORMAT_STR " known.", | |
3403 | SD_ID128_FORMAT_VAL(invocation_id)); | |
3404 | } | |
3405 | ||
3406 | /* If this didn't work, we check if this is a unit name */ | |
3407 | if (!unit_name_is_valid(n, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) { | |
3408 | _cleanup_free_ char *nn = NULL; | |
3409 | ||
3410 | nn = cescape(n); | |
3411 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, | |
3412 | "Unit name %s is neither a valid invocation ID nor unit name.", strnull(nn)); | |
3413 | } | |
3414 | ||
3415 | r = manager_load_unit(m, n, NULL, e, &u); | |
3416 | if (r < 0) | |
3417 | return r; | |
3418 | ||
3419 | *_u = u; | |
3420 | return 0; | |
3421 | } | |
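
/* Example of the mapping handled above: the bus path
 * "/org/freedesktop/systemd1/unit/ssh_2eservice" unescapes to the unit name
 * "ssh.service", while a path suffixed with 32 hex digits is looked up as an
 * invocation ID instead. */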
3422 | ||
3423 | int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) { | |
3424 | const char *p; | |
3425 | unsigned id; | |
3426 | Job *j; | |
3427 | int r; | |
3428 | ||
3429 | assert(m); | |
3430 | assert(s); | |
3431 | assert(_j); | |
3432 | ||
3433 | p = startswith(s, "/org/freedesktop/systemd1/job/"); | |
3434 | if (!p) | |
3435 | return -EINVAL; | |
3436 | ||
3437 | r = safe_atou(p, &id); | |
3438 | if (r < 0) | |
3439 | return r; | |
3440 | ||
3441 | j = manager_get_job(m, id); | |
3442 | if (!j) | |
3443 | return -ENOENT; | |
3444 | ||
3445 | *_j = j; | |
3446 | ||
3447 | return 0; | |
3448 | } | |
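
/* Example: "/org/freedesktop/systemd1/job/42" parses to job ID 42, which is then
 * resolved via manager_get_job(). */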
3449 | ||
3450 | void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) { | |
3451 | ||
3452 | #if HAVE_AUDIT | |
3453 | _cleanup_free_ char *p = NULL; | |
3454 | const char *msg; | |
3455 | int audit_fd, r; | |
3456 | ||
3457 | assert(m); | |
3458 | assert(u); | |
3459 | ||
3460 | if (!MANAGER_IS_SYSTEM(m)) | |
3461 | return; | |
3462 | ||
3463 | /* Don't generate audit events if the service was already started and we're just deserializing */ | |
3464 | if (MANAGER_IS_RELOADING(m)) | |
3465 | return; | |
3466 | ||
3467 | audit_fd = get_audit_fd(); | |
3468 | if (audit_fd < 0) | |
3469 | return; | |
3470 | ||
3471 | r = unit_name_to_prefix_and_instance(u->id, &p); | |
3472 | if (r < 0) { | |
3473 | log_warning_errno(r, "Failed to extract prefix and instance of unit name, ignoring: %m"); | |
3474 | return; | |
3475 | } | |
3476 | ||
3477 | msg = strjoina("unit=", p); | |
3478 | if (audit_log_user_comm_message(audit_fd, type, msg, "systemd", NULL, NULL, NULL, success) < 0) { | |
3479 | if (ERRNO_IS_PRIVILEGE(errno)) { | |
3480 | /* We aren't allowed to send audit messages? Then let's not retry again. */ | |
3481 | log_debug_errno(errno, "Failed to send audit message, closing audit socket: %m"); | |
3482 | close_audit_fd(); | |
3483 | } else | |
3484 | log_warning_errno(errno, "Failed to send audit message, ignoring: %m"); | |
3485 | } | |
3486 | #endif | |
3487 | } | |
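
/* Callers pass libaudit event types here, e.g. AUDIT_SERVICE_START or
 * AUDIT_SERVICE_STOP, so the resulting audit records carry a "unit=<name>" field
 * alongside the success flag. */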
3488 | ||
3489 | void manager_send_unit_plymouth(Manager *m, Unit *u) { | |
3490 | _cleanup_free_ char *message = NULL; | |
3491 | int c, r; | |
3492 | ||
3493 | assert(m); | |
3494 | assert(u); | |
3495 | ||
3496 | if (!MANAGER_IS_SYSTEM(m)) | |
3497 | return; | |
3498 | ||
3499 | /* Don't generate plymouth events if the service was already started and we're just deserializing */ | |
3500 | if (MANAGER_IS_RELOADING(m)) | |
3501 | return; | |
3502 | ||
3503 | if (detect_container() > 0) | |
3504 | return; | |
3505 | ||
3506 | if (!UNIT_VTABLE(u)->notify_plymouth) | |
3507 | return; | |
3508 | ||
3509 | c = asprintf(&message, "U\x02%c%s%c", (int) (strlen(u->id) + 1), u->id, '\x00'); | |
3510 | if (c < 0) | |
3511 | return (void) log_oom(); | |
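
        /* The buffer built above is plymouth's raw update message, byte for byte:
         * 'U', 0x02, a one-byte length (strlen(u->id) + 1), the unit id, and a
         * terminating NUL. */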
3512 | ||
        /* We set SOCK_NONBLOCK here so that we rather drop the message than wait for plymouth */
3514 | r = plymouth_send_raw(message, c, SOCK_NONBLOCK); | |
3515 | if (r < 0) | |
3516 | log_full_errno(ERRNO_IS_NO_PLYMOUTH(r) ? LOG_DEBUG : LOG_WARNING, r, | |
3517 | "Failed to communicate with plymouth: %m"); | |
3518 | } | |
3519 | ||
3520 | void manager_send_unit_supervisor(Manager *m, Unit *u, bool active) { | |
3521 | assert(m); | |
3522 | assert(u); | |
3523 | ||
3524 | /* Notify a "supervisor" process about our progress, i.e. a container manager, hypervisor, or | |
3525 | * surrounding service manager. */ | |
3526 | ||
3527 | if (MANAGER_IS_RELOADING(m)) | |
3528 | return; | |
3529 | ||
3530 | if (!UNIT_VTABLE(u)->notify_supervisor) | |
3531 | return; | |
3532 | ||
3533 | if (in_initrd()) /* Only send these once we left the initrd */ | |
3534 | return; | |
3535 | ||
3536 | (void) sd_notifyf(/* unset_environment= */ false, | |
3537 | active ? "X_SYSTEMD_UNIT_ACTIVE=%s" : "X_SYSTEMD_UNIT_INACTIVE=%s", | |
3538 | u->id); | |
3539 | } | |
3540 | ||
3541 | usec_t manager_get_watchdog(Manager *m, WatchdogType t) { | |
3542 | assert(m); | |
3543 | ||
3544 | if (MANAGER_IS_USER(m)) | |
3545 | return USEC_INFINITY; | |
3546 | ||
3547 | if (m->watchdog_overridden[t] != USEC_INFINITY) | |
3548 | return m->watchdog_overridden[t]; | |
3549 | ||
3550 | return m->watchdog[t]; | |
3551 | } | |
3552 | ||
3553 | void manager_set_watchdog(Manager *m, WatchdogType t, usec_t timeout) { | |
3554 | ||
3555 | assert(m); | |
3556 | ||
3557 | if (MANAGER_IS_USER(m)) | |
3558 | return; | |
3559 | ||
3560 | if (m->watchdog[t] == timeout) | |
3561 | return; | |
3562 | ||
3563 | if (m->watchdog_overridden[t] == USEC_INFINITY) { | |
3564 | if (t == WATCHDOG_RUNTIME) | |
3565 | (void) watchdog_setup(timeout); | |
3566 | else if (t == WATCHDOG_PRETIMEOUT) | |
3567 | (void) watchdog_setup_pretimeout(timeout); | |
3568 | } | |
3569 | ||
3570 | m->watchdog[t] = timeout; | |
3571 | } | |
3572 | ||
3573 | void manager_override_watchdog(Manager *m, WatchdogType t, usec_t timeout) { | |
3574 | usec_t usec; | |
3575 | ||
3576 | assert(m); | |
3577 | ||
3578 | if (MANAGER_IS_USER(m)) | |
3579 | return; | |
3580 | ||
3581 | if (m->watchdog_overridden[t] == timeout) | |
3582 | return; | |
3583 | ||
3584 | usec = timeout == USEC_INFINITY ? m->watchdog[t] : timeout; | |
3585 | if (t == WATCHDOG_RUNTIME) | |
3586 | (void) watchdog_setup(usec); | |
3587 | else if (t == WATCHDOG_PRETIMEOUT) | |
3588 | (void) watchdog_setup_pretimeout(usec); | |
3589 | ||
3590 | m->watchdog_overridden[t] = timeout; | |
3591 | } | |
3592 | ||
3593 | int manager_set_watchdog_pretimeout_governor(Manager *m, const char *governor) { | |
3594 | _cleanup_free_ char *p = NULL; | |
3595 | int r; | |
3596 | ||
3597 | assert(m); | |
3598 | ||
3599 | if (MANAGER_IS_USER(m)) | |
3600 | return 0; | |
3601 | ||
3602 | if (streq_ptr(m->watchdog_pretimeout_governor, governor)) | |
3603 | return 0; | |
3604 | ||
3605 | p = strdup(governor); | |
3606 | if (!p) | |
3607 | return -ENOMEM; | |
3608 | ||
3609 | r = watchdog_setup_pretimeout_governor(governor); | |
3610 | if (r < 0) | |
3611 | return r; | |
3612 | ||
3613 | return free_and_replace(m->watchdog_pretimeout_governor, p); | |
3614 | } | |
3615 | ||
3616 | int manager_override_watchdog_pretimeout_governor(Manager *m, const char *governor) { | |
3617 | _cleanup_free_ char *p = NULL; | |
3618 | int r; | |
3619 | ||
3620 | assert(m); | |
3621 | ||
3622 | if (MANAGER_IS_USER(m)) | |
3623 | return 0; | |
3624 | ||
3625 | if (streq_ptr(m->watchdog_pretimeout_governor_overridden, governor)) | |
3626 | return 0; | |
3627 | ||
3628 | p = strdup(governor); | |
3629 | if (!p) | |
3630 | return -ENOMEM; | |
3631 | ||
3632 | r = watchdog_setup_pretimeout_governor(governor); | |
3633 | if (r < 0) | |
3634 | return r; | |
3635 | ||
3636 | return free_and_replace(m->watchdog_pretimeout_governor_overridden, p); | |
3637 | } | |
3638 | ||
3639 | int manager_reload(Manager *m) { | |
3640 | _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL; | |
3641 | _cleanup_fdset_free_ FDSet *fds = NULL; | |
3642 | _cleanup_fclose_ FILE *f = NULL; | |
3643 | int r; | |
3644 | ||
3645 | assert(m); | |
3646 | ||
3647 | r = manager_open_serialization(m, &f); | |
3648 | if (r < 0) | |
3649 | return log_error_errno(r, "Failed to create serialization file: %m"); | |
3650 | ||
3651 | fds = fdset_new(); | |
3652 | if (!fds) | |
3653 | return log_oom(); | |
3654 | ||
3655 | /* We are officially in reload mode from here on. */ | |
3656 | reloading = manager_reloading_start(m); | |
3657 | ||
3658 | r = manager_serialize(m, f, fds, false); | |
3659 | if (r < 0) | |
3660 | return r; | |
3661 | ||
3662 | if (fseeko(f, 0, SEEK_SET) < 0) | |
3663 | return log_error_errno(errno, "Failed to seek to beginning of serialization: %m"); | |
3664 | ||
3665 | /* 💀 This is the point of no return, from here on there is no way back. 💀 */ | |
3666 | reloading = NULL; | |
3667 | ||
3668 | bus_manager_send_reloading(m, true); | |
3669 | ||
3670 | /* Start by flushing out all jobs and units, all generated units, all runtime environments, all dynamic users | |
3671 | * and everything else that is worth flushing out. We'll get it all back from the serialization — if we need | |
3672 | * it. */ | |
3673 | ||
3674 | manager_clear_jobs_and_units(m); | |
3675 | lookup_paths_flush_generator(&m->lookup_paths); | |
3676 | lookup_paths_done(&m->lookup_paths); | |
3677 | exec_shared_runtime_vacuum(m); | |
3678 | dynamic_user_vacuum(m, false); | |
3679 | m->uid_refs = hashmap_free(m->uid_refs); | |
3680 | m->gid_refs = hashmap_free(m->gid_refs); | |
3681 | ||
3682 | r = lookup_paths_init_or_warn(&m->lookup_paths, m->runtime_scope, 0, NULL); | |
3683 | if (r < 0) | |
3684 | return r; | |
3685 | ||
3686 | (void) manager_run_environment_generators(m); | |
3687 | (void) manager_run_generators(m); | |
3688 | ||
3689 | lookup_paths_log(&m->lookup_paths); | |
3690 | ||
3691 | /* We flushed out generated files, for which we don't watch mtime, so we should flush the old map. */ | |
3692 | manager_free_unit_name_maps(m); | |
3693 | m->unit_file_state_outdated = false; | |
3694 | ||
3695 | /* First, enumerate what we can from kernel and suchlike */ | |
3696 | manager_enumerate_perpetual(m); | |
3697 | manager_enumerate(m); | |
3698 | ||
3699 | /* Second, deserialize our stored data */ | |
3700 | r = manager_deserialize(m, f, fds); | |
3701 | if (r < 0) | |
3702 | log_warning_errno(r, "Deserialization failed, proceeding anyway: %m"); | |
3703 | ||
3704 | /* We don't need the serialization anymore */ | |
3705 | f = safe_fclose(f); | |
3706 | ||
3707 | /* Re-register notify_fd as event source, and set up other sockets/communication channels we might need */ | |
3708 | (void) manager_setup_notify(m); | |
3709 | (void) manager_setup_cgroups_agent(m); | |
3710 | (void) manager_setup_user_lookup_fd(m); | |
3711 | (void) manager_setup_handoff_timestamp_fd(m); | |
3712 | ||
3713 | /* Third, fire things up! */ | |
3714 | manager_coldplug(m); | |
3715 | ||
3716 | /* Clean up runtime objects no longer referenced */ | |
3717 | manager_vacuum(m); | |
3718 | ||
3719 | /* Clean up deserialized tracked clients */ | |
3720 | m->deserialized_subscribed = strv_free(m->deserialized_subscribed); | |
3721 | ||
3722 | /* Consider the reload process complete now. */ | |
3723 | assert(m->n_reloading > 0); | |
3724 | m->n_reloading--; | |
3725 | ||
3726 | manager_ready(m); | |
3727 | ||
3728 | m->send_reloading_done = true; | |
3729 | return 0; | |
3730 | } | |
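
/* The sequence above (serialize → flush → regenerate → deserialize → coldplug) is
 * shared conceptually with daemon-reexec, where the serialized state additionally
 * has to survive an execve() of the systemd binary. */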
3731 | ||
3732 | void manager_reset_failed(Manager *m) { | |
3733 | Unit *u; | |
3734 | ||
3735 | assert(m); | |
3736 | ||
3737 | HASHMAP_FOREACH(u, m->units) | |
3738 | unit_reset_failed(u); | |
3739 | } | |
3740 | ||
3741 | bool manager_unit_inactive_or_pending(Manager *m, const char *name) { | |
3742 | Unit *u; | |
3743 | ||
3744 | assert(m); | |
3745 | assert(name); | |
3746 | ||
3747 | /* Returns true if the unit is inactive or going down */ | |
3748 | u = manager_get_unit(m, name); | |
3749 | if (!u) | |
3750 | return true; | |
3751 | ||
3752 | return unit_inactive_or_pending(u); | |
3753 | } | |
3754 | ||
3755 | static void log_taint_string(Manager *m) { | |
3756 | assert(m); | |
3757 | ||
3758 | if (MANAGER_IS_USER(m) || m->taint_logged) | |
3759 | return; | |
3760 | ||
3761 | m->taint_logged = true; /* only check for taint once */ | |
3762 | ||
3763 | _cleanup_free_ char *taint = taint_string(); | |
3764 | if (isempty(taint)) | |
3765 | return; | |
3766 | ||
3767 | log_struct(LOG_NOTICE, | |
3768 | LOG_MESSAGE("System is tainted: %s", taint), | |
3769 | "TAINT=%s", taint, | |
3770 | "MESSAGE_ID=" SD_MESSAGE_TAINTED_STR); | |
3771 | } | |
3772 | ||
3773 | static void manager_notify_finished(Manager *m) { | |
3774 | usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec; | |
3775 | ||
3776 | if (MANAGER_IS_TEST_RUN(m)) | |
3777 | return; | |
3778 | ||
3779 | if (MANAGER_IS_SYSTEM(m) && m->soft_reboots_count > 0) { | |
3780 | /* The soft-reboot case, where we only report data for the last reboot */ | |
3781 | firmware_usec = loader_usec = initrd_usec = kernel_usec = 0; | |
3782 | total_usec = userspace_usec = usec_sub_unsigned(m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic, | |
3783 | m->timestamps[MANAGER_TIMESTAMP_SHUTDOWN_START].monotonic); | |
3784 | ||
3785 | log_struct(LOG_INFO, | |
3786 | "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR, | |
3787 | "USERSPACE_USEC="USEC_FMT, userspace_usec, | |
3788 | LOG_MESSAGE("Soft-reboot finished in %s, counter is now at %u.", | |
3789 | FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC), | |
3790 | m->soft_reboots_count)); | |
3791 | } else if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) { | |
3792 | char buf[FORMAT_TIMESPAN_MAX + STRLEN(" (firmware) + ") + FORMAT_TIMESPAN_MAX + STRLEN(" (loader) + ")] | |
3793 | = {}; | |
3794 | char *p = buf; | |
3795 | size_t size = sizeof buf; | |
3796 | ||
3797 | /* Note that MANAGER_TIMESTAMP_KERNEL's monotonic value is always at 0, and | |
3798 | * MANAGER_TIMESTAMP_FIRMWARE's and MANAGER_TIMESTAMP_LOADER's monotonic value should be considered | |
3799 | * negative values. */ | |
3800 | ||
3801 | firmware_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic - m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic; | |
3802 | loader_usec = m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic; | |
3803 | userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic; | |
3804 | total_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic + m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic; | |
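
                /* Worked example for the sum above: if the firmware handed off 2s before the
                 * kernel clock started (FIRMWARE.monotonic stores that magnitude) and userspace
                 * finished at FINISH.monotonic == 30s, the grand total is 2s + 30s = 32s. */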
3805 | ||
3806 | if (firmware_usec > 0) | |
3807 | size = strpcpyf(&p, size, "%s (firmware) + ", FORMAT_TIMESPAN(firmware_usec, USEC_PER_MSEC)); | |
3808 | if (loader_usec > 0) | |
3809 | size = strpcpyf(&p, size, "%s (loader) + ", FORMAT_TIMESPAN(loader_usec, USEC_PER_MSEC)); | |
3810 | ||
3811 | if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) { | |
3812 | ||
3813 | /* The initrd case on bare-metal */ | |
3814 | kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic; | |
3815 | initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic; | |
3816 | ||
3817 | log_struct(LOG_INFO, | |
3818 | "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR, | |
3819 | "KERNEL_USEC="USEC_FMT, kernel_usec, | |
3820 | "INITRD_USEC="USEC_FMT, initrd_usec, | |
3821 | "USERSPACE_USEC="USEC_FMT, userspace_usec, | |
3822 | LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (initrd) + %s (userspace) = %s.", | |
3823 | buf, | |
3824 | FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC), | |
3825 | FORMAT_TIMESPAN(initrd_usec, USEC_PER_MSEC), | |
3826 | FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC), | |
3827 | FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC))); | |
3828 | } else { | |
3829 | /* The initrd-less case on bare-metal */ | |
3830 | ||
3831 | kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic; | |
3832 | initrd_usec = 0; | |
3833 | ||
3834 | log_struct(LOG_INFO, | |
3835 | "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR, | |
3836 | "KERNEL_USEC="USEC_FMT, kernel_usec, | |
3837 | "USERSPACE_USEC="USEC_FMT, userspace_usec, | |
3838 | LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (userspace) = %s.", | |
3839 | buf, | |
3840 | FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC), | |
3841 | FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC), | |
3842 | FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC))); | |
3843 | } | |
3844 | } else { | |
3845 | /* The container and --user case */ | |
3846 | firmware_usec = loader_usec = initrd_usec = kernel_usec = 0; | |
3847 | total_usec = userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic; | |
3848 | ||
3849 | log_struct(LOG_INFO, | |
3850 | "MESSAGE_ID=" SD_MESSAGE_USER_STARTUP_FINISHED_STR, | |
3851 | "USERSPACE_USEC="USEC_FMT, userspace_usec, | |
3852 | LOG_MESSAGE("Startup finished in %s.", | |
3853 | FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC))); | |
3854 | } | |
3855 | ||
3856 | bus_manager_send_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec); | |
3857 | ||
3858 | log_taint_string(m); | |
3859 | } | |
3860 | ||
3861 | static void manager_send_ready_user_scope(Manager *m) { | |
3862 | int r; | |
3863 | ||
3864 | assert(m); | |
3865 | ||
3866 | /* We send READY=1 on reaching basic.target only when running in --user mode. */ | |
3867 | if (!MANAGER_IS_USER(m) || m->ready_sent) | |
3868 | return; | |
3869 | ||
3870 | r = sd_notify(/* unset_environment= */ false, | |
3871 | "READY=1\n" | |
3872 | "STATUS=Reached " SPECIAL_BASIC_TARGET "."); | |
3873 | if (r < 0) | |
3874 | log_warning_errno(r, "Failed to send readiness notification, ignoring: %m"); | |
3875 | ||
3876 | m->ready_sent = true; | |
3877 | m->status_ready = false; | |
3878 | } | |
3879 | ||
3880 | static void manager_send_ready_system_scope(Manager *m) { | |
3881 | int r; | |
3882 | ||
3883 | assert(m); | |
3884 | ||
3885 | if (!MANAGER_IS_SYSTEM(m)) | |
3886 | return; | |
3887 | ||
3888 | /* Skip the notification if nothing changed. */ | |
3889 | if (m->ready_sent && m->status_ready) | |
3890 | return; | |
3891 | ||
3892 | r = sd_notify(/* unset_environment= */ false, | |
3893 | "READY=1\n" | |
3894 | "STATUS=Ready."); | |
3895 | if (r < 0) | |
3896 | log_full_errno(m->ready_sent ? LOG_DEBUG : LOG_WARNING, r, | |
3897 | "Failed to send readiness notification, ignoring: %m"); | |
3898 | ||
3899 | m->ready_sent = m->status_ready = true; | |
3900 | } | |
3901 | ||
3902 | static void manager_check_basic_target(Manager *m) { | |
3903 | Unit *u; | |
3904 | ||
3905 | assert(m); | |
3906 | ||
3907 | /* Small shortcut */ | |
3908 | if (m->ready_sent && m->taint_logged) | |
3909 | return; | |
3910 | ||
3911 | u = manager_get_unit(m, SPECIAL_BASIC_TARGET); | |
3912 | if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) | |
3913 | return; | |
3914 | ||
3915 | /* For user managers, send out READY=1 as soon as we reach basic.target */ | |
3916 | manager_send_ready_user_scope(m); | |
3917 | ||
3918 | /* Log the taint string as soon as we reach basic.target */ | |
3919 | log_taint_string(m); | |
3920 | } | |
3921 | ||
3922 | void manager_check_finished(Manager *m) { | |
3923 | assert(m); | |
3924 | ||
3925 | if (MANAGER_IS_RELOADING(m)) | |
3926 | return; | |
3927 | ||
3928 | /* Verify that we have entered the event loop already, and not left it again. */ | |
3929 | if (!MANAGER_IS_RUNNING(m)) | |
3930 | return; | |
3931 | ||
3932 | manager_check_basic_target(m); | |
3933 | ||
3934 | if (!hashmap_isempty(m->jobs)) { | |
3935 | if (m->jobs_in_progress_event_source) | |
3936 | /* Ignore any failure, this is only for feedback */ | |
3937 | (void) sd_event_source_set_time(m->jobs_in_progress_event_source, | |
3938 | manager_watch_jobs_next_time(m)); | |
3939 | return; | |
3940 | } | |
3941 | ||
        /* The jobs hashmap tends to grow a lot during boot, and then it's not reused until shutdown. Let's
         * kill the hashmap if it is relatively large. */
3944 | if (hashmap_buckets(m->jobs) > hashmap_size(m->units) / 10) | |
3945 | m->jobs = hashmap_free(m->jobs); | |
3946 | ||
3947 | manager_send_ready_system_scope(m); | |
3948 | ||
3949 | /* Notify Type=idle units that we are done now */ | |
3950 | manager_close_idle_pipe(m); | |
3951 | ||
3952 | if (MANAGER_IS_FINISHED(m)) | |
3953 | return; | |
3954 | ||
3955 | manager_flip_auto_status(m, false, "boot finished"); | |
3956 | ||
3957 | /* Turn off confirm spawn now */ | |
3958 | m->confirm_spawn = NULL; | |
3959 | ||
3960 | /* No need to update ask password status when we're going non-interactive */ | |
3961 | manager_close_ask_password(m); | |
3962 | ||
3963 | /* This is no longer the first boot */ | |
3964 | manager_set_first_boot(m, false); | |
3965 | ||
3966 | dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_FINISH); | |
3967 | ||
3968 | manager_notify_finished(m); | |
3969 | ||
3970 | manager_invalidate_startup_units(m); | |
3971 | } | |
3972 | ||
3973 | void manager_send_reloading(Manager *m) { | |
3974 | assert(m); | |
3975 | ||
3976 | /* Let whoever invoked us know that we are now reloading */ | |
3977 | (void) notify_reloading_full(/* status = */ NULL); | |
3978 | ||
3979 | /* And ensure that we'll send READY=1 again as soon as we are ready again */ | |
3980 | m->ready_sent = false; | |
3981 | } | |
3982 | ||
3983 | static bool generator_path_any(const char* const* paths) { | |
3984 | bool found = false; | |
3985 | ||
        /* Optimization: skip the whole process (and don't create the output directories)
         * if no generators are found. */
3988 | STRV_FOREACH(path, paths) | |
3989 | if (access(*path, F_OK) == 0) | |
3990 | found = true; | |
3991 | else if (errno != ENOENT) | |
3992 | log_warning_errno(errno, "Failed to open generator directory %s: %m", *path); | |
3993 | ||
3994 | return found; | |
3995 | } | |
3996 | ||
3997 | static int manager_run_environment_generators(Manager *m) { | |
3998 | char **tmp = NULL; /* this is only used in the forked process, no cleanup here */ | |
3999 | _cleanup_strv_free_ char **paths = NULL; | |
4000 | void* args[] = { | |
4001 | [STDOUT_GENERATE] = &tmp, | |
4002 | [STDOUT_COLLECT] = &tmp, | |
4003 | [STDOUT_CONSUME] = &m->transient_environment, | |
4004 | }; | |
4005 | int r; | |
4006 | ||
4007 | if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_ENV_GENERATORS)) | |
4008 | return 0; | |
4009 | ||
4010 | paths = env_generator_binary_paths(m->runtime_scope); | |
4011 | if (!paths) | |
4012 | return log_oom(); | |
4013 | ||
4014 | if (!generator_path_any((const char* const*) paths)) | |
4015 | return 0; | |
4016 | ||
4017 | WITH_UMASK(0022) | |
4018 | r = execute_directories((const char* const*) paths, DEFAULT_TIMEOUT_USEC, gather_environment, | |
4019 | args, NULL, m->transient_environment, | |
4020 | EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID); | |
4021 | return r; | |
4022 | } | |
4023 | ||
4024 | static int build_generator_environment(Manager *m, char ***ret) { | |
4025 | _cleanup_strv_free_ char **nl = NULL; | |
4026 | Virtualization v; | |
4027 | ConfidentialVirtualization cv; | |
4028 | int r; | |
4029 | ||
4030 | assert(m); | |
4031 | assert(ret); | |
4032 | ||
4033 | /* Generators oftentimes want to know some basic facts about the environment they run in, in order to | |
4034 | * adjust generated units to that. Let's pass down some bits of information that are easy for us to | |
4035 | * determine (but a bit harder for generator scripts to determine), as environment variables. */ | |
4036 | ||
4037 | nl = strv_copy(m->transient_environment); | |
4038 | if (!nl) | |
4039 | return -ENOMEM; | |
4040 | ||
4041 | r = strv_env_assign(&nl, "SYSTEMD_SCOPE", runtime_scope_to_string(m->runtime_scope)); | |
4042 | if (r < 0) | |
4043 | return r; | |
4044 | ||
4045 | if (MANAGER_IS_SYSTEM(m)) { | |
4046 | /* Note that $SYSTEMD_IN_INITRD may be used to override the initrd detection in much of our | |
4047 | * codebase. This is hence more than purely informational. It will shortcut detection of the | |
4048 | * initrd state if generators invoke our own tools. But that's OK, as it would come to the | |
4049 | * same results (hopefully). */ | |
4050 | r = strv_env_assign(&nl, "SYSTEMD_IN_INITRD", one_zero(in_initrd())); | |
4051 | if (r < 0) | |
4052 | return r; | |
4053 | ||
4054 | if (m->first_boot >= 0) { | |
4055 | r = strv_env_assign(&nl, "SYSTEMD_FIRST_BOOT", one_zero(m->first_boot)); | |
4056 | if (r < 0) | |
4057 | return r; | |
4058 | } | |
4059 | } | |
4060 | ||
4061 | v = detect_virtualization(); | |
4062 | if (v < 0) | |
4063 | log_debug_errno(v, "Failed to detect virtualization, ignoring: %m"); | |
4064 | else if (v > 0) { | |
4065 | const char *s; | |
4066 | ||
4067 | s = strjoina(VIRTUALIZATION_IS_VM(v) ? "vm:" : | |
4068 | VIRTUALIZATION_IS_CONTAINER(v) ? "container:" : ":", | |
4069 | virtualization_to_string(v)); | |
4070 | ||
4071 | r = strv_env_assign(&nl, "SYSTEMD_VIRTUALIZATION", s); | |
4072 | if (r < 0) | |
4073 | return r; | |
4074 | } | |
4075 | ||
4076 | cv = detect_confidential_virtualization(); | |
4077 | if (cv < 0) | |
4078 | log_debug_errno(cv, "Failed to detect confidential virtualization, ignoring: %m"); | |
4079 | else if (cv > 0) { | |
4080 | r = strv_env_assign(&nl, "SYSTEMD_CONFIDENTIAL_VIRTUALIZATION", confidential_virtualization_to_string(cv)); | |
4081 | if (r < 0) | |
4082 | return r; | |
4083 | } | |
4084 | ||
4085 | r = strv_env_assign(&nl, "SYSTEMD_ARCHITECTURE", architecture_to_string(uname_architecture())); | |
4086 | if (r < 0) | |
4087 | return r; | |
4088 | ||
4089 | *ret = TAKE_PTR(nl); | |
4090 | return 0; | |
4091 | } | |
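
/* For instance, on a KVM guest a generator might then see (values are illustrative):
 *
 *     SYSTEMD_SCOPE=system
 *     SYSTEMD_IN_INITRD=0
 *     SYSTEMD_VIRTUALIZATION=vm:kvm
 *     SYSTEMD_ARCHITECTURE=x86-64
 */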
4092 | ||
4093 | static int manager_execute_generators(Manager *m, char **paths, bool remount_ro) { | |
4094 | _cleanup_strv_free_ char **ge = NULL; | |
4095 | const char *argv[] = { | |
4096 | NULL, /* Leave this empty, execute_directory() will fill something in */ | |
4097 | m->lookup_paths.generator, | |
4098 | m->lookup_paths.generator_early, | |
4099 | m->lookup_paths.generator_late, | |
4100 | NULL, | |
4101 | }; | |
4102 | int r; | |
4103 | ||
4104 | r = build_generator_environment(m, &ge); | |
4105 | if (r < 0) | |
4106 | return log_error_errno(r, "Failed to build generator environment: %m"); | |
4107 | ||
4108 | if (remount_ro) { | |
4109 | /* Remount most of the filesystem tree read-only. We leave /sys/ as-is, because our code | |
4110 | * checks whether it is read-only to detect containerized execution environments. We leave | |
4111 | * /run/ as-is too, because that's where our output goes. We also leave /proc/ and /dev/shm/ | |
4112 | * because they're API, and /tmp/ that safe_fork() mounted for us. | |
4113 | */ | |
4114 | r = bind_remount_recursive("/", MS_RDONLY, MS_RDONLY, | |
4115 | STRV_MAKE("/sys", "/run", "/proc", "/dev/shm", "/tmp")); | |
4116 | if (r < 0) | |
4117 | log_warning_errno(r, "Read-only bind remount failed, ignoring: %m"); | |
4118 | } | |
4119 | ||
4120 | BLOCK_WITH_UMASK(0022); | |
4121 | return execute_directories( | |
4122 | (const char* const*) paths, | |
4123 | DEFAULT_TIMEOUT_USEC, | |
4124 | /* callbacks= */ NULL, /* callback_args= */ NULL, | |
4125 | (char**) argv, | |
4126 | ge, | |
4127 | EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID); | |
4128 | } | |
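
/* Per systemd.generator(7), every generator is invoked with three directory
 * arguments: the normal, early and late output directories, matching the argv[]
 * entries assembled above. */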
4129 | ||
4130 | static int manager_run_generators(Manager *m) { | |
4131 | ForkFlags flags = FORK_RESET_SIGNALS | FORK_WAIT | FORK_NEW_MOUNTNS | FORK_MOUNTNS_SLAVE; | |
4132 | _cleanup_strv_free_ char **paths = NULL; | |
4133 | int r; | |
4134 | ||
4135 | assert(m); | |
4136 | ||
4137 | if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_GENERATORS)) | |
4138 | return 0; | |
4139 | ||
4140 | paths = generator_binary_paths(m->runtime_scope); | |
4141 | if (!paths) | |
4142 | return log_oom(); | |
4143 | ||
4144 | if (!generator_path_any((const char* const*) paths)) | |
4145 | return 0; | |
4146 | ||
4147 | r = lookup_paths_mkdir_generator(&m->lookup_paths); | |
4148 | if (r < 0) { | |
4149 | log_error_errno(r, "Failed to create generator directories: %m"); | |
4150 | goto finish; | |
4151 | } | |
4152 | ||
4153 | /* If we are the system manager, we fork and invoke the generators in a sanitized mount namespace. If | |
4154 | * we are the user manager, let's just execute the generators directly. We might not have the | |
4155 | * necessary privileges, and the system manager has already mounted /tmp/ and everything else for us. | |
4156 | */ | |
4157 | if (MANAGER_IS_USER(m)) { | |
4158 | r = manager_execute_generators(m, paths, /* remount_ro= */ false); | |
4159 | goto finish; | |
4160 | } | |
4161 | ||
4162 | /* On some systems /tmp/ doesn't exist, and on some other systems we cannot create it at all. Avoid | |
4163 | * trying to mount a private tmpfs on it as there's no one size fits all. */ | |
4164 | if (is_dir("/tmp", /* follow= */ false) > 0 && !MANAGER_IS_TEST_RUN(m)) | |
4165 | flags |= FORK_PRIVATE_TMP; | |
4166 | ||
4167 | r = safe_fork("(sd-gens)", flags, NULL); | |
4168 | if (r == 0) { | |
4169 | r = manager_execute_generators(m, paths, /* remount_ro= */ true); | |
4170 | _exit(r >= 0 ? EXIT_SUCCESS : EXIT_FAILURE); | |
4171 | } | |
4172 | if (r < 0) { | |
4173 | if (!ERRNO_IS_PRIVILEGE(r) && r != -EINVAL) { | |
4174 | log_error_errno(r, "Failed to fork off sandboxing environment for executing generators: %m"); | |
4175 | goto finish; | |
4176 | } | |
4177 | ||
                /* Failed to fork with a new mount namespace? Maybe we are running in a container
                 * environment with seccomp filtering, or without the necessary capability.
4180 | * | |
4181 | * We also allow -EINVAL to allow running without CLONE_NEWNS. | |
4182 | * | |
4183 | * Also, when running on non-native userland architecture via systemd-nspawn and | |
4184 | * qemu-user-static QEMU-emulator, clone() with CLONE_NEWNS fails with EINVAL, see | |
4185 | * https://github.com/systemd/systemd/issues/28901. | |
4186 | */ | |
4187 | log_debug_errno(r, | |
4188 | "Failed to fork off sandboxing environment for executing generators. " | |
4189 | "Falling back to execute generators without sandboxing: %m"); | |
4190 | r = manager_execute_generators(m, paths, /* remount_ro= */ false); | |
4191 | } | |
4192 | ||
4193 | finish: | |
4194 | lookup_paths_trim_generator(&m->lookup_paths); | |
4195 | return r; | |
4196 | } | |
4197 | ||
4198 | int manager_transient_environment_add(Manager *m, char **plus) { | |
4199 | char **a; | |
4200 | ||
4201 | assert(m); | |
4202 | ||
4203 | if (strv_isempty(plus)) | |
4204 | return 0; | |
4205 | ||
4206 | a = strv_env_merge(m->transient_environment, plus); | |
4207 | if (!a) | |
4208 | return log_oom(); | |
4209 | ||
4210 | sanitize_environment(a); | |
4211 | ||
4212 | return strv_free_and_replace(m->transient_environment, a); | |
4213 | } | |
4214 | ||
4215 | int manager_client_environment_modify( | |
4216 | Manager *m, | |
4217 | char **minus, | |
4218 | char **plus) { | |
4219 | ||
4220 | char **a = NULL, **b = NULL, **l; | |
4221 | ||
4222 | assert(m); | |
4223 | ||
4224 | if (strv_isempty(minus) && strv_isempty(plus)) | |
4225 | return 0; | |
4226 | ||
4227 | l = m->client_environment; | |
4228 | ||
4229 | if (!strv_isempty(minus)) { | |
4230 | a = strv_env_delete(l, 1, minus); | |
4231 | if (!a) | |
4232 | return -ENOMEM; | |
4233 | ||
4234 | l = a; | |
4235 | } | |
4236 | ||
4237 | if (!strv_isempty(plus)) { | |
4238 | b = strv_env_merge(l, plus); | |
4239 | if (!b) { | |
4240 | strv_free(a); | |
4241 | return -ENOMEM; | |
4242 | } | |
4243 | ||
4244 | l = b; | |
4245 | } | |
4246 | ||
4247 | if (m->client_environment != l) | |
4248 | strv_free(m->client_environment); | |
4249 | ||
4250 | if (a != l) | |
4251 | strv_free(a); | |
4252 | if (b != l) | |
4253 | strv_free(b); | |
4254 | ||
4255 | m->client_environment = sanitize_environment(l); | |
4256 | return 0; | |
4257 | } | |
4258 | ||
4259 | int manager_get_effective_environment(Manager *m, char ***ret) { | |
4260 | char **l; | |
4261 | ||
4262 | assert(m); | |
4263 | assert(ret); | |
4264 | ||
4265 | l = strv_env_merge(m->transient_environment, m->client_environment); | |
4266 | if (!l) | |
4267 | return -ENOMEM; | |
4268 | ||
4269 | *ret = l; | |
4270 | return 0; | |
4271 | } | |
4272 | ||
4273 | int manager_set_unit_defaults(Manager *m, const UnitDefaults *defaults) { | |
4274 | _cleanup_free_ char *label = NULL; | |
4275 | struct rlimit *rlimit[_RLIMIT_MAX]; | |
4276 | int r; | |
4277 | ||
4278 | assert(m); | |
4279 | assert(defaults); | |
4280 | ||
4281 | if (streq_ptr(defaults->smack_process_label, "/")) | |
4282 | label = NULL; | |
4283 | else { | |
4284 | const char *l = defaults->smack_process_label; | |
4285 | #ifdef SMACK_DEFAULT_PROCESS_LABEL | |
4286 | if (!l) | |
4287 | l = SMACK_DEFAULT_PROCESS_LABEL; | |
4288 | #endif | |
4289 | if (l) { | |
4290 | label = strdup(l); | |
4291 | if (!label) | |
4292 | return -ENOMEM; | |
4293 | } else | |
4294 | label = NULL; | |
4295 | } | |
4296 | ||
4297 | r = rlimit_copy_all(rlimit, defaults->rlimit); | |
4298 | if (r < 0) | |
4299 | return r; | |
4300 | ||
4301 | m->defaults.std_output = defaults->std_output; | |
4302 | m->defaults.std_error = defaults->std_error; | |
4303 | ||
4304 | m->defaults.restart_usec = defaults->restart_usec; | |
4305 | m->defaults.timeout_start_usec = defaults->timeout_start_usec; | |
4306 | m->defaults.timeout_stop_usec = defaults->timeout_stop_usec; | |
4307 | m->defaults.timeout_abort_usec = defaults->timeout_abort_usec; | |
4308 | m->defaults.timeout_abort_set = defaults->timeout_abort_set; | |
4309 | m->defaults.device_timeout_usec = defaults->device_timeout_usec; | |
4310 | ||
4311 | m->defaults.start_limit_interval = defaults->start_limit_interval; | |
4312 | m->defaults.start_limit_burst = defaults->start_limit_burst; | |
4313 | ||
4314 | m->defaults.cpu_accounting = defaults->cpu_accounting; | |
4315 | m->defaults.memory_accounting = defaults->memory_accounting; | |
4316 | m->defaults.io_accounting = defaults->io_accounting; | |
4317 | m->defaults.blockio_accounting = defaults->blockio_accounting; | |
4318 | m->defaults.tasks_accounting = defaults->tasks_accounting; | |
4319 | m->defaults.ip_accounting = defaults->ip_accounting; | |
4320 | ||
4321 | m->defaults.tasks_max = defaults->tasks_max; | |
4322 | m->defaults.timer_accuracy_usec = defaults->timer_accuracy_usec; | |
4323 | ||
4324 | m->defaults.oom_policy = defaults->oom_policy; | |
4325 | m->defaults.oom_score_adjust = defaults->oom_score_adjust; | |
4326 | m->defaults.oom_score_adjust_set = defaults->oom_score_adjust_set; | |
4327 | ||
4328 | m->defaults.memory_pressure_watch = defaults->memory_pressure_watch; | |
4329 | m->defaults.memory_pressure_threshold_usec = defaults->memory_pressure_threshold_usec; | |
4330 | ||
4331 | free_and_replace(m->defaults.smack_process_label, label); | |
4332 | rlimit_free_all(m->defaults.rlimit); | |
4333 | memcpy(m->defaults.rlimit, rlimit, sizeof(struct rlimit*) * _RLIMIT_MAX); | |
4334 | ||
4335 | return 0; | |
4336 | } | |
4337 | ||
4338 | void manager_recheck_dbus(Manager *m) { | |
4339 | assert(m); | |
4340 | ||
4341 | /* Connects to the bus if the dbus service and socket are running. If we are running in user mode | |
4342 | * this is all it does. In system mode we'll also connect to the system bus (which will most likely | |
         * just reuse the connection of the API bus). That's because the system bus after all runs as a service
4344 | * of the system instance, while in the user instance we can assume it's already there. */ | |
4345 | ||
4346 | if (MANAGER_IS_RELOADING(m)) | |
4347 | return; /* don't check while we are reloading… */ | |
4348 | ||
4349 | if (manager_dbus_is_running(m, false)) { | |
4350 | (void) bus_init_api(m); | |
4351 | ||
4352 | if (MANAGER_IS_SYSTEM(m)) | |
4353 | (void) bus_init_system(m); | |
4354 | } else { | |
4355 | (void) bus_done_api(m); | |
4356 | ||
4357 | if (MANAGER_IS_SYSTEM(m)) | |
4358 | (void) bus_done_system(m); | |
4359 | } | |
4360 | } | |
4361 | ||
4362 | static bool manager_journal_is_running(Manager *m) { | |
4363 | Unit *u; | |
4364 | ||
4365 | assert(m); | |
4366 | ||
4367 | if (MANAGER_IS_TEST_RUN(m)) | |
4368 | return false; | |
4369 | ||
4370 | /* If we are the user manager we can safely assume that the journal is up */ | |
4371 | if (!MANAGER_IS_SYSTEM(m)) | |
4372 | return true; | |
4373 | ||
4374 | /* Check that the socket is not only up, but in RUNNING state */ | |
4375 | u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET); | |
4376 | if (!u) | |
4377 | return false; | |
4378 | if (SOCKET(u)->state != SOCKET_RUNNING) | |
4379 | return false; | |
4380 | ||
        /* Similarly, check if the daemon itself is fully up, too */
4382 | u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE); | |
4383 | if (!u) | |
4384 | return false; | |
4385 | if (!IN_SET(SERVICE(u)->state, SERVICE_RELOAD, SERVICE_RUNNING)) | |
4386 | return false; | |
4387 | ||
4388 | return true; | |
4389 | } | |
4390 | ||
4391 | void disable_printk_ratelimit(void) { | |
4392 | /* Disable kernel's printk ratelimit. | |
4393 | * | |
4394 | * Logging to /dev/kmsg is most useful during early boot and shutdown, where normal logging | |
4395 | * mechanisms are not available. The semantics of this sysctl are such that any kernel command-line | |
4396 | * setting takes precedence. */ | |
4397 | int r; | |
4398 | ||
4399 | r = sysctl_write("kernel/printk_devkmsg", "on"); | |
4400 | if (r < 0) | |
4401 | log_debug_errno(r, "Failed to set sysctl kernel.printk_devkmsg=on: %m"); | |
4402 | } | |
4403 | ||
4404 | void manager_recheck_journal(Manager *m) { | |
4405 | ||
4406 | assert(m); | |
4407 | ||
4408 | /* Don't bother with this unless we are in the special situation of being PID 1 */ | |
4409 | if (getpid_cached() != 1) | |
4410 | return; | |
4411 | ||
4412 | /* Don't check this while we are reloading, things might still change */ | |
4413 | if (MANAGER_IS_RELOADING(m)) | |
4414 | return; | |
4415 | ||
4416 | /* The journal is fully and entirely up? If so, let's permit logging to it, if that's configured. If | |
4417 | * the journal is down, don't ever log to it, otherwise we might end up deadlocking ourselves as we | |
4418 | * might trigger an activation ourselves we can't fulfill. */ | |
4419 | log_set_prohibit_ipc(!manager_journal_is_running(m)); | |
4420 | log_open(); | |
4421 | } | |
4422 | ||
4423 | static ShowStatus manager_get_show_status(Manager *m) { | |
4424 | assert(m); | |
4425 | ||
4426 | if (MANAGER_IS_USER(m)) | |
4427 | return _SHOW_STATUS_INVALID; | |
4428 | ||
4429 | if (m->show_status_overridden != _SHOW_STATUS_INVALID) | |
4430 | return m->show_status_overridden; | |
4431 | ||
4432 | return m->show_status; | |
4433 | } | |
4434 | ||
4435 | bool manager_get_show_status_on(Manager *m) { | |
4436 | assert(m); | |
4437 | ||
4438 | return show_status_on(manager_get_show_status(m)); | |
4439 | } | |
4440 | ||
4441 | static void set_show_status_marker(bool b) { | |
4442 | if (b) | |
4443 | (void) touch("/run/systemd/show-status"); | |
4444 | else | |
4445 | (void) unlink("/run/systemd/show-status"); | |
4446 | } | |
4447 | ||
4448 | void manager_set_show_status(Manager *m, ShowStatus mode, const char *reason) { | |
4449 | assert(m); | |
4450 | assert(reason); | |
4451 | assert(mode >= 0 && mode < _SHOW_STATUS_MAX); | |
4452 | ||
4453 | if (MANAGER_IS_USER(m)) | |
4454 | return; | |
4455 | ||
4456 | if (mode == m->show_status) | |
4457 | return; | |
4458 | ||
4459 | if (m->show_status_overridden == _SHOW_STATUS_INVALID) { | |
4460 | bool enabled; | |
4461 | ||
4462 | enabled = show_status_on(mode); | |
4463 | log_debug("%s (%s) showing of status (%s).", | |
4464 | enabled ? "Enabling" : "Disabling", | |
4465 | strna(show_status_to_string(mode)), | |
4466 | reason); | |
4467 | ||
4468 | set_show_status_marker(enabled); | |
4469 | } | |
4470 | ||
4471 | m->show_status = mode; | |
4472 | } | |
4473 | ||
4474 | void manager_override_show_status(Manager *m, ShowStatus mode, const char *reason) { | |
4475 | assert(m); | |
4476 | assert(mode < _SHOW_STATUS_MAX); | |
4477 | ||
4478 | if (MANAGER_IS_USER(m)) | |
4479 | return; | |
4480 | ||
4481 | if (mode == m->show_status_overridden) | |
4482 | return; | |
4483 | ||
4484 | m->show_status_overridden = mode; | |
4485 | ||
4486 | if (mode == _SHOW_STATUS_INVALID) | |
4487 | mode = m->show_status; | |
4488 | ||
4489 | log_debug("%s (%s) showing of status (%s).", | |
4490 | m->show_status_overridden != _SHOW_STATUS_INVALID ? "Overriding" : "Restoring", | |
4491 | strna(show_status_to_string(mode)), | |
4492 | reason); | |
4493 | ||
4494 | set_show_status_marker(show_status_on(mode)); | |
4495 | } | |
4496 | ||
4497 | const char* manager_get_confirm_spawn(Manager *m) { | |
4498 | static int last_errno = 0; | |
4499 | struct stat st; | |
4500 | int r; | |
4501 | ||
4502 | assert(m); | |
4503 | ||
4504 | /* Here's the deal: we want to test the validity of the console but don't want | |
4505 | * PID 1 to go through the whole console acquisition process, which might block. | |
4506 | * But we also want to warn the user only once if something is wrong with the | |
4507 | * console, so we cannot do the sanity checks after spawning our children. So | |
4508 | * here we simply do really basic tests to hopefully trap common errors. | |
4509 | * | |
4510 | * If the console suddenly disappears by the time our children really need it, | |
4511 | * then they will simply fail to acquire it and a positive answer will be | |
4512 | * assumed. New children will fall back to /dev/console though. | |
4513 | * | |
4514 | * Note: TTYs are devices that can come and go any time, and frequently aren't | |
4515 | * available yet during early boot (consider a USB RS-232 dongle...). If for any | |
4516 | * reason the configured console is not ready, we fall back to the default | |
4517 | * console. */ | |
4518 | ||
4519 | if (!m->confirm_spawn || path_equal(m->confirm_spawn, "/dev/console")) | |
4520 | return m->confirm_spawn; | |
4521 | ||
4522 | if (stat(m->confirm_spawn, &st) < 0) { | |
4523 | r = -errno; | |
4524 | goto fail; | |
4525 | } | |
4526 | ||
4527 | if (!S_ISCHR(st.st_mode)) { | |
4528 | r = -ENOTTY; | |
4529 | goto fail; | |
4530 | } | |
4531 | ||
4532 | last_errno = 0; | |
4533 | return m->confirm_spawn; | |
4534 | ||
4535 | fail: | |
4536 | if (last_errno != r) | |
4537 | last_errno = log_warning_errno(r, "Failed to open %s, using default console: %m", m->confirm_spawn); | |
4538 | ||
4539 | return "/dev/console"; | |
4540 | } | |
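
/*
 * Illustrative sketch, not part of this file: the same "really basic" sanity
 * test in stand-alone form. A configured confirmation console is only trusted
 * if it exists and is a character device; anything else falls back to
 * /dev/console. pick_console() is a hypothetical name.
 */
#include <stdio.h>
#include <sys/stat.h>

static const char* pick_console(const char *configured) {
        struct stat st;

        if (!configured)
                return NULL; /* confirmation disabled entirely */

        if (stat(configured, &st) < 0 || !S_ISCHR(st.st_mode))
                return "/dev/console"; /* missing, or not a character device */

        return configured;
}

int main(void) {
        const char *c = pick_console("/dev/ttyS0");
        printf("using console: %s\n", c ? c : "(none)");
        return 0;
}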
4541 | ||
4542 | void manager_set_first_boot(Manager *m, bool b) { | |
4543 | assert(m); | |
4544 | ||
4545 | if (!MANAGER_IS_SYSTEM(m)) | |
4546 | return; | |
4547 | ||
4548 | if (m->first_boot != (int) b) { | |
4549 | if (b) | |
4550 | (void) touch("/run/systemd/first-boot"); | |
4551 | else | |
4552 | (void) unlink("/run/systemd/first-boot"); | |
4553 | } | |
4554 | ||
4555 | m->first_boot = b; | |
4556 | } | |
4557 | ||
4558 | void manager_disable_confirm_spawn(void) { | |
4559 | (void) touch("/run/systemd/confirm_spawn_disabled"); | |
4560 | } | |
4561 | ||
4562 | static bool manager_should_show_status(Manager *m, StatusType type) { | |
4563 | assert(m); | |
4564 | ||
4565 | if (!MANAGER_IS_SYSTEM(m)) | |
4566 | return false; | |
4567 | ||
4568 | if (m->no_console_output) | |
4569 | return false; | |
4570 | ||
4571 | if (!IN_SET(manager_state(m), MANAGER_INITIALIZING, MANAGER_STARTING, MANAGER_STOPPING)) | |
4572 | return false; | |
4573 | ||
4574 | /* Suppress status output while a password is being queried; if we cannot find that out properly, just proceed. */ | |
4575 | if (type != STATUS_TYPE_EMERGENCY && manager_check_ask_password(m) > 0) | |
4576 | return false; | |
4577 | ||
4578 | if (type == STATUS_TYPE_NOTICE && m->show_status != SHOW_STATUS_NO) | |
4579 | return true; | |
4580 | ||
4581 | return manager_get_show_status_on(m); | |
4582 | } | |
4583 | ||
4584 | void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) { | |
4585 | va_list ap; | |
4586 | ||
4587 | /* If m is NULL, assume we're after shutdown and let the messages through. */ | |
4588 | ||
4589 | if (m && !manager_should_show_status(m, type)) | |
4590 | return; | |
4591 | ||
4592 | /* XXX We should totally drop the check for ephemeral here | |
4593 | * and thus effectively make 'Type=idle' pointless. */ | |
4594 | if (type == STATUS_TYPE_EPHEMERAL && m && m->n_on_console > 0) | |
4595 | return; | |
4596 | ||
4597 | va_start(ap, format); | |
4598 | status_vprintf(status, SHOW_STATUS_ELLIPSIZE|(type == STATUS_TYPE_EPHEMERAL ? SHOW_STATUS_EPHEMERAL : 0), format, ap); | |
4599 | va_end(ap); | |
4600 | } | |
4601 | ||
4602 | Set* manager_get_units_needing_mounts_for(Manager *m, const char *path, UnitMountDependencyType t) { | |
4603 | assert(m); | |
4604 | assert(path); | |
4605 | assert(t >= 0 && t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX); | |
4606 | ||
4607 | if (path_equal(path, "/")) | |
4608 | path = ""; | |
4609 | ||
4610 | return hashmap_get(m->units_needing_mounts_for[t], path); | |
4611 | } | |
4612 | ||
4613 | int manager_update_failed_units(Manager *m, Unit *u, bool failed) { | |
4614 | unsigned size; | |
4615 | int r; | |
4616 | ||
4617 | assert(m); | |
4618 | assert(u->manager == m); | |
4619 | ||
4620 | size = set_size(m->failed_units); | |
4621 | ||
4622 | if (failed) { | |
4623 | r = set_ensure_put(&m->failed_units, NULL, u); | |
4624 | if (r < 0) | |
4625 | return log_oom(); | |
4626 | } else | |
4627 | (void) set_remove(m->failed_units, u); | |
4628 | ||
4629 | if (set_size(m->failed_units) != size) | |
4630 | bus_manager_send_change_signal(m); | |
4631 | ||
4632 | return 0; | |
4633 | } | |
4634 | ||
4635 | ManagerState manager_state(Manager *m) { | |
4636 | Unit *u; | |
4637 | ||
4638 | assert(m); | |
4639 | ||
4640 | /* Is the special shutdown target active or queued? If so, we are in shutdown state */ | |
4641 | u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET); | |
4642 | if (u && unit_active_or_pending(u)) | |
4643 | return MANAGER_STOPPING; | |
4644 | ||
4645 | /* Did we ever finish booting? If not, then we are still starting up. */ | |
4646 | if (!MANAGER_IS_FINISHED(m)) { | |
4647 | ||
4648 | u = manager_get_unit(m, SPECIAL_BASIC_TARGET); | |
4649 | if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) | |
4650 | return MANAGER_INITIALIZING; | |
4651 | ||
4652 | return MANAGER_STARTING; | |
4653 | } | |
4654 | ||
4655 | if (MANAGER_IS_SYSTEM(m)) { | |
4656 | /* Are the rescue or emergency targets active or queued? If so we are in maintenance state */ | |
4657 | u = manager_get_unit(m, SPECIAL_RESCUE_TARGET); | |
4658 | if (u && unit_active_or_pending(u)) | |
4659 | return MANAGER_MAINTENANCE; | |
4660 | ||
4661 | u = manager_get_unit(m, SPECIAL_EMERGENCY_TARGET); | |
4662 | if (u && unit_active_or_pending(u)) | |
4663 | return MANAGER_MAINTENANCE; | |
4664 | } | |
4665 | ||
4666 | /* Are there any failed units? If so, we are in degraded mode */ | |
4667 | if (!set_isempty(m->failed_units)) | |
4668 | return MANAGER_DEGRADED; | |
4669 | ||
4670 | return MANAGER_RUNNING; | |
4671 | } | |
4672 | ||
4673 | static void manager_unref_uid_internal( | |
4674 | Hashmap *uid_refs, | |
4675 | uid_t uid, | |
4676 | bool destroy_now, | |
4677 | int (*_clean_ipc)(uid_t uid)) { | |
4678 | ||
4679 | uint32_t c, n; | |
4680 | ||
4681 | assert(uid_is_valid(uid)); | |
4682 | assert(_clean_ipc); | |
4683 | ||
4684 | /* A generic implementation, covering both manager_unref_uid() and manager_unref_gid(), under the | |
4685 | * assumption that uid_t and gid_t are actually defined the same way, with the same validity rules. | |
4686 | * | |
4687 | * We store a hashmap where the key is the UID/GID and the value is a 32-bit reference counter, whose | |
4688 | * highest bit is used as flag for marking UIDs/GIDs whose IPC objects to remove when the last | |
4689 | * reference to the UID/GID is dropped. The flag is set to on, once at least one reference from a | |
4690 | * unit where RemoveIPC= is set is added on a UID/GID. It is reset when the UID's/GID's reference | |
4691 | * counter drops to 0 again. */ | |
4692 | ||
4693 | assert_cc(sizeof(uid_t) == sizeof(gid_t)); | |
4694 | assert_cc(UID_INVALID == (uid_t) GID_INVALID); | |
4695 | ||
4696 | if (uid == 0) /* We don't keep track of root, and will never destroy it */ | |
4697 | return; | |
4698 | ||
4699 | c = PTR_TO_UINT32(hashmap_get(uid_refs, UID_TO_PTR(uid))); | |
4700 | ||
4701 | n = c & ~DESTROY_IPC_FLAG; | |
4702 | assert(n > 0); | |
4703 | n--; | |
4704 | ||
4705 | if (destroy_now && n == 0) { | |
4706 | hashmap_remove(uid_refs, UID_TO_PTR(uid)); | |
4707 | ||
4708 | if (c & DESTROY_IPC_FLAG) { | |
4709 | log_debug("%s " UID_FMT " is no longer referenced, cleaning up its IPC.", | |
4710 | _clean_ipc == clean_ipc_by_uid ? "UID" : "GID", | |
4711 | uid); | |
4712 | (void) _clean_ipc(uid); | |
4713 | } | |
4714 | } else { | |
4715 | c = n | (c & DESTROY_IPC_FLAG); | |
4716 | assert_se(hashmap_update(uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0); | |
4717 | } | |
4718 | } | |
4719 | ||
4720 | void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now) { | |
4721 | manager_unref_uid_internal(m->uid_refs, uid, destroy_now, clean_ipc_by_uid); | |
4722 | } | |
4723 | ||
4724 | void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now) { | |
4725 | manager_unref_uid_internal(m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid); | |
4726 | } | |
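
/*
 * Illustrative sketch, not part of this file: the 32-bit packing used by the
 * UID/GID reference counters above, assuming DESTROY_IPC_FLAG occupies the
 * topmost bit (which is what the -EOVERFLOW check in
 * manager_ref_uid_internal() below implies). The low 31 bits hold the
 * reference count, the top bit records whether IPC objects should be removed
 * once the count drops back to zero. FLAG_DESTROY_IPC is a hypothetical
 * local stand-in.
 */
#include <assert.h>
#include <stdint.h>

#define FLAG_DESTROY_IPC (UINT32_C(1) << 31)

int main(void) {
        uint32_t c = 0;

        /* Take one reference from a unit with RemoveIPC= enabled. */
        uint32_t n = (c & ~FLAG_DESTROY_IPC) + 1;
        c = n | FLAG_DESTROY_IPC;

        assert((c & ~FLAG_DESTROY_IPC) == 1); /* one reference held */
        assert(c & FLAG_DESTROY_IPC);         /* IPC cleanup requested */

        /* Drop it again: the count reaches zero, so IPC would be cleaned. */
        n = (c & ~FLAG_DESTROY_IPC) - 1;
        assert(n == 0);
        return 0;
}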
4727 | ||
4728 | static int manager_ref_uid_internal( | |
4729 | Hashmap **uid_refs, | |
4730 | uid_t uid, | |
4731 | bool clean_ipc) { | |
4732 | ||
4733 | uint32_t c, n; | |
4734 | int r; | |
4735 | ||
4736 | assert(uid_refs); | |
4737 | assert(uid_is_valid(uid)); | |
4738 | ||
4739 | /* A generic implementation, covering both manager_ref_uid() and manager_ref_gid(), under the | |
4740 | * assumption that uid_t and gid_t are actually defined the same way, with the same validity | |
4741 | * rules. */ | |
4742 | ||
4743 | assert_cc(sizeof(uid_t) == sizeof(gid_t)); | |
4744 | assert_cc(UID_INVALID == (uid_t) GID_INVALID); | |
4745 | ||
4746 | if (uid == 0) /* We don't keep track of root, and will never destroy it */ | |
4747 | return 0; | |
4748 | ||
4749 | r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops); | |
4750 | if (r < 0) | |
4751 | return r; | |
4752 | ||
4753 | c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid))); | |
4754 | ||
4755 | n = c & ~DESTROY_IPC_FLAG; | |
4756 | n++; | |
4757 | ||
4758 | if (n & DESTROY_IPC_FLAG) /* check for overflow */ | |
4759 | return -EOVERFLOW; | |
4760 | ||
4761 | c = n | (c & DESTROY_IPC_FLAG) | (clean_ipc ? DESTROY_IPC_FLAG : 0); | |
4762 | ||
4763 | return hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)); | |
4764 | } | |
4765 | ||
4766 | int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc) { | |
4767 | return manager_ref_uid_internal(&m->uid_refs, uid, clean_ipc); | |
4768 | } | |
4769 | ||
4770 | int manager_ref_gid(Manager *m, gid_t gid, bool clean_ipc) { | |
4771 | return manager_ref_uid_internal(&m->gid_refs, (uid_t) gid, clean_ipc); | |
4772 | } | |
4773 | ||
4774 | static void manager_vacuum_uid_refs_internal( | |
4775 | Hashmap *uid_refs, | |
4776 | int (*_clean_ipc)(uid_t uid)) { | |
4777 | ||
4778 | void *p, *k; | |
4779 | ||
4780 | assert(_clean_ipc); | |
4781 | ||
4782 | HASHMAP_FOREACH_KEY(p, k, uid_refs) { | |
4783 | uint32_t c, n; | |
4784 | uid_t uid; | |
4785 | ||
4786 | uid = PTR_TO_UID(k); | |
4787 | c = PTR_TO_UINT32(p); | |
4788 | ||
4789 | n = c & ~DESTROY_IPC_FLAG; | |
4790 | if (n > 0) | |
4791 | continue; | |
4792 | ||
4793 | if (c & DESTROY_IPC_FLAG) { | |
4794 | log_debug("Found unreferenced %s " UID_FMT " after reload/reexec. Cleaning up.", | |
4795 | _clean_ipc == clean_ipc_by_uid ? "UID" : "GID", | |
4796 | uid); | |
4797 | (void) _clean_ipc(uid); | |
4798 | } | |
4799 | ||
4800 | assert_se(hashmap_remove(uid_refs, k) == p); | |
4801 | } | |
4802 | } | |
4803 | ||
4804 | static void manager_vacuum_uid_refs(Manager *m) { | |
4805 | manager_vacuum_uid_refs_internal(m->uid_refs, clean_ipc_by_uid); | |
4806 | } | |
4807 | ||
4808 | static void manager_vacuum_gid_refs(Manager *m) { | |
4809 | manager_vacuum_uid_refs_internal(m->gid_refs, clean_ipc_by_gid); | |
4810 | } | |
4811 | ||
4812 | static void manager_vacuum(Manager *m) { | |
4813 | assert(m); | |
4814 | ||
4815 | /* Release any dynamic users no longer referenced */ | |
4816 | dynamic_user_vacuum(m, true); | |
4817 | ||
4818 | /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */ | |
4819 | manager_vacuum_uid_refs(m); | |
4820 | manager_vacuum_gid_refs(m); | |
4821 | ||
4822 | /* Release any runtimes no longer referenced */ | |
4823 | exec_shared_runtime_vacuum(m); | |
4824 | } | |
4825 | ||
4826 | static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
4827 | struct buffer { | |
4828 | uid_t uid; | |
4829 | gid_t gid; | |
4830 | char unit_name[UNIT_NAME_MAX+1]; | |
4831 | } _packed_ buffer; | |
4832 | ||
4833 | Manager *m = ASSERT_PTR(userdata); | |
4834 | ssize_t l; | |
4835 | size_t n; | |
4836 | Unit *u; | |
4837 | ||
4838 | assert(source); | |
4839 | ||
4840 | /* Invoked whenever a child process succeeded in resolving the user/group to use and sent us the | |
4841 | * resulting UID/GID in a datagram. We parse the datagram here and pass it off to the unit, so that | |
4842 | * it can add a reference to the UID/GID, allowing it to destroy the UID's/GID's IPC objects once | |
4843 | * the reference counter drops to 0. */ | |
4844 | ||
4845 | l = recv(fd, &buffer, sizeof(buffer), MSG_DONTWAIT); | |
4846 | if (l < 0) { | |
4847 | if (ERRNO_IS_TRANSIENT(errno)) | |
4848 | return 0; | |
4849 | ||
4850 | return log_error_errno(errno, "Failed to read from user lookup fd: %m"); | |
4851 | } | |
4852 | ||
4853 | if ((size_t) l <= offsetof(struct buffer, unit_name)) { | |
4854 | log_warning("Received too short user lookup message, ignoring."); | |
4855 | return 0; | |
4856 | } | |
4857 | ||
4858 | if ((size_t) l > offsetof(struct buffer, unit_name) + UNIT_NAME_MAX) { | |
4859 | log_warning("Received too long user lookup message, ignoring."); | |
4860 | return 0; | |
4861 | } | |
4862 | ||
4863 | if (!uid_is_valid(buffer.uid) && !gid_is_valid(buffer.gid)) { | |
4864 | log_warning("Got user lookup message with invalid UID/GID pair, ignoring."); | |
4865 | return 0; | |
4866 | } | |
4867 | ||
4868 | n = (size_t) l - offsetof(struct buffer, unit_name); | |
4869 | if (memchr(buffer.unit_name, 0, n)) { | |
4870 | log_warning("Received lookup message with embedded NUL character, ignoring."); | |
4871 | return 0; | |
4872 | } | |
4873 | ||
4874 | buffer.unit_name[n] = 0; | |
4875 | u = manager_get_unit(m, buffer.unit_name); | |
4876 | if (!u) { | |
4877 | log_debug("Got user lookup message but unit doesn't exist, ignoring."); | |
4878 | return 0; | |
4879 | } | |
4880 | ||
4881 | log_unit_debug(u, "User lookup succeeded: uid=" UID_FMT " gid=" GID_FMT, buffer.uid, buffer.gid); | |
4882 | ||
4883 | unit_notify_user_lookup(u, buffer.uid, buffer.gid); | |
4884 | return 0; | |
4885 | } | |
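
/*
 * Illustrative sketch, not part of this file: a plausible sending side for
 * the user-lookup datagram parsed above. The wire format is just the UID,
 * the GID and the unit name, packed, without a trailing NUL byte - the
 * receiver derives the name length from the datagram size. send_user_lookup()
 * is a hypothetical name, and 256 stands in for UNIT_NAME_MAX+1.
 */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

struct lookup_msg {
        uid_t uid;
        gid_t gid;
        char unit_name[256];
} __attribute__((packed));

static int send_user_lookup(int fd, uid_t uid, gid_t gid, const char *unit_name) {
        struct lookup_msg buffer = { .uid = uid, .gid = gid };
        size_t l = strlen(unit_name);

        if (l >= sizeof(buffer.unit_name))
                return -1; /* unit name too long for the protocol */

        memcpy(buffer.unit_name, unit_name, l);

        /* Ship exactly header + name bytes; the receiver bounds-checks both. */
        if (send(fd, &buffer, offsetof(struct lookup_msg, unit_name) + l, MSG_NOSIGNAL) < 0)
                return -1;

        return 0;
}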
4886 | ||
4887 | static int manager_dispatch_handoff_timestamp_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
4888 | Manager *m = ASSERT_PTR(userdata); | |
4889 | usec_t ts[2] = {}; | |
4890 | CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred))) control; | |
4891 | struct msghdr msghdr = { | |
4892 | .msg_iov = &IOVEC_MAKE(ts, sizeof(ts)), | |
4893 | .msg_iovlen = 1, | |
4894 | .msg_control = &control, | |
4895 | .msg_controllen = sizeof(control), | |
4896 | }; | |
4897 | ssize_t n; | |
4898 | ||
4899 | assert(source); | |
4900 | ||
4901 | n = recvmsg_safe(m->handoff_timestamp_fds[0], &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC|MSG_TRUNC); | |
4902 | if (ERRNO_IS_NEG_TRANSIENT(n)) | |
4903 | return 0; /* Spurious wakeup, try again */ | |
4904 | if (n == -EXFULL) { | |
4905 | log_warning("Got message with truncated control, ignoring."); | |
4906 | return 0; | |
4907 | } | |
4908 | if (n < 0) | |
4909 | return log_error_errno(n, "Failed to receive handoff timestamp message: %m"); | |
4910 | ||
4911 | if (msghdr.msg_flags & MSG_TRUNC) { | |
4912 | log_warning("Got truncated handoff timestamp message, ignoring."); | |
4913 | return 0; | |
4914 | } | |
4915 | if (n != sizeof(ts)) { | |
4916 | log_warning("Got handoff timestamp message of unexpected size %zi (expected %zu), ignoring.", n, sizeof(ts)); | |
4917 | return 0; | |
4918 | } | |
4919 | ||
4920 | struct ucred *ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred); | |
4921 | if (!ucred || !pid_is_valid(ucred->pid)) { | |
4922 | log_warning("Received notify message without valid credentials. Ignoring."); | |
4923 | return 0; | |
4924 | } | |
4925 | ||
4926 | log_debug("Got handoff timestamp event for PID " PID_FMT ".", ucred->pid); | |
4927 | ||
4928 | _cleanup_free_ Unit **units = NULL; | |
4929 | int n_units = manager_get_units_for_pidref(m, &PIDREF_MAKE_FROM_PID(ucred->pid), &units); | |
4930 | if (n_units < 0) { | |
4931 | log_warning_errno(n_units, "Unable to determine units for PID " PID_FMT ", ignoring: %m", ucred->pid); | |
4932 | return 0; | |
4933 | } | |
4934 | if (n_units == 0) { | |
4935 | log_debug("Got handoff timestamp for process " PID_FMT " we are not interested in, ignoring.", ucred->pid); | |
4936 | return 0; | |
4937 | } | |
4938 | ||
4939 | dual_timestamp dt = { | |
4940 | .realtime = ts[0], | |
4941 | .monotonic = ts[1], | |
4942 | }; | |
4943 | ||
4944 | FOREACH_ARRAY(u, units, n_units) { | |
4945 | if (!UNIT_VTABLE(*u)->notify_handoff_timestamp) | |
4946 | continue; | |
4947 | ||
4948 | UNIT_VTABLE(*u)->notify_handoff_timestamp(*u, ucred, &dt); | |
4949 | } | |
4950 | ||
4951 | return 0; | |
4952 | } | |
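
/*
 * Illustrative sketch, not part of this file: a plausible sending side for
 * the handoff timestamp datagram above - two 64-bit microsecond values
 * (realtime first, monotonic second) in one datagram. The SCM_CREDENTIALS
 * control data the receiver insists on is attached by the kernel when the
 * receiving socket has SO_PASSCRED enabled, so the sender only ships the
 * payload. send_handoff_timestamps() is a hypothetical name.
 */
#include <stdint.h>
#include <sys/socket.h>
#include <time.h>

static int send_handoff_timestamps(int fd) {
        struct timespec rt, mn;

        clock_gettime(CLOCK_REALTIME, &rt);
        clock_gettime(CLOCK_MONOTONIC, &mn);

        uint64_t ts[2] = {
                (uint64_t) rt.tv_sec * 1000000 + rt.tv_nsec / 1000, /* realtime usec */
                (uint64_t) mn.tv_sec * 1000000 + mn.tv_nsec / 1000, /* monotonic usec */
        };

        if (send(fd, ts, sizeof(ts), MSG_DONTWAIT) < 0)
                return -1;

        return 0;
}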
4953 | ||
4954 | void manager_ref_console(Manager *m) { | |
4955 | assert(m); | |
4956 | ||
4957 | m->n_on_console++; | |
4958 | } | |
4959 | ||
4960 | void manager_unref_console(Manager *m) { | |
4961 | ||
4962 | assert(m->n_on_console > 0); | |
4963 | m->n_on_console--; | |
4964 | ||
4965 | if (m->n_on_console == 0) | |
4966 | m->no_console_output = false; /* unset no_console_output flag, since the console is definitely free now */ | |
4967 | } | |
4968 | ||
4969 | void manager_override_log_level(Manager *m, int level) { | |
4970 | _cleanup_free_ char *s = NULL; | |
4971 | assert(m); | |
4972 | ||
4973 | if (!m->log_level_overridden) { | |
4974 | m->original_log_level = log_get_max_level(); | |
4975 | m->log_level_overridden = true; | |
4976 | } | |
4977 | ||
4978 | (void) log_level_to_string_alloc(level, &s); | |
4979 | log_info("Setting log level to %s.", strna(s)); | |
4980 | ||
4981 | log_set_max_level(level); | |
4982 | } | |
4983 | ||
4984 | void manager_restore_original_log_level(Manager *m) { | |
4985 | _cleanup_free_ char *s = NULL; | |
4986 | assert(m); | |
4987 | ||
4988 | if (!m->log_level_overridden) | |
4989 | return; | |
4990 | ||
4991 | (void) log_level_to_string_alloc(m->original_log_level, &s); | |
4992 | log_info("Restoring log level to original (%s).", strna(s)); | |
4993 | ||
4994 | log_set_max_level(m->original_log_level); | |
4995 | m->log_level_overridden = false; | |
4996 | } | |
4997 | ||
4998 | void manager_override_log_target(Manager *m, LogTarget target) { | |
4999 | assert(m); | |
5000 | ||
5001 | if (!m->log_target_overridden) { | |
5002 | m->original_log_target = log_get_target(); | |
5003 | m->log_target_overridden = true; | |
5004 | } | |
5005 | ||
5006 | log_info("Setting log target to %s.", log_target_to_string(target)); | |
5007 | log_set_target(target); | |
5008 | } | |
5009 | ||
5010 | void manager_restore_original_log_target(Manager *m) { | |
5011 | assert(m); | |
5012 | ||
5013 | if (!m->log_target_overridden) | |
5014 | return; | |
5015 | ||
5016 | log_info("Restoring log target to original %s.", log_target_to_string(m->original_log_target)); | |
5017 | ||
5018 | log_set_target(m->original_log_target); | |
5019 | m->log_target_overridden = false; | |
5020 | } | |
5021 | ||
5022 | ManagerTimestamp manager_timestamp_initrd_mangle(ManagerTimestamp s) { | |
5023 | if (in_initrd() && | |
5024 | s >= MANAGER_TIMESTAMP_SECURITY_START && | |
5025 | s <= MANAGER_TIMESTAMP_UNITS_LOAD_FINISH) | |
5026 | return s - MANAGER_TIMESTAMP_SECURITY_START + MANAGER_TIMESTAMP_INITRD_SECURITY_START; | |
5027 | return s; | |
5028 | } | |
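
/*
 * Illustrative sketch, not part of this file: the mangling above shifts a
 * timestamp id by the fixed distance between the two enum ranges, so that
 * e.g. while in the initrd "security-start" is recorded as
 * "initrd-security-start". A compressed model with hypothetical enum values:
 */
#include <assert.h>

enum {
        TS_SECURITY_START,        /* first value of the mangled range */
        TS_SECURITY_FINISH,
        TS_UNITS_LOAD_FINISH,     /* last value of the mangled range */
        TS_INITRD_SECURITY_START, /* first of the initrd-prefixed copies */
        TS_INITRD_SECURITY_FINISH,
};

int main(void) {
        int s = TS_SECURITY_FINISH;
        int mangled = s - TS_SECURITY_START + TS_INITRD_SECURITY_START;
        assert(mangled == TS_INITRD_SECURITY_FINISH);
        return 0;
}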
5029 | ||
5030 | int manager_allocate_idle_pipe(Manager *m) { | |
5031 | int r; | |
5032 | ||
5033 | assert(m); | |
5034 | ||
5035 | if (m->idle_pipe[0] >= 0) { | |
5036 | assert(m->idle_pipe[1] >= 0); | |
5037 | assert(m->idle_pipe[2] >= 0); | |
5038 | assert(m->idle_pipe[3] >= 0); | |
5039 | return 0; | |
5040 | } | |
5041 | ||
5042 | assert(m->idle_pipe[1] < 0); | |
5043 | assert(m->idle_pipe[2] < 0); | |
5044 | assert(m->idle_pipe[3] < 0); | |
5045 | ||
5046 | r = RET_NERRNO(pipe2(m->idle_pipe + 0, O_NONBLOCK|O_CLOEXEC)); | |
5047 | if (r < 0) | |
5048 | return r; | |
5049 | ||
5050 | r = RET_NERRNO(pipe2(m->idle_pipe + 2, O_NONBLOCK|O_CLOEXEC)); | |
5051 | if (r < 0) { | |
5052 | safe_close_pair(m->idle_pipe + 0); | |
5053 | return r; | |
5054 | } | |
5055 | ||
5056 | return 1; | |
5057 | } | |
5058 | ||
5059 | void unit_defaults_init(UnitDefaults *defaults, RuntimeScope scope) { | |
5060 | assert(defaults); | |
5061 | assert(scope >= 0); | |
5062 | assert(scope < _RUNTIME_SCOPE_MAX); | |
5063 | ||
5064 | *defaults = (UnitDefaults) { | |
5065 | .std_output = EXEC_OUTPUT_JOURNAL, | |
5066 | .std_error = EXEC_OUTPUT_INHERIT, | |
5067 | .restart_usec = DEFAULT_RESTART_USEC, | |
5068 | .timeout_start_usec = manager_default_timeout(scope), | |
5069 | .timeout_stop_usec = manager_default_timeout(scope), | |
5070 | .timeout_abort_usec = manager_default_timeout(scope), | |
5071 | .timeout_abort_set = false, | |
5072 | .device_timeout_usec = manager_default_timeout(scope), | |
5073 | .start_limit_interval = DEFAULT_START_LIMIT_INTERVAL, | |
5074 | .start_limit_burst = DEFAULT_START_LIMIT_BURST, | |
5075 | ||
5076 | /* On 4.15+ with unified hierarchy, CPU accounting is essentially free as it doesn't require the CPU | |
5077 | * controller to be enabled, so the default is to enable it unless we are told otherwise. */ | |
5078 | .cpu_accounting = cpu_accounting_is_cheap(), | |
5079 | .memory_accounting = MEMORY_ACCOUNTING_DEFAULT, | |
5080 | .io_accounting = false, | |
5081 | .blockio_accounting = false, | |
5082 | .tasks_accounting = true, | |
5083 | .ip_accounting = false, | |
5084 | ||
5085 | .tasks_max = DEFAULT_TASKS_MAX, | |
5086 | .timer_accuracy_usec = 1 * USEC_PER_MINUTE, | |
5087 | ||
5088 | .memory_pressure_watch = CGROUP_PRESSURE_WATCH_AUTO, | |
5089 | .memory_pressure_threshold_usec = MEMORY_PRESSURE_DEFAULT_THRESHOLD_USEC, | |
5090 | ||
5091 | .oom_policy = OOM_STOP, | |
5092 | .oom_score_adjust_set = false, | |
5093 | }; | |
5094 | } | |
5095 | ||
5096 | void unit_defaults_done(UnitDefaults *defaults) { | |
5097 | assert(defaults); | |
5098 | ||
5099 | defaults->smack_process_label = mfree(defaults->smack_process_label); | |
5100 | rlimit_free_all(defaults->rlimit); | |
5101 | } | |
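
/*
 * Illustrative sketch, not part of this file: the intended lifecycle of the
 * two helpers above, assuming RUNTIME_SCOPE_SYSTEM from the RuntimeScope
 * enum - initialize the defaults for a scope, adjust individual fields (e.g.
 * from parsed configuration), and release the allocated members when done.
 */
static void unit_defaults_example(void) {
        UnitDefaults defaults;

        unit_defaults_init(&defaults, RUNTIME_SCOPE_SYSTEM);

        defaults.io_accounting = true; /* override a single default */

        /* ... hand the struct to unit loading / manager setup ... */

        unit_defaults_done(&defaults); /* frees rlimits, SMACK label */
}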
5102 | ||
5103 | LogTarget manager_get_executor_log_target(Manager *m) { | |
5104 | assert(m); | |
5105 | ||
5106 | /* If journald is not available, tell sd-executor to log to kmsg, as it might be journald itself that is starting. */ | |
5107 | if (!MANAGER_IS_TEST_RUN(m) && !manager_journal_is_running(m)) | |
5108 | return LOG_TARGET_KMSG; | |
5109 | ||
5110 | return log_get_target(); | |
5111 | } | |
5112 | ||
5113 | static const char* const manager_state_table[_MANAGER_STATE_MAX] = { | |
5114 | [MANAGER_INITIALIZING] = "initializing", | |
5115 | [MANAGER_STARTING] = "starting", | |
5116 | [MANAGER_RUNNING] = "running", | |
5117 | [MANAGER_DEGRADED] = "degraded", | |
5118 | [MANAGER_MAINTENANCE] = "maintenance", | |
5119 | [MANAGER_STOPPING] = "stopping", | |
5120 | }; | |
5121 | ||
5122 | DEFINE_STRING_TABLE_LOOKUP(manager_state, ManagerState); | |
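
/*
 * Illustrative sketch, not part of this file: DEFINE_STRING_TABLE_LOOKUP
 * generates a to_string/from_string pair around tables like the one above.
 * Roughly equivalent to the following (simplified - the real macro in
 * string-table.h also covers NULL table entries and error-style invalid
 * values; the example_* names are hypothetical):
 */
static inline const char* example_manager_state_to_string(ManagerState s) {
        return s >= 0 && s < _MANAGER_STATE_MAX ? manager_state_table[s] : NULL;
}

static inline ManagerState example_manager_state_from_string(const char *s) {
        for (ManagerState i = 0; s && i < _MANAGER_STATE_MAX; i++)
                if (manager_state_table[i] && streq(manager_state_table[i], s))
                        return i;
        return _MANAGER_STATE_INVALID;
}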
5123 | ||
5124 | static const char* const manager_objective_table[_MANAGER_OBJECTIVE_MAX] = { | |
5125 | [MANAGER_OK] = "ok", | |
5126 | [MANAGER_EXIT] = "exit", | |
5127 | [MANAGER_RELOAD] = "reload", | |
5128 | [MANAGER_REEXECUTE] = "reexecute", | |
5129 | [MANAGER_REBOOT] = "reboot", | |
5130 | [MANAGER_SOFT_REBOOT] = "soft-reboot", | |
5131 | [MANAGER_POWEROFF] = "poweroff", | |
5132 | [MANAGER_HALT] = "halt", | |
5133 | [MANAGER_KEXEC] = "kexec", | |
5134 | [MANAGER_SWITCH_ROOT] = "switch-root", | |
5135 | }; | |
5136 | ||
5137 | DEFINE_STRING_TABLE_LOOKUP(manager_objective, ManagerObjective); | |
5138 | ||
5139 | static const char* const manager_timestamp_table[_MANAGER_TIMESTAMP_MAX] = { | |
5140 | [MANAGER_TIMESTAMP_FIRMWARE] = "firmware", | |
5141 | [MANAGER_TIMESTAMP_LOADER] = "loader", | |
5142 | [MANAGER_TIMESTAMP_KERNEL] = "kernel", | |
5143 | [MANAGER_TIMESTAMP_INITRD] = "initrd", | |
5144 | [MANAGER_TIMESTAMP_USERSPACE] = "userspace", | |
5145 | [MANAGER_TIMESTAMP_FINISH] = "finish", | |
5146 | [MANAGER_TIMESTAMP_SECURITY_START] = "security-start", | |
5147 | [MANAGER_TIMESTAMP_SECURITY_FINISH] = "security-finish", | |
5148 | [MANAGER_TIMESTAMP_GENERATORS_START] = "generators-start", | |
5149 | [MANAGER_TIMESTAMP_GENERATORS_FINISH] = "generators-finish", | |
5150 | [MANAGER_TIMESTAMP_UNITS_LOAD_START] = "units-load-start", | |
5151 | [MANAGER_TIMESTAMP_UNITS_LOAD_FINISH] = "units-load-finish", | |
5152 | [MANAGER_TIMESTAMP_UNITS_LOAD] = "units-load", | |
5153 | [MANAGER_TIMESTAMP_INITRD_SECURITY_START] = "initrd-security-start", | |
5154 | [MANAGER_TIMESTAMP_INITRD_SECURITY_FINISH] = "initrd-security-finish", | |
5155 | [MANAGER_TIMESTAMP_INITRD_GENERATORS_START] = "initrd-generators-start", | |
5156 | [MANAGER_TIMESTAMP_INITRD_GENERATORS_FINISH] = "initrd-generators-finish", | |
5157 | [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_START] = "initrd-units-load-start", | |
5158 | [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_FINISH] = "initrd-units-load-finish", | |
5159 | [MANAGER_TIMESTAMP_SHUTDOWN_START] = "shutdown-start", | |
5160 | }; | |
5161 | ||
5162 | DEFINE_STRING_TABLE_LOOKUP(manager_timestamp, ManagerTimestamp); | |
5163 | ||
5164 | static const char* const oom_policy_table[_OOM_POLICY_MAX] = { | |
5165 | [OOM_CONTINUE] = "continue", | |
5166 | [OOM_STOP] = "stop", | |
5167 | [OOM_KILL] = "kill", | |
5168 | }; | |
5169 | ||
5170 | DEFINE_STRING_TABLE_LOOKUP(oom_policy, OOMPolicy); |