/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <fcntl.h>
#include <linux/kd.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/reboot.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-bus.h"
#include "sd-daemon.h"
#include "sd-messages.h"
#include "sd-path.h"

#include "all-units.h"
#include "alloc-util.h"
#include "architecture.h"
#include "audit-fd.h"
#include "boot-timestamps.h"
#include "bpf-restrict-fs.h"
#include "build-path.h"
#include "bus-common-errors.h"
#include "bus-error.h"
#include "clean-ipc.h"
#include "common-signal.h"
#include "confidential-virt.h"
#include "constants.h"
#include "creds-util.h"
#include "daemon-util.h"
#include "dbus-job.h"
#include "dbus-manager.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dirent-util.h"
#include "dynamic-user.h"
#include "env-util.h"
#include "escape.h"
#include "event-util.h"
#include "exec-util.h"
#include "execute.h"
#include "exit-status.h"
#include "fd-util.h"
#include "fdset.h"
#include "format-util.h"
#include "fs-util.h"
#include "generator-setup.h"
#include "hashmap.h"
#include "initrd-util.h"
#include "inotify-util.h"
#include "install.h"
#include "io-util.h"
#include "iovec-util.h"
#include "libaudit-util.h"
#include "locale-setup.h"
#include "log.h"
#include "manager-dump.h"
#include "manager-serialize.h"
#include "manager.h"
#include "mkdir-label.h"
#include "mount-util.h"
#include "notify-recv.h"
#include "parse-util.h"
#include "path-lookup.h"
#include "path-util.h"
#include "plymouth-util.h"
#include "pretty-print.h"
#include "prioq.h"
#include "process-util.h"
#include "psi-util.h"
#include "ratelimit.h"
#include "rlimit-util.h"
#include "rm-rf.h"
#include "selinux-util.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "socket-util.h"
#include "special.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "strxcpyx.h"
#include "sysctl-util.h"
#include "syslog-util.h"
#include "taint.h"
#include "terminal-util.h"
#include "time-util.h"
#include "transaction.h"
#include "umask-util.h"
#include "unit-name.h"
#include "user-util.h"
#include "varlink.h"
#include "virt.h"
#include "watchdog.h"

/* Make sure clients notifying us don't block */
#define MANAGER_SOCKET_RCVBUF_SIZE (8*U64_MB)

/* Initial delay and the interval for printing status messages about running jobs */
#define JOBS_IN_PROGRESS_WAIT_USEC (2*USEC_PER_SEC)
#define JOBS_IN_PROGRESS_QUIET_WAIT_USEC (25*USEC_PER_SEC)
#define JOBS_IN_PROGRESS_PERIOD_USEC (USEC_PER_SEC / 3)
#define JOBS_IN_PROGRESS_PERIOD_DIVISOR 3

/* If there are more than 1K bus messages queued across our API and direct buses, then let's not add more
 * on top until the queue has emptied out a bit. */
#define MANAGER_BUS_BUSY_THRESHOLD 1024LU

/* How many units and jobs to process from the bus queue before returning to the event loop. */
#define MANAGER_BUS_MESSAGE_BUDGET 100U

#define DEFAULT_TASKS_MAX ((CGroupTasksMax) { 15U, 100U }) /* 15% */

static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_handoff_timestamp_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_pidref_transport_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata);
static int manager_dispatch_run_queue(sd_event_source *source, void *userdata);
static int manager_dispatch_sigchld(sd_event_source *source, void *userdata);
static int manager_dispatch_timezone_change(sd_event_source *source, const struct inotify_event *event, void *userdata);
static int manager_run_environment_generators(Manager *m);
static int manager_run_generators(Manager *m);
static void manager_vacuum(Manager *m);

static usec_t manager_watch_jobs_next_time(Manager *m) {
        usec_t timeout;

        if (MANAGER_IS_USER(m))
                /* Let the user manager show status more quickly (with a shortened delay), so that the
                 * system manager can make use of it, if it wants to. */
                timeout = JOBS_IN_PROGRESS_WAIT_USEC * 2 / 3;
        else if (show_status_on(m->show_status))
                /* When status is on, just use the usual timeout. */
                timeout = JOBS_IN_PROGRESS_WAIT_USEC;
        else
                timeout = JOBS_IN_PROGRESS_QUIET_WAIT_USEC;

        return usec_add(now(CLOCK_MONOTONIC), timeout);
}

static bool manager_is_confirm_spawn_disabled(Manager *m) {
        assert(m);

        if (!m->confirm_spawn)
                return true;

        return access("/run/systemd/confirm_spawn_disabled", F_OK) >= 0;
}

static void manager_watch_jobs_in_progress(Manager *m) {
        usec_t next;
        int r;

        assert(m);

        /* We do not want to show the cylon animation if the user needs to confirm service executions,
         * since otherwise the confirmation messages would be garbled by the animation. */
        if (!manager_is_confirm_spawn_disabled(m))
                return;

        if (m->jobs_in_progress_event_source)
                return;

        next = manager_watch_jobs_next_time(m);
        r = sd_event_add_time(
                        m->event,
                        &m->jobs_in_progress_event_source,
                        CLOCK_MONOTONIC,
                        next, 0,
                        manager_dispatch_jobs_in_progress, m);
        if (r < 0)
                return;

        (void) sd_event_source_set_description(m->jobs_in_progress_event_source, "manager-jobs-in-progress");
}

static void manager_flip_auto_status(Manager *m, bool enable, const char *reason) {
        assert(m);

        if (enable) {
                if (m->show_status == SHOW_STATUS_AUTO)
                        manager_set_show_status(m, SHOW_STATUS_TEMPORARY, reason);
        } else {
                if (m->show_status == SHOW_STATUS_TEMPORARY)
                        manager_set_show_status(m, SHOW_STATUS_AUTO, reason);
        }
}

static void manager_print_jobs_in_progress(Manager *m) {
        Job *j;
        unsigned counter = 0, print_nr;
        char cylon[6 + CYLON_BUFFER_EXTRA + 1];
        unsigned cylon_pos;
        uint64_t timeout = 0;

        assert(m);
        assert(m->n_running_jobs > 0);

        manager_flip_auto_status(m, true, "delay");

        print_nr = (m->jobs_in_progress_iteration / JOBS_IN_PROGRESS_PERIOD_DIVISOR) % m->n_running_jobs;

        HASHMAP_FOREACH(j, m->jobs)
                if (j->state == JOB_RUNNING && counter++ == print_nr)
                        break;

        /* m->n_running_jobs must be consistent with the contents of m->jobs,
         * so the above loop must have succeeded in finding j. */
        assert(counter == print_nr + 1);
        assert(j);

        cylon_pos = m->jobs_in_progress_iteration % 14;
        if (cylon_pos >= 8)
                cylon_pos = 14 - cylon_pos;
        draw_cylon(cylon, sizeof(cylon), 6, cylon_pos);

        m->jobs_in_progress_iteration++;

        char job_of_n[STRLEN("( of ) ") + DECIMAL_STR_MAX(unsigned)*2] = "";
        if (m->n_running_jobs > 1)
                xsprintf(job_of_n, "(%u of %u) ", counter, m->n_running_jobs);

        (void) job_get_timeout(j, &timeout);

        /* We want to use enough information for the user to identify previous lines talking about the same
         * unit, but keep the message as short as possible. So if 'Starting foo.service' or 'Starting
         * foo.service - Description' were used, 'foo.service' is enough here. On the other hand, if we used
         * 'Starting Description' before, then we shall also use 'Description' here. So we pass NULL as the
         * second argument to unit_status_string(). */
        const char *ident = unit_status_string(j->unit, NULL);

        const char *time = FORMAT_TIMESPAN(now(CLOCK_MONOTONIC) - j->begin_usec, 1*USEC_PER_SEC);
        const char *limit = timeout > 0 ? FORMAT_TIMESPAN(timeout - j->begin_usec, 1*USEC_PER_SEC) : "no limit";

        if (m->status_unit_format == STATUS_UNIT_FORMAT_DESCRIPTION)
                /* When using 'Description', we effectively don't have enough space to show the nested status
                 * without ellipsization, so let's not even try. */
                manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
                                      "%sA %s job is running for %s (%s / %s)",
                                      job_of_n,
                                      job_type_to_string(j->type),
                                      ident,
                                      time, limit);
        else {
                const char *status_text = unit_status_text(j->unit);

                manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
                                      "%sJob %s/%s running (%s / %s)%s%s",
                                      job_of_n,
                                      ident,
                                      job_type_to_string(j->type),
                                      time, limit,
                                      status_text ? ": " : "",
                                      strempty(status_text));
        }

        (void) sd_notifyf(/* unset_environment= */ false,
                          "STATUS=%sUser job %s/%s running (%s / %s)...",
                          job_of_n,
                          ident, job_type_to_string(j->type),
                          time, limit);
        m->status_ready = false;
}

static int have_ask_password(void) {
        _cleanup_closedir_ DIR *dir = NULL;

        dir = opendir("/run/systemd/ask-password");
        if (!dir) {
                if (errno == ENOENT)
                        return false;

                return -errno;
        }

        FOREACH_DIRENT_ALL(de, dir, return -errno) {
                if (!IN_SET(de->d_type, DT_REG, DT_UNKNOWN))
                        continue;

                if (startswith(de->d_name, "ask."))
                        return true;
        }

        return false;
}

static int manager_dispatch_ask_password_fd(sd_event_source *source,
                                            int fd, uint32_t revents, void *userdata) {
        Manager *m = ASSERT_PTR(userdata);

        (void) flush_fd(fd);

        m->have_ask_password = have_ask_password();
        if (m->have_ask_password < 0)
                /* Log error but continue. Negative have_ask_password is treated as unknown status. */
                log_warning_errno(m->have_ask_password, "Failed to list /run/systemd/ask-password/, ignoring: %m");

        return 0;
}

static void manager_close_ask_password(Manager *m) {
        assert(m);

        m->ask_password_event_source = sd_event_source_disable_unref(m->ask_password_event_source);
        m->have_ask_password = -EINVAL;
}

static int manager_check_ask_password(Manager *m) {
        int r;

        assert(m);

        /* We only care about password prompts when running in system mode (because that's the only time we
         * manage a console). */
        if (!MANAGER_IS_SYSTEM(m))
                return 0;

        if (!m->ask_password_event_source) {
                _cleanup_close_ int inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create inotify object: %m");

                (void) mkdir_label("/run/systemd/ask-password", 0755);
                r = inotify_add_watch_and_warn(inotify_fd, "/run/systemd/ask-password", IN_CLOSE_WRITE|IN_DELETE|IN_MOVED_TO|IN_ONLYDIR);
                if (r < 0)
                        return r;

                _cleanup_(sd_event_source_disable_unrefp) sd_event_source *event_source = NULL;
                r = sd_event_add_io(
                                m->event,
                                &event_source,
                                inotify_fd,
                                EPOLLIN,
                                manager_dispatch_ask_password_fd,
                                m);
                if (r < 0)
                        return log_error_errno(r, "Failed to add event source for /run/systemd/ask-password/: %m");

                r = sd_event_source_set_io_fd_own(event_source, true);
                if (r < 0)
                        return log_error_errno(r, "Failed to pass ownership of /run/systemd/ask-password/ inotify fd to event source: %m");
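                /* The event source owns the fd now and will close it; release our cleanup reference. */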
                TAKE_FD(inotify_fd);

                (void) sd_event_source_set_description(event_source, "manager-ask-password");

                m->ask_password_event_source = TAKE_PTR(event_source);

                /* Queries might have been added meanwhile... */
                (void) manager_dispatch_ask_password_fd(m->ask_password_event_source, sd_event_source_get_io_fd(m->ask_password_event_source), EPOLLIN, m);
        }

        return m->have_ask_password;
}

static int manager_watch_idle_pipe(Manager *m) {
        int r;

        assert(m);

        if (m->idle_pipe_event_source)
                return 0;

        if (m->idle_pipe[2] < 0)
                return 0;

        r = sd_event_add_io(m->event, &m->idle_pipe_event_source, m->idle_pipe[2], EPOLLIN, manager_dispatch_idle_pipe_fd, m);
        if (r < 0)
                return log_error_errno(r, "Failed to watch idle pipe: %m");

        (void) sd_event_source_set_description(m->idle_pipe_event_source, "manager-idle-pipe");

        return 0;
}

static void manager_close_idle_pipe(Manager *m) {
        assert(m);

        m->idle_pipe_event_source = sd_event_source_disable_unref(m->idle_pipe_event_source);

        safe_close_pair(m->idle_pipe);
        safe_close_pair(m->idle_pipe + 2);
}

static int manager_setup_time_change(Manager *m) {
        int r;

        assert(m);

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        m->time_change_event_source = sd_event_source_disable_unref(m->time_change_event_source);

        r = event_add_time_change(m->event, &m->time_change_event_source, manager_dispatch_time_change_fd, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create time change event source: %m");

        /* Schedule this slightly earlier than the .timer event sources */
        r = sd_event_source_set_priority(m->time_change_event_source, EVENT_PRIORITY_TIME_CHANGE);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of time change event sources: %m");

        log_debug("Set up TFD_TIMER_CANCEL_ON_SET timerfd.");

        return 0;
}

static int manager_read_timezone_stat(Manager *m) {
        struct stat st;
        bool changed;

        assert(m);

        /* Read the current stat() data of /etc/localtime so that we detect changes */
        if (lstat(etc_localtime(), &st) < 0) {
                log_debug_errno(errno, "Failed to stat /etc/localtime, ignoring: %m");
                changed = m->etc_localtime_accessible;
                m->etc_localtime_accessible = false;
        } else {
                usec_t k;

                k = timespec_load(&st.st_mtim);
                changed = !m->etc_localtime_accessible || k != m->etc_localtime_mtime;

                m->etc_localtime_mtime = k;
                m->etc_localtime_accessible = true;
        }

        return changed;
}

static int manager_setup_timezone_change(Manager *m) {
        _cleanup_(sd_event_source_unrefp) sd_event_source *new_event = NULL;
        int r;

        assert(m);

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        /* We watch /etc/localtime for three events: change of the link count (which might mean removal from /etc even
         * though another link might be kept), renames, and file close operations after writing. Note we don't bother
         * with IN_DELETE_SELF, as that would just report when the inode is removed entirely, i.e. after the link count
         * went to zero and all fds to it are closed.
         *
         * Note that we never follow symlinks here. This is a simplification, but should cover almost all cases
         * correctly.
         *
         * Note that we create the new event source first here, before releasing the old one. This should optimize
         * behaviour as this way sd-event can reuse the old watch in case the inode didn't change. */

        r = sd_event_add_inotify(m->event, &new_event, etc_localtime(),
                                 IN_ATTRIB|IN_MOVE_SELF|IN_CLOSE_WRITE|IN_DONT_FOLLOW, manager_dispatch_timezone_change, m);
        if (r == -ENOENT) {
                /* If the file doesn't exist yet, subscribe to /etc instead, and wait until it is created
                 * either by O_CREAT or by rename(). */
                _cleanup_free_ char *localtime_dir = NULL;

                int dir_r = path_extract_directory(etc_localtime(), &localtime_dir);
                if (dir_r < 0)
                        return log_error_errno(dir_r, "Failed to extract directory from path '%s': %m", etc_localtime());

                log_debug_errno(r, "%s doesn't exist yet, watching %s instead.", etc_localtime(), localtime_dir);

                r = sd_event_add_inotify(m->event, &new_event, localtime_dir,
                                         IN_CREATE|IN_MOVED_TO|IN_ONLYDIR, manager_dispatch_timezone_change, m);
        }
        if (r < 0)
                return log_error_errno(r, "Failed to create timezone change event source: %m");

        /* Schedule this slightly earlier than the .timer event sources */
        r = sd_event_source_set_priority(new_event, EVENT_PRIORITY_TIME_ZONE);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of timezone change event sources: %m");

        sd_event_source_unref(m->timezone_change_event_source);
        m->timezone_change_event_source = TAKE_PTR(new_event);

        return 0;
}

static int manager_enable_special_signals(Manager *m) {
        _cleanup_close_ int fd = -EBADF;

        assert(m);

        if (!MANAGER_IS_SYSTEM(m) || MANAGER_IS_TEST_RUN(m))
                return 0;

        /* Arrange for SIGINT to be delivered to us on Ctrl-Alt-Del. In containers this will fail with
         * EPERM (older kernels) or EINVAL (newer ones), so ignore that. */
        if (reboot(RB_DISABLE_CAD) < 0 && !IN_SET(errno, EPERM, EINVAL))
                log_warning_errno(errno, "Failed to enable ctrl-alt-del handling, ignoring: %m");

        fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
        if (fd < 0)
                /* Support systems without virtual console (ENOENT) gracefully */
                log_full_errno(fd == -ENOENT ? LOG_DEBUG : LOG_WARNING, fd, "Failed to open %s, ignoring: %m", "/dev/tty0");
        else {
                /* Arrange for SIGWINCH to be delivered to us on kbrequest (Alt-ArrowUp) */
                if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
                        log_warning_errno(errno, "Failed to enable kbrequest handling, ignoring: %m");
        }

        return 0;
}

static int manager_setup_signals(Manager *m) {
        static const struct sigaction sa = {
                .sa_handler = SIG_DFL,
                .sa_flags = SA_NOCLDSTOP|SA_RESTART,
        };
        sigset_t mask;
        int r;

        assert(m);

        assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);

        /* We make liberal use of realtime signals here. On Linux/glibc we have 31 of them, between
         * SIGRTMIN+0 ... SIGRTMIN+30 (aka SIGRTMAX). */
530 | ||
531 | assert_se(sigemptyset(&mask) == 0); | |
532 | sigset_add_many(&mask, | |
533 | SIGCHLD, /* Child died */ | |
534 | SIGTERM, /* Reexecute daemon */ | |
535 | SIGHUP, /* Reload configuration */ | |
536 | SIGUSR1, /* systemd: reconnect to D-Bus */ | |
537 | SIGUSR2, /* systemd: dump status */ | |
538 | SIGINT, /* Kernel sends us this on control-alt-del */ | |
539 | SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */ | |
540 | SIGPWR, /* Some kernel drivers and upsd send us this on power failure */ | |
541 | ||
542 | SIGRTMIN+0, /* systemd: start default.target */ | |
543 | SIGRTMIN+1, /* systemd: isolate rescue.target */ | |
544 | SIGRTMIN+2, /* systemd: isolate emergency.target */ | |
545 | SIGRTMIN+3, /* systemd: start halt.target */ | |
546 | SIGRTMIN+4, /* systemd: start poweroff.target */ | |
547 | SIGRTMIN+5, /* systemd: start reboot.target */ | |
548 | SIGRTMIN+6, /* systemd: start kexec.target */ | |
549 | SIGRTMIN+7, /* systemd: start soft-reboot.target */ | |
550 | ||
551 | /* ... space for more special targets ... */ | |
552 | ||
553 | SIGRTMIN+13, /* systemd: Immediate halt */ | |
554 | SIGRTMIN+14, /* systemd: Immediate poweroff */ | |
555 | SIGRTMIN+15, /* systemd: Immediate reboot */ | |
556 | SIGRTMIN+16, /* systemd: Immediate kexec */ | |
557 | SIGRTMIN+17, /* systemd: Immediate soft-reboot */ | |
558 | SIGRTMIN+18, /* systemd: control command */ | |
559 | ||
560 | /* ... space ... */ | |
561 | ||
562 | SIGRTMIN+20, /* systemd: enable status messages */ | |
563 | SIGRTMIN+21, /* systemd: disable status messages */ | |
564 | SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */ | |
565 | SIGRTMIN+23, /* systemd: set log level to LOG_INFO */ | |
566 | SIGRTMIN+24, /* systemd: Immediate exit (--user only) */ | |
567 | SIGRTMIN+25, /* systemd: reexecute manager */ | |
568 | ||
569 | SIGRTMIN+26, /* systemd: set log target to journal-or-kmsg */ | |
570 | SIGRTMIN+27, /* systemd: set log target to console */ | |
571 | SIGRTMIN+28, /* systemd: set log target to kmsg */ | |
572 | SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg (obsolete) */ | |
573 | ||
574 | /* ... one free signal here SIGRTMIN+30 ... */ | |
575 | -1); | |
576 | assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0); | |
577 | ||
        m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        if (m->signal_fd < 0)
                return -errno;

        r = sd_event_add_io(m->event, &m->signal_event_source, m->signal_fd, EPOLLIN, manager_dispatch_signal_fd, m);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->signal_event_source, "manager-signal");

        /* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
         * notify processing can still figure out to which process/service a message belongs, before we reap the
         * process. Also, process this before handling cgroup notifications, so that we always collect child exit
         * status information before detecting that there's no process in a cgroup. */
        r = sd_event_source_set_priority(m->signal_event_source, EVENT_PRIORITY_SIGNALS);
        if (r < 0)
                return r;

        /* Report to our supervisor that we now process the above signals. We report this as level "2", to
         * indicate that we support more than sysvinit's signals (of course, sysvinit never sent this
         * message, but conceptually it makes sense to consider level "1" to be equivalent to sysvinit's
         * signal handling). Also, by setting this to "2" people looking for this hopefully won't
         * misunderstand it as a boolean concept. Signal level 2 shall refer to the signals PID 1
         * understands at the time of release of systemd v256, i.e. including basic SIGRTMIN+18 handling for
         * memory pressure and stuff. When more signals are hooked up (or more SIGRTMIN+18 multiplex
         * operations are added), this level should be increased. */
        (void) sd_notify(/* unset_environment= */ false,
                         "X_SYSTEMD_SIGNALS_LEVEL=2");

        return manager_enable_special_signals(m);
}

static char** sanitize_environment(char **l) {

        /* Let's remove some environment variables that we need ourselves to communicate with our clients */
        strv_env_unset_many(
                        l,
                        "CACHE_DIRECTORY",
                        "CONFIGURATION_DIRECTORY",
                        "CREDENTIALS_DIRECTORY",
                        "EXIT_CODE",
                        "EXIT_STATUS",
                        "INVOCATION_ID",
                        "JOURNAL_STREAM",
                        "LISTEN_FDNAMES",
                        "LISTEN_FDS",
                        "LISTEN_PID",
                        "LOGS_DIRECTORY",
                        "LOG_NAMESPACE",
                        "MAINPID",
                        "MANAGERPID",
                        "MEMORY_PRESSURE_WATCH",
                        "MEMORY_PRESSURE_WRITE",
                        "MONITOR_EXIT_CODE",
                        "MONITOR_EXIT_STATUS",
                        "MONITOR_INVOCATION_ID",
                        "MONITOR_SERVICE_RESULT",
                        "MONITOR_UNIT",
                        "NOTIFY_SOCKET",
                        "PIDFILE",
                        "REMOTE_ADDR",
                        "REMOTE_PORT",
                        "RUNTIME_DIRECTORY",
                        "SERVICE_RESULT",
                        "STATE_DIRECTORY",
                        "SYSTEMD_EXEC_PID",
                        "TRIGGER_PATH",
                        "TRIGGER_TIMER_MONOTONIC_USEC",
                        "TRIGGER_TIMER_REALTIME_USEC",
                        "TRIGGER_UNIT",
                        "WATCHDOG_PID",
                        "WATCHDOG_USEC");

        /* Let's order the environment alphabetically, just to make it pretty */
        return strv_sort(l);
}

int manager_default_environment(Manager *m) {
        assert(m);

        m->transient_environment = strv_free(m->transient_environment);

        if (MANAGER_IS_SYSTEM(m)) {
                /* The system manager always starts with a clean environment for its children. It does not
                 * import the kernel's or the parents' exported variables.
                 *
                 * The initial passed environment is untouched to keep /proc/self/environ valid; it is used
                 * for tagging the init process inside containers. */
                char *path = strjoin("PATH=", default_PATH());
                if (!path)
                        return log_oom();

                if (strv_consume(&m->transient_environment, path) < 0)
                        return log_oom();

                /* Import locale variables LC_*= from configuration */
                (void) locale_setup(&m->transient_environment);
        } else {
                /* The user manager passes its own environment along to its children, except for $PATH and
                 * session envs. */

                m->transient_environment = strv_copy(environ);
                if (!m->transient_environment)
                        return log_oom();

                char *path = strjoin("PATH=", default_user_PATH());
                if (!path)
                        return log_oom();

                if (strv_env_replace_consume(&m->transient_environment, path) < 0)
                        return log_oom();

                /* Envvars set for our 'manager' class session are private and should not be propagated
                 * to children. Also, it's likely that the graphical session will set these on its own. */
                strv_env_unset_many(m->transient_environment,
                                    "XDG_SESSION_ID",
                                    "XDG_SESSION_CLASS",
                                    "XDG_SESSION_TYPE",
                                    "XDG_SESSION_DESKTOP",
                                    "XDG_SEAT",
                                    "XDG_VTNR");
        }

        sanitize_environment(m->transient_environment);
        return 0;
}

static int manager_setup_prefix(Manager *m) {
        struct table_entry {
                uint64_t type;
                const char *suffix;
        };

        static const struct table_entry paths_system[_EXEC_DIRECTORY_TYPE_MAX] = {
                [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_SYSTEM_RUNTIME, NULL },
                [EXEC_DIRECTORY_STATE] = { SD_PATH_SYSTEM_STATE_PRIVATE, NULL },
                [EXEC_DIRECTORY_CACHE] = { SD_PATH_SYSTEM_STATE_CACHE, NULL },
                [EXEC_DIRECTORY_LOGS] = { SD_PATH_SYSTEM_STATE_LOGS, NULL },
                [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_SYSTEM_CONFIGURATION, NULL },
        };

        static const struct table_entry paths_user[_EXEC_DIRECTORY_TYPE_MAX] = {
                [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_USER_RUNTIME, NULL },
                [EXEC_DIRECTORY_STATE] = { SD_PATH_USER_STATE_PRIVATE, NULL },
                [EXEC_DIRECTORY_CACHE] = { SD_PATH_USER_STATE_CACHE, NULL },
                [EXEC_DIRECTORY_LOGS] = { SD_PATH_USER_STATE_PRIVATE, "log" },
                [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_USER_CONFIGURATION, NULL },
        };

        assert(m);

        const struct table_entry *p = MANAGER_IS_SYSTEM(m) ? paths_system : paths_user;
        int r;

        for (ExecDirectoryType i = 0; i < _EXEC_DIRECTORY_TYPE_MAX; i++) {
                r = sd_path_lookup(p[i].type, p[i].suffix, &m->prefix[i]);
                if (r < 0)
                        return log_warning_errno(r, "Failed to lookup %s path: %m",
                                                 exec_directory_type_to_string(i));
        }

        return 0;
}

static void manager_free_unit_name_maps(Manager *m) {
        m->unit_id_map = hashmap_free(m->unit_id_map);
        m->unit_name_map = hashmap_free(m->unit_name_map);
        m->unit_path_cache = set_free(m->unit_path_cache);
        m->unit_cache_timestamp_hash = 0;
}

static int manager_setup_run_queue(Manager *m) {
        int r;

        assert(m);
        assert(!m->run_queue_event_source);

        r = sd_event_add_defer(m->event, &m->run_queue_event_source, manager_dispatch_run_queue, m);
        if (r < 0)
                return r;

        r = sd_event_source_set_priority(m->run_queue_event_source, EVENT_PRIORITY_RUN_QUEUE);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(m->run_queue_event_source, SD_EVENT_OFF);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->run_queue_event_source, "manager-run-queue");

        return 0;
}

static int manager_setup_sigchld_event_source(Manager *m) {
        int r;

        assert(m);
        assert(!m->sigchld_event_source);

        r = sd_event_add_defer(m->event, &m->sigchld_event_source, manager_dispatch_sigchld, m);
        if (r < 0)
                return r;

        r = sd_event_source_set_priority(m->sigchld_event_source, EVENT_PRIORITY_SIGCHLD);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->sigchld_event_source, "manager-sigchld");

        return 0;
}

int manager_setup_memory_pressure_event_source(Manager *m) {
        int r;

        assert(m);

        m->memory_pressure_event_source = sd_event_source_disable_unref(m->memory_pressure_event_source);

        r = sd_event_add_memory_pressure(m->event, &m->memory_pressure_event_source, NULL, NULL);
        if (r < 0)
                log_full_errno(ERRNO_IS_NOT_SUPPORTED(r) || ERRNO_IS_PRIVILEGE(r) || (r == -EHOSTDOWN) ? LOG_DEBUG : LOG_NOTICE, r,
                               "Failed to establish memory pressure event source, ignoring: %m");
        else if (m->defaults.memory_pressure_threshold_usec != USEC_INFINITY) {

                /* If there's a default memory pressure threshold set, also apply it to the service manager itself */
                r = sd_event_source_set_memory_pressure_period(
                                m->memory_pressure_event_source,
                                m->defaults.memory_pressure_threshold_usec,
                                MEMORY_PRESSURE_DEFAULT_WINDOW_USEC);
                if (r < 0)
                        log_warning_errno(r, "Failed to adjust memory pressure threshold, ignoring: %m");
        }

        return 0;
}

static int manager_find_credentials_dirs(Manager *m) {
        const char *e;
        int r;

        assert(m);

        r = get_credentials_dir(&e);
        if (r < 0) {
                if (r != -ENXIO)
                        log_debug_errno(r, "Failed to determine credentials directory, ignoring: %m");
        } else {
                m->received_credentials_directory = strdup(e);
                if (!m->received_credentials_directory)
                        return -ENOMEM;
        }

        r = get_encrypted_credentials_dir(&e);
        if (r < 0) {
                if (r != -ENXIO)
                        log_debug_errno(r, "Failed to determine encrypted credentials directory, ignoring: %m");
        } else {
                m->received_encrypted_credentials_directory = strdup(e);
                if (!m->received_encrypted_credentials_directory)
                        return -ENOMEM;
        }

        return 0;
}

void manager_set_switching_root(Manager *m, bool switching_root) {
        assert(m);

        m->switching_root = MANAGER_IS_SYSTEM(m) && switching_root;
}

double manager_get_progress(Manager *m) {
        assert(m);

        if (MANAGER_IS_FINISHED(m) || m->n_installed_jobs == 0)
                return 1.0;

        return 1.0 - ((double) hashmap_size(m->jobs) / (double) m->n_installed_jobs);
}

static int compare_job_priority(const void *a, const void *b) {
        const Job *x = a, *y = b;

        return unit_compare_priority(x->unit, y->unit);
}

usec_t manager_default_timeout(RuntimeScope scope) {
        return scope == RUNTIME_SCOPE_SYSTEM ? DEFAULT_TIMEOUT_USEC : DEFAULT_USER_TIMEOUT_USEC;
}

int manager_new(RuntimeScope runtime_scope, ManagerTestRunFlags test_run_flags, Manager **ret) {
        _cleanup_(manager_freep) Manager *m = NULL;
        int r;

        assert(IN_SET(runtime_scope, RUNTIME_SCOPE_SYSTEM, RUNTIME_SCOPE_USER));
        assert(ret);

        m = new(Manager, 1);
        if (!m)
                return -ENOMEM;

        *m = (Manager) {
                .runtime_scope = runtime_scope,
                .objective = _MANAGER_OBJECTIVE_INVALID,
                .previous_objective = _MANAGER_OBJECTIVE_INVALID,

                .status_unit_format = STATUS_UNIT_FORMAT_DEFAULT,

                .original_log_level = -1,
                .original_log_target = _LOG_TARGET_INVALID,

                .watchdog_overridden[WATCHDOG_RUNTIME] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_REBOOT] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_KEXEC] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_PRETIMEOUT] = USEC_INFINITY,

                .show_status_overridden = _SHOW_STATUS_INVALID,

                .notify_fd = -EBADF,
                .signal_fd = -EBADF,
                .user_lookup_fds = EBADF_PAIR,
                .handoff_timestamp_fds = EBADF_PAIR,
                .pidref_transport_fds = EBADF_PAIR,
                .private_listen_fd = -EBADF,
                .dev_autofs_fd = -EBADF,
                .cgroup_inotify_fd = -EBADF,
                .pin_cgroupfs_fd = -EBADF,
                .idle_pipe = { -EBADF, -EBADF, -EBADF, -EBADF },

                /* start as id #1, so that we can leave #0 around as "null-like" value */
                .current_job_id = 1,

                .have_ask_password = -EINVAL, /* we don't know */
                .first_boot = -1,
                .test_run_flags = test_run_flags,

                .dump_ratelimit = (const RateLimit) { .interval = 10 * USEC_PER_MINUTE, .burst = 10 },

                .executor_fd = -EBADF,
        };

        unit_defaults_init(&m->defaults, runtime_scope);

#if ENABLE_EFI
        if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0)
                boot_timestamps(m->timestamps + MANAGER_TIMESTAMP_USERSPACE,
                                m->timestamps + MANAGER_TIMESTAMP_FIRMWARE,
                                m->timestamps + MANAGER_TIMESTAMP_LOADER);
#endif

        /* Reboot immediately if the user hits C-A-D more often than 7x per 2s */
        m->ctrl_alt_del_ratelimit = (const RateLimit) { .interval = 2 * USEC_PER_SEC, .burst = 7 };

        r = manager_default_environment(m);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->units, &string_hash_ops);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->cgroup_unit, &path_hash_ops);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->watch_bus, &string_hash_ops);
        if (r < 0)
                return r;

        r = prioq_ensure_allocated(&m->run_queue, compare_job_priority);
        if (r < 0)
                return r;

        r = manager_setup_prefix(m);
        if (r < 0)
                return r;

        r = manager_find_credentials_dirs(m);
        if (r < 0)
                return r;

        r = sd_event_default(&m->event);
        if (r < 0)
                return r;

        r = manager_setup_run_queue(m);
        if (r < 0)
                return r;

        if (FLAGS_SET(test_run_flags, MANAGER_TEST_RUN_MINIMAL)) {
                m->cgroup_root = strdup("");
                if (!m->cgroup_root)
                        return -ENOMEM;
        } else {
                r = manager_setup_signals(m);
                if (r < 0)
                        return r;

                r = manager_setup_cgroup(m);
                if (r < 0)
                        return r;

                r = manager_setup_time_change(m);
                if (r < 0)
                        return r;

                r = manager_read_timezone_stat(m);
                if (r < 0)
                        return r;

                (void) manager_setup_timezone_change(m);

                r = manager_setup_sigchld_event_source(m);
                if (r < 0)
                        return r;

                r = manager_setup_memory_pressure_event_source(m);
                if (r < 0)
                        return r;

#if HAVE_LIBBPF
                if (MANAGER_IS_SYSTEM(m) && bpf_restrict_fs_supported(/* initialize = */ true)) {
                        r = bpf_restrict_fs_setup(m);
                        if (r < 0)
                                log_warning_errno(r, "Failed to setup LSM BPF, ignoring: %m");
                }
#endif
        }

        if (test_run_flags == 0) {
                if (MANAGER_IS_SYSTEM(m))
                        r = mkdir_label("/run/systemd/units", 0755);
                else {
                        _cleanup_free_ char *units_path = NULL;
                        r = xdg_user_runtime_dir("/systemd/units", &units_path);
                        if (r < 0)
                                return r;

                        r = mkdir_label(units_path, 0755);
                }
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        if (!FLAGS_SET(test_run_flags, MANAGER_TEST_DONT_OPEN_EXECUTOR)) {
                m->executor_fd = pin_callout_binary(SYSTEMD_EXECUTOR_BINARY_PATH, &m->executor_path);
                if (m->executor_fd < 0)
                        return log_debug_errno(m->executor_fd, "Failed to pin executor binary: %m");

                log_debug("Using systemd-executor binary from '%s'.", m->executor_path);
        }

        /* Note that we do not set up the notify fd here. We do that after deserialization, since it might
         * have been serialized across the reexec. */
1038 | ||
1039 | *ret = TAKE_PTR(m); | |
1040 | ||
1041 | return 0; | |
1042 | } | |
1043 | ||
1044 | static int manager_setup_notify(Manager *m) { | |
1045 | int r; | |
1046 | ||
1047 | if (MANAGER_IS_TEST_RUN(m)) | |
1048 | return 0; | |
1049 | ||
        if (m->notify_fd < 0) {
                _cleanup_close_ int fd = -EBADF;
                union sockaddr_union sa;
                socklen_t sa_len;

                /* First free all secondary fields */
                m->notify_socket = mfree(m->notify_socket);
                m->notify_event_source = sd_event_source_disable_unref(m->notify_event_source);

                fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
                if (fd < 0)
                        return log_error_errno(errno, "Failed to allocate notification socket: %m");

                (void) fd_increase_rxbuf(fd, MANAGER_SOCKET_RCVBUF_SIZE);

                m->notify_socket = path_join(m->prefix[EXEC_DIRECTORY_RUNTIME], "systemd/notify");
                if (!m->notify_socket)
                        return log_oom();

                r = sockaddr_un_set_path(&sa.un, m->notify_socket);
                if (r < 0)
                        return log_error_errno(r, "Notify socket '%s' not valid for AF_UNIX socket address, refusing.",
                                               m->notify_socket);
                sa_len = r;

                (void) sockaddr_un_unlink(&sa.un);

                r = mac_selinux_bind(fd, &sa.sa, sa_len);
                if (r < 0)
                        return log_error_errno(r, "Failed to bind notify fd to '%s': %m", m->notify_socket);

                r = setsockopt_int(fd, SOL_SOCKET, SO_PASSCRED, true);
                if (r < 0)
                        return log_error_errno(r, "Failed to enable SO_PASSCRED for notify socket: %m");

                // TODO: enforce SO_PASSPIDFD once our kernel version baseline is bumped to >= 6.5.
                r = setsockopt_int(fd, SOL_SOCKET, SO_PASSPIDFD, true);
                if (r < 0 && r != -ENOPROTOOPT)
                        log_warning_errno(r, "Failed to enable SO_PASSPIDFD for notify socket, ignoring: %m");

                m->notify_fd = TAKE_FD(fd);

                log_debug("Using notification socket %s", m->notify_socket);
        }

        if (!m->notify_event_source) {
                r = sd_event_add_io(m->event, &m->notify_event_source, m->notify_fd, EPOLLIN, manager_dispatch_notify_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate notify event source: %m");

                /* Process notification messages a bit earlier than SIGCHLD, so that we can still identify to which
                 * service an exit message belongs. */
                r = sd_event_source_set_priority(m->notify_event_source, EVENT_PRIORITY_NOTIFY);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of notify event source: %m");

                (void) sd_event_source_set_description(m->notify_event_source, "manager-notify");
        }

        return 0;
}

static int manager_setup_user_lookup_fd(Manager *m) {
        int r;

        assert(m);

        /* Set up the socket pair used for passing UID/GID resolution results from forked off processes to PID
         * 1. Background: we can't do name lookups (NSS) from PID 1, since it might involve IPC and thus activation,
         * and we might hence deadlock on ourselves. Hence we do all user/group lookups asynchronously from the forked
         * off processes right before executing the binaries to start. In order to be able to clean up any IPC objects
         * created by a unit (see RemoveIPC=) we need to know in PID 1 the used UID/GID of the executed processes,
         * hence we establish this communication channel so that forked off processes can pass their UID/GID
         * information back to PID 1. The forked off processes send their resolved UID/GID to PID 1 in a simple
         * datagram, along with their unit name, so that we can share one communication socket pair among all units for
         * this purpose.
         *
         * You might wonder why we need a communication channel for this that is independent of the usual notification
         * socket scheme (i.e. $NOTIFY_SOCKET). The primary difference is about trust: data sent via the $NOTIFY_SOCKET
         * channel is only accepted if it originates from the right unit and if reception was enabled for it. The user
         * lookup socket OTOH is only accessible by PID 1 and its children until they exec(), and always available.
         *
         * Note that this function is called under two circumstances: when we first initialize (in which case we
         * allocate both the socket pair and the event source to listen on it), and when we deserialize after a reload
         * (in which case the socket pair already exists but we still need to allocate the event source for it). */

        if (m->user_lookup_fds[0] < 0) {

                /* Free all secondary fields */
                safe_close_pair(m->user_lookup_fds);
                m->user_lookup_event_source = sd_event_source_disable_unref(m->user_lookup_event_source);

                if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->user_lookup_fds) < 0)
                        return log_error_errno(errno, "Failed to allocate user lookup socket: %m");

                r = setsockopt_int(m->user_lookup_fds[0], SOL_SOCKET, SO_PASSRIGHTS, false);
                if (r < 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r))
                        log_warning_errno(r, "Failed to turn off SO_PASSRIGHTS on user lookup socket, ignoring: %m");

                (void) fd_increase_rxbuf(m->user_lookup_fds[0], MANAGER_SOCKET_RCVBUF_SIZE);
        }

        if (!m->user_lookup_event_source) {
                r = sd_event_add_io(m->event, &m->user_lookup_event_source, m->user_lookup_fds[0], EPOLLIN, manager_dispatch_user_lookup_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate user lookup event source: %m");

                /* Process even earlier than the notify event source, so that we always know first about valid UID/GID
                 * resolutions */
                r = sd_event_source_set_priority(m->user_lookup_event_source, EVENT_PRIORITY_USER_LOOKUP);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of user lookup event source: %m");

                (void) sd_event_source_set_description(m->user_lookup_event_source, "user-lookup");
        }

        return 0;
}

static int manager_setup_handoff_timestamp_fd(Manager *m) {
        int r;

        assert(m);

        /* Set up the socket pair used for passing timestamps back when the executor processes we fork
         * off invoke execve(), i.e. when we hand off control to our payload processes. */
1176 | ||
1177 | if (m->handoff_timestamp_fds[0] < 0) { | |
1178 | m->handoff_timestamp_event_source = sd_event_source_disable_unref(m->handoff_timestamp_event_source); | |
1179 | safe_close_pair(m->handoff_timestamp_fds); | |
1180 | ||
1181 | if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->handoff_timestamp_fds) < 0) | |
1182 | return log_error_errno(errno, "Failed to allocate handoff timestamp socket: %m"); | |
1183 | ||
1184 | /* Make sure children never have to block */ | |
1185 | (void) fd_increase_rxbuf(m->handoff_timestamp_fds[0], MANAGER_SOCKET_RCVBUF_SIZE); | |
1186 | ||
1187 | r = setsockopt_int(m->handoff_timestamp_fds[0], SOL_SOCKET, SO_PASSCRED, true); | |
1188 | if (r < 0) | |
1189 | return log_error_errno(r, "Failed to enable SO_PASSCRED on handoff timestamp socket: %m"); | |
1190 | ||
1191 | r = setsockopt_int(m->handoff_timestamp_fds[0], SOL_SOCKET, SO_PASSRIGHTS, false); | |
1192 | if (r < 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r)) | |
1193 | log_warning_errno(r, "Failed to turn off SO_PASSRIGHTS on handoff timestamp socket, ignoring: %m"); | |
1194 | ||
1195 | /* Mark the receiving socket as O_NONBLOCK (but leave sending side as-is) */ | |
1196 | r = fd_nonblock(m->handoff_timestamp_fds[0], true); | |
1197 | if (r < 0) | |
1198 | return log_error_errno(r, "Failed to make handoff timestamp socket O_NONBLOCK: %m"); | |
1199 | } | |
1200 | ||
1201 | if (!m->handoff_timestamp_event_source) { | |
1202 | r = sd_event_add_io(m->event, &m->handoff_timestamp_event_source, m->handoff_timestamp_fds[0], EPOLLIN, manager_dispatch_handoff_timestamp_fd, m); | |
1203 | if (r < 0) | |
1204 | return log_error_errno(r, "Failed to allocate handoff timestamp event source: %m"); | |
1205 | ||
1206 | r = sd_event_source_set_priority(m->handoff_timestamp_event_source, EVENT_PRIORITY_HANDOFF_TIMESTAMP); | |
1207 | if (r < 0) | |
1208 | return log_error_errno(r, "Failed to set priority of handoff timestamp event source: %m"); | |
1209 | ||
1210 | (void) sd_event_source_set_description(m->handoff_timestamp_event_source, "handoff-timestamp"); | |
1211 | } | |
1212 | ||
1213 | return 0; | |
1214 | } | |
1215 | ||
1216 | static int manager_setup_pidref_transport_fd(Manager *m) { | |
1217 | int r; | |
1218 | ||
1219 | assert(m); | |
1220 | ||
1221 | /* Set up the socket pair used for passing parent and child pidrefs back when the executor unshares | |
1222 | * a PID namespace and forks again when using PrivatePIDs=yes. */ | |
1223 | ||
1224 | if (m->pidref_transport_fds[0] < 0) { | |
1225 | m->pidref_event_source = sd_event_source_disable_unref(m->pidref_event_source); | |
1226 | safe_close_pair(m->pidref_transport_fds); | |
1227 | ||
1228 | if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->pidref_transport_fds) < 0) | |
1229 | return log_error_errno(errno, "Failed to allocate pidref socket: %m"); | |
1230 | ||
1231 | /* Make sure children never have to block */ | |
1232 | (void) fd_increase_rxbuf(m->pidref_transport_fds[0], MANAGER_SOCKET_RCVBUF_SIZE); | |
1233 | ||
1234 | r = setsockopt_int(m->pidref_transport_fds[0], SOL_SOCKET, SO_PASSCRED, true); | |
1235 | if (r < 0) | |
1236 | return log_error_errno(r, "Failed to enable SO_PASSCRED for pidref socket: %m"); | |
1237 | ||
1238 | r = setsockopt_int(m->pidref_transport_fds[0], SOL_SOCKET, SO_PASSPIDFD, true); | |
1239 | if (ERRNO_IS_NEG_NOT_SUPPORTED(r)) | |
1240 | log_debug_errno(r, "SO_PASSPIDFD is not supported for pidref socket, ignoring."); | |
1241 | else if (r < 0) | |
1242 | log_warning_errno(r, "Failed to enable SO_PASSPIDFD for pidref socket, ignoring: %m"); | |
1243 | ||
1244 | /* Mark the receiving socket as O_NONBLOCK (but leave sending side as-is) */ | |
1245 | r = fd_nonblock(m->pidref_transport_fds[0], true); | |
1246 | if (r < 0) | |
1247 | return log_error_errno(r, "Failed to make pidref socket O_NONBLOCK: %m"); | |
1248 | } | |
1249 | ||
1250 | if (!m->pidref_event_source) { | |
1251 | r = sd_event_add_io(m->event, &m->pidref_event_source, m->pidref_transport_fds[0], EPOLLIN, manager_dispatch_pidref_transport_fd, m); | |
1252 | if (r < 0) | |
1253 | return log_error_errno(r, "Failed to allocate pidref event source: %m"); | |
1254 | ||
1255 | r = sd_event_source_set_priority(m->pidref_event_source, EVENT_PRIORITY_PIDREF); | |
1256 | if (r < 0) | |
1257 | return log_error_errno(r, "Failed to set priority of pidref event source: %m"); | |
1258 | ||
1259 | (void) sd_event_source_set_description(m->pidref_event_source, "pidref"); | |
1260 | } | |
1261 | ||
1262 | return 0; | |
1263 | } | |
1264 | ||
1265 | static unsigned manager_dispatch_cleanup_queue(Manager *m) { | |
1266 | Unit *u; | |
1267 | unsigned n = 0; | |
1268 | ||
1269 | assert(m); | |
1270 | ||
1271 | while ((u = m->cleanup_queue)) { | |
1272 | assert(u->in_cleanup_queue); | |
1273 | ||
1274 | unit_free(u); | |
1275 | n++; | |
1276 | } | |
1277 | ||
1278 | return n; | |
1279 | } | |
1280 | ||
1281 | static unsigned manager_dispatch_release_resources_queue(Manager *m) { | |
1282 | unsigned n = 0; | |
1283 | Unit *u; | |
1284 | ||
1285 | assert(m); | |
1286 | ||
1287 | while ((u = LIST_POP(release_resources_queue, m->release_resources_queue))) { | |
1288 | assert(u->in_release_resources_queue); | |
1289 | u->in_release_resources_queue = false; | |
1290 | ||
1291 | n++; | |
1292 | ||
1293 | unit_release_resources(u); | |
1294 | } | |
1295 | ||
1296 | return n; | |
1297 | } | |
1298 | ||
1299 | enum { | |
1300 | GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */ | |
1301 | GC_OFFSET_UNSURE, /* No clue */ | |
1302 | GC_OFFSET_GOOD, /* We still need this unit */ | |
1303 | GC_OFFSET_BAD, /* We don't need this unit anymore */ | |
1304 | _GC_OFFSET_MAX | |
1305 | }; | |
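
/* Each GC run advances m->gc_marker by _GC_OFFSET_MAX, so that u->gc_marker - gc_marker encodes a unit's
 * state for the current run only, and markers left over from earlier runs can never collide. */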
1306 | ||
1307 | static void unit_gc_mark_good(Unit *u, unsigned gc_marker) { | |
1308 | Unit *other; | |
1309 | ||
1310 | u->gc_marker = gc_marker + GC_OFFSET_GOOD; | |
1311 | ||
1312 | /* Recursively mark referenced units as GOOD as well */ | |
1313 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCES) | |
1314 | if (other->gc_marker == gc_marker + GC_OFFSET_UNSURE) | |
1315 | unit_gc_mark_good(other, gc_marker); | |
1316 | } | |
1317 | ||
1318 | static void unit_gc_sweep(Unit *u, unsigned gc_marker) { | |
1319 | Unit *other; | |
1320 | bool is_bad; | |
1321 | ||
1322 | assert(u); | |
1323 | ||
        if (IN_SET(u->gc_marker - gc_marker,
                   GC_OFFSET_GOOD, GC_OFFSET_BAD, GC_OFFSET_UNSURE, GC_OFFSET_IN_PATH))
                return;

        if (u->in_cleanup_queue)
                goto bad;

        if (!unit_may_gc(u))
                goto good;

        u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;

        is_bad = true;

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCED_BY) {
                unit_gc_sweep(other, gc_marker);

                if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
                        goto good;

                if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
                        is_bad = false;
        }

        LIST_FOREACH(refs_by_target, ref, u->refs_by_target) {
                unit_gc_sweep(ref->source, gc_marker);

                if (ref->source->gc_marker == gc_marker + GC_OFFSET_GOOD)
                        goto good;

                if (ref->source->gc_marker != gc_marker + GC_OFFSET_BAD)
                        is_bad = false;
        }

        if (is_bad)
                goto bad;

        /* We were unable to find anything out about this entry, so
         * let's investigate it later */
        u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
        unit_add_to_gc_queue(u);
        return;

bad:
        /* We definitely know that this one is not useful anymore, so
         * let's mark it for deletion */
        u->gc_marker = gc_marker + GC_OFFSET_BAD;
        unit_add_to_cleanup_queue(u);
        return;

good:
        unit_gc_mark_good(u, gc_marker);
}

static unsigned manager_dispatch_gc_unit_queue(Manager *m) {
        unsigned n = 0, gc_marker;

        assert(m);

        /* log_debug("Running GC..."); */

        m->gc_marker += _GC_OFFSET_MAX;
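        /* Guard against unsigned integer wrap-around: restart the marker at 1 in that case. */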
        if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
                m->gc_marker = 1;

        gc_marker = m->gc_marker;

        Unit *u;
        while ((u = m->gc_unit_queue)) {
                assert(u->in_gc_queue);

                unit_gc_sweep(u, gc_marker);

                LIST_REMOVE(gc_queue, m->gc_unit_queue, u);
                u->in_gc_queue = false;

                n++;

                if (IN_SET(u->gc_marker - gc_marker,
                           GC_OFFSET_BAD, GC_OFFSET_UNSURE)) {
                        if (u->id)
                                log_unit_debug(u, "Collecting.");
                        u->gc_marker = gc_marker + GC_OFFSET_BAD;
                        unit_add_to_cleanup_queue(u);
                }
        }

        return n;
}

static unsigned manager_dispatch_gc_job_queue(Manager *m) {
        unsigned n = 0;
        Job *j;

        assert(m);

        while ((j = LIST_POP(gc_queue, m->gc_job_queue))) {
                assert(j->in_gc_queue);
                j->in_gc_queue = false;

                n++;

                if (!job_may_gc(j))
                        continue;

                log_unit_debug(j->unit, "Collecting job.");
                (void) job_finish_and_invalidate(j, JOB_COLLECTED, false, false);
        }

        return n;
}

static int manager_ratelimit_requeue(sd_event_source *s, uint64_t usec, void *userdata) {
        Unit *u = userdata;

        assert(u);
        assert(s == u->auto_start_stop_event_source);

        u->auto_start_stop_event_source = sd_event_source_unref(u->auto_start_stop_event_source);

1444 | /* Re-queue to all queues; if the rate limit was hit we might have been throttled on any of them. */ | |
1445 | unit_submit_to_stop_when_unneeded_queue(u); | |
1446 | unit_submit_to_start_when_upheld_queue(u); | |
1447 | unit_submit_to_stop_when_bound_queue(u); | |
1448 | ||
1449 | return 0; | |
1450 | } | |
1451 | ||
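| /* Returns > 0 if the unit is below its auto start/stop rate limit and may be acted on right away, | |
| * 0 if it is throttled (with a timer queued so the unit is re-examined once the current rate-limit | |
| * interval ends), and < 0 if that timer could not be set up. */ | |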
1452 | static int manager_ratelimit_check_and_queue(Unit *u) { | |
1453 | int r; | |
1454 | ||
1455 | assert(u); | |
1456 | ||
1457 | if (ratelimit_below(&u->auto_start_stop_ratelimit)) | |
1458 | return 1; | |
1459 | ||
1460 | /* Already queued, no need to requeue */ | |
1461 | if (u->auto_start_stop_event_source) | |
1462 | return 0; | |
1463 | ||
1464 | r = sd_event_add_time( | |
1465 | u->manager->event, | |
1466 | &u->auto_start_stop_event_source, | |
1467 | CLOCK_MONOTONIC, | |
1468 | ratelimit_end(&u->auto_start_stop_ratelimit), | |
1469 | 0, | |
1470 | manager_ratelimit_requeue, | |
1471 | u); | |
1472 | if (r < 0) | |
1473 | return log_unit_error_errno(u, r, "Failed to queue timer on event loop: %m"); | |
1474 | ||
1475 | return 0; | |
1476 | } | |
1477 | ||
1478 | static unsigned manager_dispatch_stop_when_unneeded_queue(Manager *m) { | |
1479 | unsigned n = 0; | |
1480 | Unit *u; | |
1481 | int r; | |
1482 | ||
1483 | assert(m); | |
1484 | ||
1485 | while ((u = LIST_POP(stop_when_unneeded_queue, m->stop_when_unneeded_queue))) { | |
1486 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
1487 | ||
1488 | assert(u->in_stop_when_unneeded_queue); | |
1489 | u->in_stop_when_unneeded_queue = false; | |
1490 | ||
1491 | n++; | |
1492 | ||
1493 | if (!unit_is_unneeded(u)) | |
1494 | continue; | |
1495 | ||
1496 | log_unit_debug(u, "Unit is not needed anymore."); | |
1497 | ||
1498 | /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the | |
1499 | * unit being unneeded after a while. */ | |
1500 | ||
1501 | r = manager_ratelimit_check_and_queue(u); | |
1502 | if (r <= 0) { | |
1503 | log_unit_warning(u, | |
1504 | "Unit not needed anymore, but not stopping since we tried this too often recently.%s", | |
1505 | r == 0 ? " Will retry later." : ""); | |
1506 | continue; | |
1507 | } | |
1508 | ||
1509 | /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */ | |
1510 | r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, /* ret = */ NULL); | |
1511 | if (r < 0) | |
1512 | log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r)); | |
1513 | } | |
1514 | ||
1515 | return n; | |
1516 | } | |
1517 | ||
1518 | static unsigned manager_dispatch_start_when_upheld_queue(Manager *m) { | |
1519 | unsigned n = 0; | |
1520 | Unit *u; | |
1521 | int r; | |
1522 | ||
1523 | assert(m); | |
1524 | ||
1525 | while ((u = LIST_POP(start_when_upheld_queue, m->start_when_upheld_queue))) { | |
1526 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
1527 | Unit *culprit = NULL; | |
1528 | ||
1529 | assert(u->in_start_when_upheld_queue); | |
1530 | u->in_start_when_upheld_queue = false; | |
1531 | ||
1532 | n++; | |
1533 | ||
1534 | if (!unit_is_upheld_by_active(u, &culprit)) | |
1535 | continue; | |
1536 | ||
1537 | log_unit_debug(u, "Unit is started because upheld by active unit %s.", culprit->id); | |
1538 | ||
1539 | /* If starting a unit fails continuously we might enter a start loop here, hence stop acting on the | |
1540 | * unit being upheld after a while. */ | |
1541 | ||
1542 | r = manager_ratelimit_check_and_queue(u); | |
1543 | if (r <= 0) { | |
1544 | log_unit_warning(u, | |
1545 | "Unit needs to be started because active unit %s upholds it, but not starting since we tried this too often recently.%s", | |
1546 | culprit->id, | |
1547 | r == 0 ? " Will retry later." : ""); | |
1548 | continue; | |
1549 | } | |
1550 | ||
1551 | r = manager_add_job(u->manager, JOB_START, u, JOB_FAIL, &error, /* ret = */ NULL); | |
1552 | if (r < 0) | |
1553 | log_unit_warning_errno(u, r, "Failed to enqueue start job, ignoring: %s", bus_error_message(&error, r)); | |
1554 | } | |
1555 | ||
1556 | return n; | |
1557 | } | |
1558 | ||
1559 | static unsigned manager_dispatch_stop_when_bound_queue(Manager *m) { | |
1560 | unsigned n = 0; | |
1561 | Unit *u; | |
1562 | int r; | |
1563 | ||
1564 | assert(m); | |
1565 | ||
1566 | while ((u = LIST_POP(stop_when_bound_queue, m->stop_when_bound_queue))) { | |
1567 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
1568 | Unit *culprit = NULL; | |
1569 | ||
1570 | assert(u->in_stop_when_bound_queue); | |
1571 | u->in_stop_when_bound_queue = false; | |
1572 | ||
1573 | n++; | |
1574 | ||
1575 | if (!unit_is_bound_by_inactive(u, &culprit)) | |
1576 | continue; | |
1577 | ||
1578 | log_unit_debug(u, "Unit is stopped because bound to inactive unit %s.", culprit->id); | |
1579 | ||
1580 | /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the | |
1581 | * unit being bound to an inactive unit after a while. */ | |
1582 | ||
1583 | r = manager_ratelimit_check_and_queue(u); | |
1584 | if (r <= 0) { | |
1585 | log_unit_warning(u, | |
1586 | "Unit needs to be stopped because it is bound to inactive unit %s it, but not stopping since we tried this too often recently.%s", | |
1587 | culprit->id, | |
1588 | r == 0 ? " Will retry later." : ""); | |
1589 | continue; | |
1590 | } | |
1591 | ||
1592 | r = manager_add_job(u->manager, JOB_STOP, u, JOB_REPLACE, &error, /* ret = */ NULL); | |
1593 | if (r < 0) | |
1594 | log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r)); | |
1595 | } | |
1596 | ||
1597 | return n; | |
1598 | } | |
1599 | ||
1600 | static unsigned manager_dispatch_stop_notify_queue(Manager *m) { | |
1601 | unsigned n = 0; | |
1602 | ||
1603 | assert(m); | |
1604 | ||
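| /* may_dispatch_stop_notify_queue is a tristate: negative means "not determined yet for this event | |
| * loop iteration", in which case we dispatch only if no jobs are currently pending. */ | |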
1605 | if (m->may_dispatch_stop_notify_queue < 0) | |
1606 | m->may_dispatch_stop_notify_queue = hashmap_isempty(m->jobs); | |
1607 | ||
1608 | if (!m->may_dispatch_stop_notify_queue) | |
1609 | return 0; | |
1610 | ||
1611 | m->may_dispatch_stop_notify_queue = false; | |
1612 | ||
1613 | LIST_FOREACH(stop_notify_queue, u, m->stop_notify_queue) { | |
1614 | assert(u->in_stop_notify_queue); | |
1615 | ||
1616 | assert(UNIT_VTABLE(u)->stop_notify); | |
1617 | if (UNIT_VTABLE(u)->stop_notify(u)) { | |
1618 | assert(!u->in_stop_notify_queue); | |
1619 | n++; | |
1620 | } | |
1621 | } | |
1622 | ||
1623 | return n; | |
1624 | } | |
1625 | ||
1626 | static void manager_clear_jobs_and_units(Manager *m) { | |
1627 | Unit *u; | |
1628 | ||
1629 | assert(m); | |
1630 | ||
1631 | while ((u = hashmap_first(m->units))) | |
1632 | unit_free(u); | |
1633 | ||
1634 | manager_dispatch_cleanup_queue(m); | |
1635 | ||
1636 | assert(!m->load_queue); | |
1637 | assert(prioq_isempty(m->run_queue)); | |
1638 | assert(!m->dbus_unit_queue); | |
1639 | assert(!m->dbus_job_queue); | |
1640 | assert(!m->cleanup_queue); | |
1641 | assert(!m->gc_unit_queue); | |
1642 | assert(!m->gc_job_queue); | |
1643 | assert(!m->cgroup_realize_queue); | |
1644 | assert(!m->cgroup_empty_queue); | |
1645 | assert(!m->cgroup_oom_queue); | |
1646 | assert(!m->target_deps_queue); | |
1647 | assert(!m->stop_when_unneeded_queue); | |
1648 | assert(!m->start_when_upheld_queue); | |
1649 | assert(!m->stop_when_bound_queue); | |
1650 | assert(!m->release_resources_queue); | |
1651 | ||
1652 | assert(hashmap_isempty(m->jobs)); | |
1653 | assert(hashmap_isempty(m->units)); | |
1654 | assert(hashmap_isempty(m->units_by_invocation_id)); | |
1655 | ||
1656 | m->n_on_console = 0; | |
1657 | m->n_running_jobs = 0; | |
1658 | m->n_installed_jobs = 0; | |
1659 | m->n_failed_jobs = 0; | |
1660 | } | |
1661 | ||
1662 | Manager* manager_free(Manager *m) { | |
1663 | if (!m) | |
1664 | return NULL; | |
1665 | ||
1666 | manager_clear_jobs_and_units(m); | |
1667 | ||
1668 | for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) | |
1669 | if (unit_vtable[c]->shutdown) | |
1670 | unit_vtable[c]->shutdown(m); | |
1671 | ||
1672 | /* Keep the cgroup hierarchy in place except when we know we are going down for good */ | |
1673 | manager_shutdown_cgroup(m, /* delete= */ IN_SET(m->objective, MANAGER_EXIT, MANAGER_REBOOT, MANAGER_POWEROFF, MANAGER_HALT, MANAGER_KEXEC)); | |
1674 | ||
1675 | lookup_paths_flush_generator(&m->lookup_paths); | |
1676 | ||
1677 | bus_done(m); | |
1678 | manager_varlink_done(m); | |
1679 | ||
1680 | exec_shared_runtime_vacuum(m); | |
1681 | hashmap_free(m->exec_shared_runtime_by_id); | |
1682 | ||
1683 | dynamic_user_vacuum(m, false); | |
1684 | hashmap_free(m->dynamic_users); | |
1685 | ||
1686 | hashmap_free(m->units); | |
1687 | hashmap_free(m->units_by_invocation_id); | |
1688 | hashmap_free(m->jobs); | |
1689 | hashmap_free(m->watch_pids); | |
1690 | hashmap_free(m->watch_pids_more); | |
1691 | hashmap_free(m->watch_bus); | |
1692 | ||
1693 | prioq_free(m->run_queue); | |
1694 | ||
1695 | set_free(m->startup_units); | |
1696 | set_free(m->failed_units); | |
1697 | ||
1698 | sd_event_source_unref(m->signal_event_source); | |
1699 | sd_event_source_unref(m->sigchld_event_source); | |
1700 | sd_event_source_unref(m->notify_event_source); | |
1701 | sd_event_source_unref(m->time_change_event_source); | |
1702 | sd_event_source_unref(m->timezone_change_event_source); | |
1703 | sd_event_source_unref(m->jobs_in_progress_event_source); | |
1704 | sd_event_source_unref(m->run_queue_event_source); | |
1705 | sd_event_source_unref(m->user_lookup_event_source); | |
1706 | sd_event_source_unref(m->handoff_timestamp_event_source); | |
1707 | sd_event_source_unref(m->pidref_event_source); | |
1708 | sd_event_source_unref(m->memory_pressure_event_source); | |
1709 | ||
1710 | safe_close(m->signal_fd); | |
1711 | safe_close(m->notify_fd); | |
1712 | safe_close_pair(m->user_lookup_fds); | |
1713 | safe_close_pair(m->handoff_timestamp_fds); | |
1714 | safe_close_pair(m->pidref_transport_fds); | |
1715 | ||
1716 | manager_close_ask_password(m); | |
1717 | ||
1718 | manager_close_idle_pipe(m); | |
1719 | ||
1720 | sd_event_unref(m->event); | |
1721 | ||
1722 | free(m->notify_socket); | |
1723 | ||
1724 | lookup_paths_done(&m->lookup_paths); | |
1725 | strv_free(m->transient_environment); | |
1726 | strv_free(m->client_environment); | |
1727 | ||
1728 | hashmap_free(m->cgroup_unit); | |
1729 | manager_free_unit_name_maps(m); | |
1730 | ||
1731 | free(m->switch_root); | |
1732 | free(m->switch_root_init); | |
1733 | ||
1734 | sd_bus_track_unref(m->subscribed); | |
1735 | strv_free(m->subscribed_as_strv); | |
1736 | ||
1737 | unit_defaults_done(&m->defaults); | |
1738 | ||
1739 | FOREACH_ARRAY(map, m->units_needing_mounts_for, _UNIT_MOUNT_DEPENDENCY_TYPE_MAX) { | |
1740 | assert(hashmap_isempty(*map)); | |
1741 | hashmap_free(*map); | |
1742 | } | |
1743 | ||
1744 | hashmap_free(m->uid_refs); | |
1745 | hashmap_free(m->gid_refs); | |
1746 | ||
1747 | FOREACH_ARRAY(i, m->prefix, _EXEC_DIRECTORY_TYPE_MAX) | |
1748 | free(*i); | |
1749 | ||
1750 | free(m->received_credentials_directory); | |
1751 | free(m->received_encrypted_credentials_directory); | |
1752 | ||
1753 | free(m->watchdog_pretimeout_governor); | |
1754 | free(m->watchdog_pretimeout_governor_overridden); | |
1755 | ||
1756 | fw_ctx_free(m->fw_ctx); | |
1757 | ||
1758 | #if BPF_FRAMEWORK | |
1759 | bpf_restrict_fs_destroy(m->restrict_fs); | |
1760 | #endif | |
1761 | ||
1762 | safe_close(m->executor_fd); | |
1763 | free(m->executor_path); | |
1764 | ||
1765 | return mfree(m); | |
1766 | } | |
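| /* Note the teardown order above: units and jobs are freed first, while the event loop and bus | |
| * connections they reference still exist; then the bus and Varlink layers go, then the per-manager | |
| * tables, event sources and fds, and only at the very end the event loop itself. */ | |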
1767 | ||
1768 | static void manager_enumerate_perpetual(Manager *m) { | |
1769 | assert(m); | |
1770 | ||
1771 | if (FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL)) | |
1772 | return; | |
1773 | ||
1774 | /* Let's ask every type to set up the perpetual units it might know about */ | |
1775 | for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) { | |
1776 | if (!unit_type_supported(c)) { | |
1777 | log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c)); | |
1778 | continue; | |
1779 | } | |
1780 | ||
1781 | if (unit_vtable[c]->enumerate_perpetual) | |
1782 | unit_vtable[c]->enumerate_perpetual(m); | |
1783 | } | |
1784 | } | |
1785 | ||
1786 | static void manager_enumerate(Manager *m) { | |
1787 | assert(m); | |
1788 | ||
1789 | if (FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL)) | |
1790 | return; | |
1791 | ||
1792 | /* Let's ask every type to load all units from disk/kernel that it might know */ | |
1793 | for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) { | |
1794 | if (!unit_type_supported(c)) { | |
1795 | log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c)); | |
1796 | continue; | |
1797 | } | |
1798 | ||
1799 | if (unit_vtable[c]->enumerate) | |
1800 | unit_vtable[c]->enumerate(m); | |
1801 | } | |
1802 | ||
1803 | manager_dispatch_load_queue(m); | |
1804 | } | |
1805 | ||
1806 | static void manager_coldplug(Manager *m) { | |
1807 | Unit *u; | |
1808 | char *k; | |
1809 | int r; | |
1810 | ||
1811 | assert(m); | |
1812 | ||
1813 | log_debug("Invoking unit coldplug() handlers%s", glyph(GLYPH_ELLIPSIS)); | |
1814 | ||
1815 | /* Let's place the units back into their deserialized state */ | |
1816 | HASHMAP_FOREACH_KEY(u, k, m->units) { | |
1817 | ||
1818 | /* ignore aliases */ | |
1819 | if (u->id != k) | |
1820 | continue; | |
1821 | ||
1822 | r = unit_coldplug(u); | |
1823 | if (r < 0) | |
1824 | log_warning_errno(r, "We couldn't coldplug %s, proceeding anyway: %m", u->id); | |
1825 | } | |
1826 | } | |
1827 | ||
1828 | static void manager_catchup(Manager *m) { | |
1829 | Unit *u; | |
1830 | char *k; | |
1831 | ||
1832 | assert(m); | |
1833 | ||
1834 | log_debug("Invoking unit catchup() handlers%s", glyph(GLYPH_ELLIPSIS)); | |
1835 | ||
1836 | /* Let's catch up on any state changes that happened while we were reloading/reexecing */ | |
1837 | HASHMAP_FOREACH_KEY(u, k, m->units) { | |
1838 | ||
1839 | /* ignore aliases */ | |
1840 | if (u->id != k) | |
1841 | continue; | |
1842 | ||
1843 | unit_catchup(u); | |
1844 | } | |
1845 | } | |
1846 | ||
1847 | static void manager_distribute_fds(Manager *m, FDSet *fds) { | |
1848 | Unit *u; | |
1849 | ||
1850 | assert(m); | |
1851 | ||
1852 | HASHMAP_FOREACH(u, m->units) { | |
1853 | ||
1854 | if (fdset_isempty(fds)) | |
1855 | break; | |
1856 | ||
1857 | if (!UNIT_VTABLE(u)->distribute_fds) | |
1858 | continue; | |
1859 | ||
1860 | UNIT_VTABLE(u)->distribute_fds(u, fds); | |
1861 | } | |
1862 | } | |
1863 | ||
1864 | static bool manager_dbus_is_running(Manager *m, bool deserialized) { | |
1865 | Unit *u; | |
1866 | ||
1867 | assert(m); | |
1868 | ||
1869 | /* This checks whether the dbus instance we are supposed to expose our APIs on is up. We check both the socket | |
1870 | * and the service unit. If the 'deserialized' parameter is true we'll check the deserialized state of the unit | |
1871 | * rather than the current one. */ | |
1872 | ||
1873 | if (MANAGER_IS_TEST_RUN(m)) | |
1874 | return false; | |
1875 | ||
1876 | u = manager_get_unit(m, SPECIAL_DBUS_SOCKET); | |
1877 | if (!u) | |
1878 | return false; | |
1879 | if ((deserialized ? SOCKET(u)->deserialized_state : SOCKET(u)->state) != SOCKET_RUNNING) | |
1880 | return false; | |
1881 | ||
1882 | u = manager_get_unit(m, SPECIAL_DBUS_SERVICE); | |
1883 | if (!u) | |
1884 | return false; | |
1885 | if (!IN_SET((deserialized ? SERVICE(u)->deserialized_state : SERVICE(u)->state), | |
1886 | SERVICE_RUNNING, | |
1887 | SERVICE_MOUNTING, | |
1888 | SERVICE_RELOAD, | |
1889 | SERVICE_RELOAD_NOTIFY, | |
1890 | SERVICE_REFRESH_EXTENSIONS, | |
1891 | SERVICE_RELOAD_SIGNAL)) | |
1892 | return false; | |
1893 | ||
1894 | return true; | |
1895 | } | |
1896 | ||
1897 | static void manager_setup_bus(Manager *m) { | |
1898 | assert(m); | |
1899 | ||
1900 | if (MANAGER_IS_TEST_RUN(m)) | |
1901 | return; | |
1902 | ||
1903 | /* Let's set up our private bus connection now, unconditionally */ | |
1904 | (void) bus_init_private(m); | |
1905 | ||
1906 | /* If we are in --user mode also connect to the system bus now */ | |
1907 | if (MANAGER_IS_USER(m)) | |
1908 | (void) bus_init_system(m); | |
1909 | ||
1910 | /* Let's connect to the bus now, but only if the unit is supposed to be up */ | |
1911 | if (manager_dbus_is_running(m, MANAGER_IS_RELOADING(m))) { | |
1912 | (void) bus_init_api(m); | |
1913 | ||
1914 | if (MANAGER_IS_SYSTEM(m)) | |
1915 | (void) bus_init_system(m); | |
1916 | } | |
1917 | } | |
1918 | ||
1919 | static void manager_preset_all(Manager *m) { | |
1920 | int r; | |
1921 | ||
1922 | assert(m); | |
1923 | ||
1924 | if (m->first_boot <= 0) | |
1925 | return; | |
1926 | ||
1927 | if (!MANAGER_IS_SYSTEM(m)) | |
1928 | return; | |
1929 | ||
1930 | if (MANAGER_IS_TEST_RUN(m)) | |
1931 | return; | |
1932 | ||
1933 | /* If this is the first boot, and we are in the host system, then preset everything */ | |
1934 | UnitFilePresetMode mode = | |
1935 | ENABLE_FIRST_BOOT_FULL_PRESET ? UNIT_FILE_PRESET_FULL : UNIT_FILE_PRESET_ENABLE_ONLY; | |
1936 | InstallChange *changes = NULL; | |
1937 | size_t n_changes = 0; | |
1938 | ||
1939 | CLEANUP_ARRAY(changes, n_changes, install_changes_free); | |
1940 | ||
1941 | log_info("Applying preset policy."); | |
1942 | r = unit_file_preset_all(RUNTIME_SCOPE_SYSTEM, /* file_flags = */ 0, | |
1943 | /* root_dir = */ NULL, mode, &changes, &n_changes); | |
1944 | install_changes_dump(r, "preset", changes, n_changes, /* quiet = */ false); | |
1945 | if (r < 0) | |
1946 | log_full_errno(r == -EEXIST ? LOG_NOTICE : LOG_WARNING, r, | |
1947 | "Failed to populate /etc with preset unit settings, ignoring: %m"); | |
1948 | else | |
1949 | log_info("Populated /etc with preset unit settings."); | |
1950 | } | |
1951 | ||
1952 | static void manager_ready(Manager *m) { | |
1953 | assert(m); | |
1954 | ||
1955 | /* After having loaded everything, do the final round of catching up with what might have changed */ | |
1956 | ||
1957 | m->objective = MANAGER_OK; /* Tell everyone we are up now */ | |
1958 | ||
1959 | /* It might be safe to log to the journal now and connect to dbus */ | |
1960 | manager_recheck_journal(m); | |
1961 | manager_recheck_dbus(m); | |
1962 | ||
1963 | /* Let's finally catch up with any changes that took place while we were reloading/reexecing */ | |
1964 | manager_catchup(m); | |
1965 | ||
1966 | /* Create a file which will indicate when the manager started loading units the last time. */ | |
1967 | if (MANAGER_IS_SYSTEM(m)) | |
1968 | (void) touch_file("/run/systemd/systemd-units-load", false, | |
1969 | m->timestamps[MANAGER_TIMESTAMP_UNITS_LOAD].realtime ?: now(CLOCK_REALTIME), | |
1970 | UID_INVALID, GID_INVALID, 0444); | |
1971 | } | |
1972 | ||
1973 | Manager* manager_reloading_start(Manager *m) { | |
1974 | m->n_reloading++; | |
1975 | dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD); | |
1976 | return m; | |
1977 | } | |
1978 | ||
1979 | void manager_reloading_stopp(Manager **m) { | |
1980 | if (*m) { | |
1981 | assert((*m)->n_reloading > 0); | |
1982 | (*m)->n_reloading--; | |
1983 | } | |
1984 | } | |
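| /* These two pair up via _cleanup_, as used in manager_startup() below: | |
| * | |
| *     _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = manager_reloading_start(m); | |
| * | |
| * keeps MANAGER_IS_RELOADING(m) true for the rest of the scope and decrements the counter on every | |
| * exit path, including early returns. */ | |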
1985 | ||
1986 | static int manager_make_runtime_dir(Manager *m) { | |
1987 | int r; | |
1988 | ||
1989 | assert(m); | |
1990 | ||
1991 | _cleanup_free_ char *d = path_join(m->prefix[EXEC_DIRECTORY_RUNTIME], "systemd"); | |
1992 | if (!d) | |
1993 | return log_oom(); | |
1994 | ||
1995 | r = mkdir_label(d, 0755); | |
1996 | if (r < 0 && r != -EEXIST) | |
1997 | return log_error_errno(r, "Failed to create directory '%s/': %m", d); | |
1998 | ||
1999 | return 0; | |
2000 | } | |
2001 | ||
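| /* Startup proceeds in phases: run the environment and unit generators, apply first-boot presets, | |
| * enumerate perpetual and on-disk units, deserialize any prior state, hand leftover fds to units, | |
| * set up the notify/user-lookup/handoff/pidref transports plus bus and Varlink, coldplug units back | |
| * into their deserialized state, vacuum stale runtime objects, and finally declare ourselves ready. */ | |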
2002 | int manager_startup(Manager *m, FILE *serialization, FDSet *fds, const char *root) { | |
2003 | int r; | |
2004 | ||
2005 | assert(m); | |
2006 | ||
2007 | r = manager_make_runtime_dir(m); | |
2008 | if (r < 0) | |
2009 | return r; | |
2010 | ||
2011 | /* If we are running in test mode, we still want to run the generators, | |
2012 | * but we should not touch the real generator directories. */ | |
2013 | r = lookup_paths_init_or_warn(&m->lookup_paths, m->runtime_scope, | |
2014 | MANAGER_IS_TEST_RUN(m) ? LOOKUP_PATHS_TEMPORARY_GENERATED : 0, | |
2015 | root); | |
2016 | if (r < 0) | |
2017 | return r; | |
2018 | ||
2019 | dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_START)); | |
2020 | r = manager_run_environment_generators(m); | |
2021 | if (r >= 0) | |
2022 | r = manager_run_generators(m); | |
2023 | dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_FINISH)); | |
2024 | if (r < 0) | |
2025 | return r; | |
2026 | ||
2027 | manager_preset_all(m); | |
2028 | ||
2029 | lookup_paths_log(&m->lookup_paths); | |
2030 | ||
2031 | { | |
2032 | /* This block is (optionally) done with the reloading counter bumped */ | |
2033 | _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL; | |
2034 | ||
2035 | /* Make sure we don't have a left-over from a previous run */ | |
2036 | if (!serialization) | |
2037 | (void) rm_rf(m->lookup_paths.transient, 0); | |
2038 | ||
2039 | /* If we are going to deserialize, make sure that is already known during enumeration, so we | |
2040 | * bump the counter here already */ | |
2041 | if (serialization) | |
2042 | reloading = manager_reloading_start(m); | |
2043 | ||
2044 | /* First, enumerate what we can from all config files */ | |
2045 | dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_START)); | |
2046 | manager_enumerate_perpetual(m); | |
2047 | manager_enumerate(m); | |
2048 | dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_FINISH)); | |
2049 | ||
2050 | /* Second, deserialize if there is something to deserialize */ | |
2051 | if (serialization) { | |
2052 | r = manager_deserialize(m, serialization, fds); | |
2053 | if (r < 0) | |
2054 | return log_error_errno(r, "Deserialization failed: %m"); | |
2055 | } | |
2056 | ||
2057 | if (m->previous_objective >= 0) { | |
2058 | if (IN_SET(m->previous_objective, MANAGER_REEXECUTE, MANAGER_SOFT_REBOOT, MANAGER_SWITCH_ROOT)) | |
2059 | log_debug("Launching as effect of a '%s' operation.", | |
2060 | manager_objective_to_string(m->previous_objective)); | |
2061 | else | |
2062 | log_warning("Got unexpected previous objective '%s', ignoring.", | |
2063 | manager_objective_to_string(m->previous_objective)); | |
2064 | } | |
2065 | ||
2066 | /* If we are in a new soft-reboot iteration bump the counter now before starting units, so | |
2067 | * that they can reliably read it. We get the previous objective from serialized state. */ | |
2068 | if (m->previous_objective == MANAGER_SOFT_REBOOT) | |
2069 | m->soft_reboots_count++; | |
2070 | ||
2071 | /* Any fds left? Find some unit which wants them. This is useful to allow container managers to pass | |
2072 | * some file descriptors to us pre-initialized. This enables socket-based activation of entire | |
2073 | * containers. */ | |
2074 | manager_distribute_fds(m, fds); | |
2075 | ||
2076 | /* We might have deserialized the notify fd, but if we didn't then let's create it now */ | |
2077 | r = manager_setup_notify(m); | |
2078 | if (r < 0) | |
2079 | /* No sense in continuing without notifications, our children would fail anyway. */ | |
2080 | return r; | |
2081 | ||
2082 | r = manager_setup_user_lookup_fd(m); | |
2083 | if (r < 0) | |
2084 | /* This shouldn't fail, except if things are really broken. */ | |
2085 | return r; | |
2086 | ||
2087 | r = manager_setup_handoff_timestamp_fd(m); | |
2088 | if (r < 0) | |
2089 | /* This shouldn't fail, except if things are really broken. */ | |
2090 | return r; | |
2091 | ||
2092 | r = manager_setup_pidref_transport_fd(m); | |
2093 | if (r < 0) | |
2094 | /* This shouldn't fail, except if things are really broken. */ | |
2095 | return r; | |
2096 | ||
2097 | /* Connect to the bus if we are good for it */ | |
2098 | manager_setup_bus(m); | |
2099 | ||
2100 | r = manager_varlink_init(m); | |
2101 | if (r < 0) | |
2102 | log_warning_errno(r, "Failed to set up Varlink, ignoring: %m"); | |
2103 | ||
2104 | /* Third, fire things up! */ | |
2105 | manager_coldplug(m); | |
2106 | ||
2107 | /* Clean up runtime objects */ | |
2108 | manager_vacuum(m); | |
2109 | ||
2110 | if (serialization) | |
2111 | /* Let's wait for the UnitNew/JobNew messages to be sent before we notify that the | |
2112 | * reload is finished */ | |
2113 | m->send_reloading_done = true; | |
2114 | } | |
2115 | ||
2116 | manager_ready(m); | |
2117 | ||
2118 | manager_set_switching_root(m, false); | |
2119 | ||
2120 | return 0; | |
2121 | } | |
2122 | ||
2123 | int manager_add_job_full( | |
2124 | Manager *m, | |
2125 | JobType type, | |
2126 | Unit *unit, | |
2127 | JobMode mode, | |
2128 | TransactionAddFlags extra_flags, | |
2129 | Set *affected_jobs, | |
2130 | sd_bus_error *error, | |
2131 | Job **ret) { | |
2132 | ||
2133 | _cleanup_(transaction_abort_and_freep) Transaction *tr = NULL; | |
2134 | int r; | |
2135 | ||
2136 | assert(m); | |
2137 | assert(type >= 0 && type < _JOB_TYPE_MAX); | |
2138 | assert(unit); | |
2139 | assert(mode >= 0 && mode < _JOB_MODE_MAX); | |
2140 | assert((extra_flags & ~_TRANSACTION_FLAGS_MASK_PUBLIC) == 0); | |
2141 | ||
2142 | if (mode == JOB_ISOLATE && type != JOB_START) | |
2143 | return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start."); | |
2144 | ||
2145 | if (mode == JOB_ISOLATE && !unit->allow_isolate) | |
2146 | return sd_bus_error_set(error, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated."); | |
2147 | ||
2148 | if (mode == JOB_TRIGGERING && type != JOB_STOP) | |
2149 | return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=triggering is only valid for stop."); | |
2150 | ||
2151 | if (mode == JOB_RESTART_DEPENDENCIES && type != JOB_START) | |
2152 | return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=restart-dependencies is only valid for start."); | |
2153 | ||
2154 | log_unit_debug(unit, "Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode)); | |
2155 | ||
2156 | type = job_type_collapse(type, unit); | |
2157 | ||
2158 | tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY); | |
2159 | if (!tr) | |
2160 | return -ENOMEM; | |
2161 | ||
2162 | r = transaction_add_job_and_dependencies( | |
2163 | tr, | |
2164 | type, | |
2165 | unit, | |
2166 | /* by= */ NULL, | |
2167 | TRANSACTION_MATTERS | | |
2168 | (IN_SET(mode, JOB_IGNORE_DEPENDENCIES, JOB_IGNORE_REQUIREMENTS) ? TRANSACTION_IGNORE_REQUIREMENTS : 0) | | |
2169 | (mode == JOB_IGNORE_DEPENDENCIES ? TRANSACTION_IGNORE_ORDER : 0) | | |
2170 | (mode == JOB_RESTART_DEPENDENCIES ? TRANSACTION_PROPAGATE_START_AS_RESTART : 0) | | |
2171 | extra_flags, | |
2172 | error); | |
2173 | if (r < 0) | |
2174 | return r; | |
2175 | ||
2176 | if (mode == JOB_ISOLATE) { | |
2177 | r = transaction_add_isolate_jobs(tr, m); | |
2178 | if (r < 0) | |
2179 | return r; | |
2180 | } | |
2181 | ||
2182 | if (mode == JOB_TRIGGERING) { | |
2183 | r = transaction_add_triggering_jobs(tr, unit); | |
2184 | if (r < 0) | |
2185 | return r; | |
2186 | } | |
2187 | ||
2188 | r = transaction_activate(tr, m, mode, affected_jobs, error); | |
2189 | if (r < 0) | |
2190 | return r; | |
2191 | ||
2192 | log_unit_debug(unit, | |
2193 | "Enqueued job %s/%s as %u", unit->id, | |
2194 | job_type_to_string(type), (unsigned) tr->anchor_job->id); | |
2195 | ||
2196 | if (ret) | |
2197 | *ret = tr->anchor_job; | |
2198 | ||
2199 | tr = transaction_free(tr); | |
2200 | return 0; | |
2201 | } | |
2202 | ||
2203 | int manager_add_job( | |
2204 | Manager *m, | |
2205 | JobType type, | |
2206 | Unit *unit, | |
2207 | JobMode mode, | |
2208 | sd_bus_error *error, | |
2209 | Job **ret) { | |
2210 | ||
2211 | return manager_add_job_full(m, type, unit, mode, 0, NULL, error, ret); | |
2212 | } | |
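| /* Typical call site, mirroring the queue dispatchers above (sketch): | |
| * | |
| *     _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
| * | |
| *     r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL); | |
| *     if (r < 0) | |
| *             log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", | |
| *                                    bus_error_message(&error, r)); | |
| */ | |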
2213 | ||
2214 | int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, sd_bus_error *e, Job **ret) { | |
2215 | Unit *unit = NULL; /* just to appease gcc, initialization is not really necessary */ | |
2216 | int r; | |
2217 | ||
2218 | assert(m); | |
2219 | assert(type < _JOB_TYPE_MAX); | |
2220 | assert(name); | |
2221 | assert(mode < _JOB_MODE_MAX); | |
2222 | ||
2223 | r = manager_load_unit(m, name, NULL, NULL, &unit); | |
2224 | if (r < 0) | |
2225 | return r; | |
2226 | assert(unit); | |
2227 | ||
2228 | return manager_add_job_full(m, type, unit, mode, /* extra_flags = */ 0, affected_jobs, e, ret); | |
2229 | } | |
2230 | ||
2231 | int manager_add_job_by_name_and_warn(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, Job **ret) { | |
2232 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
2233 | int r; | |
2234 | ||
2235 | assert(m); | |
2236 | assert(type < _JOB_TYPE_MAX); | |
2237 | assert(name); | |
2238 | assert(mode < _JOB_MODE_MAX); | |
2239 | ||
2240 | r = manager_add_job_by_name(m, type, name, mode, affected_jobs, &error, ret); | |
2241 | if (r < 0) | |
2242 | return log_warning_errno(r, "Failed to enqueue %s job for %s: %s", job_mode_to_string(mode), name, bus_error_message(&error, r)); | |
2243 | ||
2244 | return r; | |
2245 | } | |
2246 | ||
2247 | int manager_propagate_reload(Manager *m, Unit *unit, JobMode mode, sd_bus_error *e) { | |
2248 | int r; | |
2249 | _cleanup_(transaction_abort_and_freep) Transaction *tr = NULL; | |
2250 | ||
2251 | assert(m); | |
2252 | assert(unit); | |
2253 | assert(mode < _JOB_MODE_MAX); | |
2254 | assert(mode != JOB_ISOLATE); /* Isolate is only valid for start */ | |
2255 | ||
2256 | tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY); | |
2257 | if (!tr) | |
2258 | return -ENOMEM; | |
2259 | ||
2260 | /* We need an anchor job */ | |
2261 | r = transaction_add_job_and_dependencies(tr, JOB_NOP, unit, NULL, TRANSACTION_IGNORE_REQUIREMENTS|TRANSACTION_IGNORE_ORDER, e); | |
2262 | if (r < 0) | |
2263 | return r; | |
2264 | ||
2265 | /* Failure in adding individual dependencies is ignored, so this always succeeds. */ | |
2266 | transaction_add_propagate_reload_jobs( | |
2267 | tr, | |
2268 | unit, | |
2269 | tr->anchor_job, | |
2270 | mode == JOB_IGNORE_DEPENDENCIES ? TRANSACTION_IGNORE_ORDER : 0); | |
2271 | ||
2272 | r = transaction_activate(tr, m, mode, NULL, e); | |
2273 | if (r < 0) | |
2274 | return r; | |
2275 | ||
2276 | tr = transaction_free(tr); | |
2277 | return 0; | |
2278 | } | |
2279 | ||
2280 | Job *manager_get_job(Manager *m, uint32_t id) { | |
2281 | assert(m); | |
2282 | ||
2283 | return hashmap_get(m->jobs, UINT32_TO_PTR(id)); | |
2284 | } | |
2285 | ||
2286 | Unit *manager_get_unit(Manager *m, const char *name) { | |
2287 | assert(m); | |
2288 | assert(name); | |
2289 | ||
2290 | return hashmap_get(m->units, name); | |
2291 | } | |
2292 | ||
2293 | static int manager_dispatch_target_deps_queue(Manager *m) { | |
2294 | Unit *u; | |
2295 | int r = 0; | |
2296 | ||
2297 | assert(m); | |
2298 | ||
2299 | while ((u = LIST_POP(target_deps_queue, m->target_deps_queue))) { | |
2300 | _cleanup_free_ Unit **targets = NULL; | |
2301 | int n_targets; | |
2302 | ||
2303 | assert(u->in_target_deps_queue); | |
2304 | ||
2305 | u->in_target_deps_queue = false; | |
2306 | ||
2307 | /* Take an "atomic" snapshot of dependencies here, as the call below will likely modify the | |
2308 | * dependencies, and the hash tables we iterate through must not be modified while we | |
2309 | * are iterating through them. */ | |
2310 | n_targets = unit_get_dependency_array(u, UNIT_ATOM_DEFAULT_TARGET_DEPENDENCIES, &targets); | |
2311 | if (n_targets < 0) | |
2312 | return n_targets; | |
2313 | ||
2314 | FOREACH_ARRAY(i, targets, n_targets) { | |
2315 | r = unit_add_default_target_dependency(u, *i); | |
2316 | if (r < 0) | |
2317 | return r; | |
2318 | } | |
2319 | } | |
2320 | ||
2321 | return r; | |
2322 | } | |
2323 | ||
2324 | unsigned manager_dispatch_load_queue(Manager *m) { | |
2325 | Unit *u; | |
2326 | unsigned n = 0; | |
2327 | ||
2328 | assert(m); | |
2329 | ||
2330 | /* Make sure we are not run recursively */ | |
2331 | if (m->dispatching_load_queue) | |
2332 | return 0; | |
2333 | ||
2334 | m->dispatching_load_queue = true; | |
2335 | ||
2336 | /* Dispatches the load queue. Takes a unit from the queue and | |
2337 | * tries to load its data until the queue is empty */ | |
2338 | ||
2339 | while ((u = m->load_queue)) { | |
2340 | assert(u->in_load_queue); | |
2341 | ||
2342 | unit_load(u); | |
2343 | n++; | |
2344 | } | |
2345 | ||
2346 | m->dispatching_load_queue = false; | |
2347 | ||
2348 | /* Dispatch the units waiting for their target dependencies to be added now, as all targets that we know about | |
2349 | * should be loaded and have aliases resolved */ | |
2350 | (void) manager_dispatch_target_deps_queue(m); | |
2351 | ||
2352 | return n; | |
2353 | } | |
2354 | ||
2355 | bool manager_unit_cache_should_retry_load(Unit *u) { | |
2356 | assert(u); | |
2357 | ||
2358 | /* Automatic reloading from disk only applies to units which were not found sometime in the past, and | |
2359 | * the not-found stub is kept pinned in the unit graph by dependencies. For units that were | |
2360 | * previously loaded, we don't do automatic reloading, and daemon-reload is necessary to update. */ | |
2361 | if (u->load_state != UNIT_NOT_FOUND) | |
2362 | return false; | |
2363 | ||
2364 | /* The cache has been updated since the last time we tried to load the unit. There might be new | |
2365 | * fragment paths to read. */ | |
2366 | if (u->manager->unit_cache_timestamp_hash != u->fragment_not_found_timestamp_hash) | |
2367 | return true; | |
2368 | ||
2369 | /* The cache needs to be updated because there are modifications on disk. */ | |
2370 | return !lookup_paths_timestamp_hash_same(&u->manager->lookup_paths, u->manager->unit_cache_timestamp_hash, NULL); | |
2371 | } | |
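| /* In short: only UNIT_NOT_FOUND stubs are ever retried, and only when either the cache was rebuilt | |
| * since the failed load attempt, or something in the lookup paths changed on disk since the cache | |
| * itself was built. */ | |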
2372 | ||
2373 | int manager_load_unit_prepare( | |
2374 | Manager *m, | |
2375 | const char *name, | |
2376 | const char *path, | |
2377 | sd_bus_error *e, | |
2378 | Unit **ret) { | |
2379 | ||
2380 | _cleanup_(unit_freep) Unit *cleanup_unit = NULL; | |
2381 | _cleanup_free_ char *nbuf = NULL; | |
2382 | int r; | |
2383 | ||
2384 | assert(m); | |
2385 | assert(ret); | |
2386 | assert(name || path); | |
2387 | ||
2388 | /* This will prepare the unit for loading, but not actually load anything from disk. */ | |
2389 | ||
2390 | if (path && !path_is_absolute(path)) | |
2391 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path %s is not absolute.", path); | |
2392 | ||
2393 | if (!name) { | |
2394 | r = path_extract_filename(path, &nbuf); | |
2395 | if (r < 0) | |
2396 | return r; | |
2397 | if (r == O_DIRECTORY) | |
2398 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path '%s' refers to directory, refusing.", path); | |
2399 | ||
2400 | name = nbuf; | |
2401 | } | |
2402 | ||
2403 | UnitType t = unit_name_to_type(name); | |
2404 | ||
2405 | if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) { | |
2406 | if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) | |
2407 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is missing the instance name.", name); | |
2408 | ||
2409 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is not valid.", name); | |
2410 | } | |
2411 | ||
2412 | Unit *unit = manager_get_unit(m, name); | |
2413 | if (unit) { | |
2414 | /* The time-based cache allows new units to be started without daemon-reload, | |
2415 | * but if they are already referenced (because of dependencies or ordering) | |
2416 | * then we have to force a load of the fragment. As an optimization, check | |
2417 | * first if anything in the usual paths was modified since the last time | |
2418 | * the cache was loaded. Also check if the last time an attempt to load the | |
2419 | * unit was made was before the most recent cache refresh, so that we know | |
2420 | * we need to try again — even if the cache is current, it might have been | |
2421 | * updated in a different context before we had a chance to retry loading | |
2422 | * this particular unit. */ | |
2423 | if (manager_unit_cache_should_retry_load(unit)) | |
2424 | unit->load_state = UNIT_STUB; | |
2425 | else { | |
2426 | *ret = unit; | |
2427 | return 0; /* The unit was already loaded */ | |
2428 | } | |
2429 | } else { | |
2430 | unit = cleanup_unit = unit_new(m, unit_vtable[t]->object_size); | |
2431 | if (!unit) | |
2432 | return -ENOMEM; | |
2433 | } | |
2434 | ||
2435 | if (path) { | |
2436 | r = free_and_strdup(&unit->fragment_path, path); | |
2437 | if (r < 0) | |
2438 | return r; | |
2439 | } | |
2440 | ||
2441 | r = unit_add_name(unit, name); | |
2442 | if (r < 0) | |
2443 | return r; | |
2444 | ||
2445 | unit_add_to_load_queue(unit); | |
2446 | unit_add_to_dbus_queue(unit); | |
2447 | unit_add_to_gc_queue(unit); | |
2448 | ||
2449 | *ret = unit; | |
2450 | TAKE_PTR(cleanup_unit); | |
2451 | ||
2452 | return 1; /* The unit was added to the load queue */ | |
2453 | } | |
2454 | ||
2455 | int manager_load_unit( | |
2456 | Manager *m, | |
2457 | const char *name, | |
2458 | const char *path, | |
2459 | sd_bus_error *e, | |
2460 | Unit **ret) { | |
2461 | int r; | |
2462 | ||
2463 | assert(m); | |
2464 | assert(ret); | |
2465 | ||
2466 | /* This will load the unit config, but not actually start any services or anything. */ | |
2467 | ||
2468 | r = manager_load_unit_prepare(m, name, path, e, ret); | |
2469 | if (r <= 0) | |
2470 | return r; | |
2471 | ||
2472 | /* Unit was newly loaded */ | |
2473 | manager_dispatch_load_queue(m); | |
2474 | *ret = unit_follow_merge(*ret); | |
2475 | return 0; | |
2476 | } | |
2477 | ||
2478 | int manager_load_startable_unit_or_warn( | |
2479 | Manager *m, | |
2480 | const char *name, | |
2481 | const char *path, | |
2482 | Unit **ret) { | |
2483 | ||
2484 | /* Load a unit, make sure it loaded fully and is not masked. */ | |
2485 | ||
2486 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
2487 | Unit *unit; | |
2488 | int r; | |
2489 | ||
2490 | r = manager_load_unit(m, name, path, &error, &unit); | |
2491 | if (r < 0) | |
2492 | return log_error_errno(r, "Failed to load %s %s: %s", | |
2493 | name ? "unit" : "unit file", name ?: path, | |
2494 | bus_error_message(&error, r)); | |
2495 | ||
2496 | r = bus_unit_validate_load_state(unit, &error); | |
2497 | if (r < 0) | |
2498 | return log_error_errno(r, "%s", bus_error_message(&error, r)); | |
2499 | ||
2500 | *ret = unit; | |
2501 | return 0; | |
2502 | } | |
2503 | ||
2504 | void manager_clear_jobs(Manager *m) { | |
2505 | Job *j; | |
2506 | ||
2507 | assert(m); | |
2508 | ||
2509 | while ((j = hashmap_first(m->jobs))) | |
2510 | /* No need to recurse. We're cancelling all jobs. */ | |
2511 | job_finish_and_invalidate(j, JOB_CANCELED, false, false); | |
2512 | } | |
2513 | ||
2514 | void manager_unwatch_pidref(Manager *m, const PidRef *pid) { | |
2515 | assert(m); | |
2516 | ||
2517 | for (;;) { | |
2518 | Unit *u; | |
2519 | ||
2520 | u = manager_get_unit_by_pidref_watching(m, pid); | |
2521 | if (!u) | |
2522 | break; | |
2523 | ||
2524 | unit_unwatch_pidref(u, pid); | |
2525 | } | |
2526 | } | |
2527 | ||
2528 | static int manager_dispatch_run_queue(sd_event_source *source, void *userdata) { | |
2529 | Manager *m = ASSERT_PTR(userdata); | |
2530 | Job *j; | |
2531 | ||
2532 | assert(source); | |
2533 | ||
2534 | while ((j = prioq_peek(m->run_queue))) { | |
2535 | assert(j->installed); | |
2536 | assert(j->in_run_queue); | |
2537 | ||
2538 | (void) job_run_and_invalidate(j); | |
2539 | } | |
2540 | ||
2541 | if (m->n_running_jobs > 0) | |
2542 | manager_watch_jobs_in_progress(m); | |
2543 | ||
2544 | if (m->n_on_console > 0) | |
2545 | manager_watch_idle_pipe(m); | |
2546 | ||
2547 | return 1; | |
2548 | } | |
2549 | ||
2550 | void manager_trigger_run_queue(Manager *m) { | |
2551 | int r; | |
2552 | ||
2553 | assert(m); | |
2554 | ||
2555 | r = sd_event_source_set_enabled( | |
2556 | m->run_queue_event_source, | |
2557 | prioq_isempty(m->run_queue) ? SD_EVENT_OFF : SD_EVENT_ONESHOT); | |
2558 | if (r < 0) | |
2559 | log_warning_errno(r, "Failed to enable job run queue event source, ignoring: %m"); | |
2560 | } | |
2561 | ||
2562 | static unsigned manager_dispatch_dbus_queue(Manager *m) { | |
2563 | unsigned n = 0, budget; | |
2564 | Unit *u; | |
2565 | Job *j; | |
2566 | ||
2567 | assert(m); | |
2568 | ||
2569 | /* When we are reloading, let's not wait with generating signals, since we need to exit the manager as quickly | |
2570 | * as we can. There's no point in throttling generation of signals in that case. */ | |
2571 | if (MANAGER_IS_RELOADING(m) || m->send_reloading_done || m->pending_reload_message) | |
2572 | budget = UINT_MAX; /* infinite budget in this case */ | |
2573 | else { | |
2574 | /* Anything to do at all? */ | |
2575 | if (!m->dbus_unit_queue && !m->dbus_job_queue) | |
2576 | return 0; | |
2577 | ||
2578 | /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's | |
2579 | * sit this cycle out, and process things in a later cycle when the queues got a bit emptier. */ | |
2580 | if (manager_bus_n_queued_write(m) > MANAGER_BUS_BUSY_THRESHOLD) | |
2581 | return 0; | |
2582 | ||
2583 | /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't | |
2584 | * overly full before this call we shouldn't increase it in size too wildly in one step, and we | |
2585 | * shouldn't monopolize CPU time with generating these messages. Note the difference in counting of | |
2586 | * this "budget" and the "threshold" above: the "budget" is decreased only once per generated message, | |
2587 | * regardless how many buses/direct connections it is enqueued on, while the "threshold" is applied to | |
2588 | * each queued instance of bus message, i.e. if the same message is enqueued to five buses/direct | |
2589 | * connections it will be counted five times. This difference in counting ("references" | |
2590 | * vs. "instances") is primarily a result of the fact that it's easier to implement it this way, | |
2591 | * however it also reflects the thinking that the "threshold" should put a limit on used queue memory, | |
2592 | * i.e. space, while the "budget" should put a limit on time. Also note that the "threshold" is | |
2593 | * currently chosen much higher than the "budget". */ | |
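| /* Concrete example: one unit change signal fanned out to the API bus plus two direct peer | |
| * connections decrements the budget once, but counts three times against the threshold. */ | |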
2594 | budget = MANAGER_BUS_MESSAGE_BUDGET; | |
2595 | } | |
2596 | ||
2597 | while (budget != 0 && (u = m->dbus_unit_queue)) { | |
2598 | ||
2599 | assert(u->in_dbus_queue); | |
2600 | ||
2601 | bus_unit_send_change_signal(u); | |
2602 | n++; | |
2603 | ||
2604 | if (budget != UINT_MAX) | |
2605 | budget--; | |
2606 | } | |
2607 | ||
2608 | while (budget != 0 && (j = m->dbus_job_queue)) { | |
2609 | assert(j->in_dbus_queue); | |
2610 | ||
2611 | bus_job_send_change_signal(j); | |
2612 | n++; | |
2613 | ||
2614 | if (budget != UINT_MAX) | |
2615 | budget--; | |
2616 | } | |
2617 | ||
2618 | if (m->send_reloading_done) { | |
2619 | m->send_reloading_done = false; | |
2620 | bus_manager_send_reloading(m, false); | |
2621 | n++; | |
2622 | } | |
2623 | ||
2624 | if (m->pending_reload_message) { | |
2625 | bus_send_pending_reload_message(m); | |
2626 | n++; | |
2627 | } | |
2628 | ||
2629 | return n; | |
2630 | } | |
2631 | ||
2632 | static bool manager_process_barrier_fd(char * const *tags, FDSet *fds) { | |
2633 | ||
2634 | /* Nothing else may be sent when using BARRIER=1 */ | |
2635 | if (strv_contains(tags, "BARRIER=1")) { | |
2636 | if (strv_length(tags) != 1) | |
2637 | log_warning("Extra notification messages sent with BARRIER=1, ignoring everything."); | |
2638 | else if (fdset_size(fds) != 1) | |
2639 | log_warning("Got incorrect number of fds with BARRIER=1, closing them."); | |
2640 | ||
2641 | /* Drop the message if BARRIER=1 was found */ | |
2642 | return true; | |
2643 | } | |
2644 | ||
2645 | return false; | |
2646 | } | |
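| /* The client side of this protocol (what sd_notify_barrier() in libsystemd implements, roughly): | |
| * send "BARRIER=1" with the write end of a pipe attached as the sole fd, then poll the read end for | |
| * POLLHUP, which fires once we processed the message here and the fd set, pipe end included, got | |
| * closed. */ | |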
2647 | ||
2648 | static void manager_invoke_notify_message( | |
2649 | Manager *m, | |
2650 | Unit *u, | |
2651 | PidRef *pidref, | |
2652 | const struct ucred *ucred, | |
2653 | char * const *tags, | |
2654 | FDSet *fds) { | |
2655 | ||
2656 | assert(m); | |
2657 | assert(u); | |
2658 | assert(pidref_is_set(pidref)); | |
2659 | assert(ucred); | |
2660 | assert(pidref->pid == ucred->pid); | |
2661 | assert(tags); | |
2662 | ||
2663 | if (u->notifygen == m->notifygen) /* Already invoked on this same unit in this same iteration? */ | |
2664 | return; | |
2665 | u->notifygen = m->notifygen; | |
2666 | ||
2667 | if (UNIT_VTABLE(u)->notify_message) | |
2668 | UNIT_VTABLE(u)->notify_message(u, pidref, ucred, tags, fds); | |
2669 | ||
2670 | else if (DEBUG_LOGGING) { | |
2671 | _cleanup_free_ char *joined = strv_join(tags, ", "); | |
2672 | char buf[CELLESCAPE_DEFAULT_LENGTH]; | |
2673 | ||
2674 | log_unit_debug(u, "Got notification message from unexpected unit type, ignoring: %s", | |
2675 | joined ? cellescape(buf, sizeof(buf), joined) : "(null)"); | |
2676 | } | |
2677 | } | |
2678 | ||
2679 | static int manager_get_units_for_pidref(Manager *m, const PidRef *pidref, Unit ***ret_units) { | |
2680 | /* Determine array of every unit that is interested in the specified process */ | |
2681 | ||
2682 | assert(m); | |
2683 | assert(pidref_is_set(pidref)); | |
2684 | ||
2685 | Unit *u1, *u2, **array; | |
2686 | u1 = manager_get_unit_by_pidref_cgroup(m, pidref); | |
2687 | u2 = hashmap_get(m->watch_pids, pidref); | |
2688 | array = hashmap_get(m->watch_pids_more, pidref); | |
2689 | ||
2690 | size_t n = 0; | |
2691 | if (u1) | |
2692 | n++; | |
2693 | if (u2) | |
2694 | n++; | |
2695 | if (array) | |
2696 | for (size_t j = 0; array[j]; j++) | |
2697 | n++; | |
2698 | ||
2699 | assert(n <= INT_MAX); /* Make sure we can reasonably return the counter as "int" */ | |
2700 | ||
2701 | if (ret_units) { | |
2702 | _cleanup_free_ Unit **units = NULL; | |
2703 | ||
2704 | if (n > 0) { | |
2705 | units = new(Unit*, n + 1); | |
2706 | if (!units) | |
2707 | return -ENOMEM; | |
2708 | ||
2709 | /* We return a dense array, and put the "main" unit first, i.e. unit in whose cgroup | |
2710 | * the process currently is. Note that we do not bother with filtering duplicates | |
2711 | * here. */ | |
2712 | ||
2713 | size_t i = 0; | |
2714 | if (u1) | |
2715 | units[i++] = u1; | |
2716 | if (u2) | |
2717 | units[i++] = u2; | |
2718 | if (array) | |
2719 | for (size_t j = 0; array[j]; j++) | |
2720 | units[i++] = array[j]; | |
2721 | assert(i == n); | |
2722 | ||
2723 | units[i] = NULL; /* end array in an extra NULL */ | |
2724 | } | |
2725 | ||
2726 | *ret_units = TAKE_PTR(units); | |
2727 | } | |
2728 | ||
2729 | return (int) n; | |
2730 | } | |
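| /* Example: a process running in foo.service's cgroup whose PID bar.service additionally watches | |
| * yields the array { foo.service, bar.service, NULL } and a return value of 2, with the cgroup | |
| * owner placed first. */ | |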
2731 | ||
2732 | static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
2733 | Manager *m = ASSERT_PTR(userdata); | |
2734 | _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL; | |
2735 | struct ucred ucred; | |
2736 | _cleanup_(fdset_free_asyncp) FDSet *fds = NULL; | |
2737 | int r; | |
2738 | ||
2739 | assert(m->notify_fd == fd); | |
2740 | ||
2741 | if (revents != EPOLLIN) { | |
2742 | log_warning("Got unexpected poll event for notify fd."); | |
2743 | return 0; | |
2744 | } | |
2745 | ||
2746 | _cleanup_strv_free_ char **tags = NULL; | |
2747 | r = notify_recv_with_fds_strv(m->notify_fd, &tags, &ucred, &pidref, &fds); | |
2748 | if (r == -EAGAIN) | |
2749 | return 0; | |
2750 | if (r < 0) | |
2751 | /* If this is any other, real error, then stop processing this socket. This of course means | |
2752 | * we won't take notification messages anymore, but that's still better than busy looping: | |
2753 | * being woken up over and over again, but being unable to actually read the message from the | |
2754 | * socket. */ | |
2755 | return r; | |
2756 | ||
2757 | /* Possibly a barrier fd, let's see. */ | |
2758 | if (manager_process_barrier_fd(tags, fds)) { | |
2759 | log_debug("Received barrier notification message from PID " PID_FMT ".", pidref.pid); | |
2760 | return 0; | |
2761 | } | |
2762 | ||
2763 | /* Increase the generation counter used for filtering out duplicate unit invocations. */ | |
2764 | m->notifygen++; | |
2765 | ||
2766 | /* Notify every unit that might be interested, which might be multiple. */ | |
2767 | _cleanup_free_ Unit **array = NULL; | |
2768 | ||
2769 | int n_array = manager_get_units_for_pidref(m, &pidref, &array); | |
2770 | if (n_array < 0) { | |
2771 | log_warning_errno(n_array, "Failed to determine units for PID " PID_FMT ", ignoring: %m", pidref.pid); | |
2772 | return 0; | |
2773 | } | |
2774 | if (n_array == 0) | |
2775 | log_debug("Cannot find unit for notify message of PID "PID_FMT", ignoring.", pidref.pid); | |
2776 | else | |
2777 | /* And now invoke the per-unit callbacks. Note that manager_invoke_notify_message() will handle | |
2778 | * duplicate units – making sure we only invoke each unit's handler once. */ | |
2779 | FOREACH_ARRAY(u, array, n_array) | |
2780 | manager_invoke_notify_message(m, *u, &pidref, &ucred, tags, fds); | |
2781 | ||
2782 | if (!fdset_isempty(fds)) | |
2783 | log_warning("Got extra auxiliary fds with notification message, closing them."); | |
2784 | ||
2785 | return 0; | |
2786 | } | |
2787 | ||
2788 | static void manager_invoke_sigchld_event( | |
2789 | Manager *m, | |
2790 | Unit *u, | |
2791 | const siginfo_t *si) { | |
2792 | ||
2793 | assert(m); | |
2794 | assert(u); | |
2795 | assert(si); | |
2796 | ||
2797 | /* Already invoked the handler of this unit in this iteration? Then don't process this again */ | |
2798 | if (u->sigchldgen == m->sigchldgen) | |
2799 | return; | |
2800 | u->sigchldgen = m->sigchldgen; | |
2801 | ||
2802 | log_unit_debug(u, "Child "PID_FMT" belongs to %s.", si->si_pid, u->id); | |
2803 | unit_unwatch_pidref(u, &PIDREF_MAKE_FROM_PID(si->si_pid)); | |
2804 | ||
2805 | if (UNIT_VTABLE(u)->sigchld_event) | |
2806 | UNIT_VTABLE(u)->sigchld_event(u, si->si_pid, si->si_code, si->si_status); | |
2807 | } | |
2808 | ||
2809 | static int manager_dispatch_sigchld(sd_event_source *source, void *userdata) { | |
2810 | Manager *m = ASSERT_PTR(userdata); | |
2811 | siginfo_t si = {}; | |
2812 | int r; | |
2813 | ||
2814 | assert(source); | |
2815 | ||
2816 | /* First we call waitid() for a PID and do not reap the zombie. That way we can still access | |
2817 | * /proc/$PID for it while it is a zombie. */ | |
2818 | ||
2819 | if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) { | |
2820 | ||
2821 | if (errno != ECHILD) | |
2822 | log_error_errno(errno, "Failed to peek for child with waitid(), ignoring: %m"); | |
2823 | ||
2824 | goto turn_off; | |
2825 | } | |
2826 | ||
2827 | if (si.si_pid <= 0) | |
2828 | goto turn_off; | |
2829 | ||
2830 | if (SIGINFO_CODE_IS_DEAD(si.si_code)) { | |
2831 | _cleanup_free_ char *name = NULL; | |
2832 | (void) pid_get_comm(si.si_pid, &name); | |
2833 | ||
2834 | log_debug("Child "PID_FMT" (%s) died (code=%s, status=%i/%s)", | |
2835 | si.si_pid, strna(name), | |
2836 | sigchld_code_to_string(si.si_code), | |
2837 | si.si_status, | |
2838 | strna(si.si_code == CLD_EXITED | |
2839 | ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL) | |
2840 | : signal_to_string(si.si_status))); | |
2841 | ||
2842 | /* Increase the generation counter used for filtering out duplicate unit invocations */ | |
2843 | m->sigchldgen++; | |
2844 | ||
2845 | /* We look this up by a PidRef that only consists of the PID. After all we couldn't create a | |
2846 | * pidfd here anymore even if we wanted to (since the process just exited). */ | |
2847 | PidRef pidref = PIDREF_MAKE_FROM_PID(si.si_pid); | |
2848 | ||
2849 | /* And now figure out the units this belongs to, there might be multiple... */ | |
2850 | _cleanup_free_ Unit **array = NULL; | |
2851 | int n_array = manager_get_units_for_pidref(m, &pidref, &array); | |
2852 | if (n_array < 0) | |
2853 | log_warning_errno(n_array, "Failed to get units for process " PID_FMT ", ignoring: %m", si.si_pid); | |
2854 | else if (n_array == 0) | |
2855 | log_debug("Got SIGCHLD for process " PID_FMT " we weren't interested in, ignoring.", si.si_pid); | |
2856 | else { | |
2857 | /* We check for an OOM condition, in case we got SIGCHLD before the OOM notification. | |
2858 | * We only do this for the unit in whose cgroup the PID was, i.e. the first entry in the array. */ | |
2859 | (void) unit_check_oom(array[0]); | |
2860 | ||
2861 | /* We check if systemd-oomd performed a kill so that we log and notify appropriately */ | |
2862 | (void) unit_check_oomd_kill(array[0]); | |
2863 | ||
2864 | /* Finally, execute them all. Note that the array might contain duplicates, but that's fine, | |
2865 | * manager_invoke_sigchld_event() will ensure we only invoke the handlers once for each | |
2866 | * iteration. */ | |
2867 | FOREACH_ARRAY(u, array, n_array) | |
2868 | manager_invoke_sigchld_event(m, *u, &si); | |
2869 | } | |
2870 | } | |
2871 | ||
2872 | /* And now, we actually reap the zombie. */ | |
2873 | if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) { | |
2874 | log_error_errno(errno, "Failed to dequeue child, ignoring: %m"); | |
2875 | return 0; | |
2876 | } | |
2877 | ||
2878 | return 0; | |
2879 | ||
2880 | turn_off: | |
2881 | /* All children processed for now, turn off event source */ | |
2882 | ||
2883 | r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF); | |
2884 | if (r < 0) | |
2885 | return log_error_errno(r, "Failed to disable SIGCHLD event source: %m"); | |
2886 | ||
2887 | return 0; | |
2888 | } | |
2889 | ||
2890 | static void manager_start_special(Manager *m, const char *name, JobMode mode) { | |
2891 | Job *job; | |
2892 | ||
2893 | if (manager_add_job_by_name_and_warn(m, JOB_START, name, mode, NULL, &job) < 0) | |
2894 | return; | |
2895 | ||
2896 | const char *s = unit_status_string(job->unit, NULL); | |
2897 | ||
2898 | log_info("Activating special unit %s...", s); | |
2899 | ||
2900 | (void) sd_notifyf(/* unset_environment= */ false, | |
2901 | "STATUS=Activating special unit %s...", s); | |
2902 | m->status_ready = false; | |
2903 | } | |
2904 | ||
2905 | static void manager_handle_ctrl_alt_del(Manager *m) { | |
2906 | assert(m); | |
2907 | ||
2908 | /* If the user presses C-A-D more than 7 times within 2s, we reboot/shutdown immediately, | |
2909 | * unless it was disabled in system.conf. */ | |
2910 | ||
2911 | if (ratelimit_below(&m->ctrl_alt_del_ratelimit) || m->cad_burst_action == EMERGENCY_ACTION_NONE) | |
2912 | manager_start_special(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE_IRREVERSIBLY); | |
2913 | else | |
2914 | emergency_action( | |
2915 | m, | |
2916 | m->cad_burst_action, | |
2917 | EMERGENCY_ACTION_WARN, | |
2918 | /* reboot_arg= */ NULL, | |
2919 | /* exit_status= */ -1, | |
2920 | "Ctrl-Alt-Del was pressed more than 7 times within 2s"); | |
2921 | } | |
2922 | ||
2923 | static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
2924 | Manager *m = ASSERT_PTR(userdata); | |
2925 | ssize_t n; | |
2926 | struct signalfd_siginfo sfsi; | |
2927 | int r; | |
2928 | ||
2929 | assert(m->signal_fd == fd); | |
2930 | ||
2931 | if (revents != EPOLLIN) { | |
2932 | log_warning("Got unexpected events from signal file descriptor."); | |
2933 | return 0; | |
2934 | } | |
2935 | ||
2936 | n = read(m->signal_fd, &sfsi, sizeof(sfsi)); | |
2937 | if (n < 0) { | |
2938 | if (ERRNO_IS_TRANSIENT(errno)) | |
2939 | return 0; | |
2940 | ||
2941 | /* We return an error here, which will kill this handler, | |
2942 | * to avoid a busy loop on read error. */ | |
2943 | return log_error_errno(errno, "Reading from signal fd failed: %m"); | |
2944 | } | |
2945 | if (n != sizeof(sfsi)) { | |
2946 | log_warning("Truncated read from signal fd (%zi bytes), ignoring!", n); | |
2947 | return 0; | |
2948 | } | |
2949 | ||
2950 | log_received_signal(sfsi.ssi_signo == SIGCHLD || | |
2951 | (sfsi.ssi_signo == SIGTERM && MANAGER_IS_USER(m)) | |
2952 | ? LOG_DEBUG : LOG_INFO, | |
2953 | &sfsi); | |
2954 | ||
2955 | switch (sfsi.ssi_signo) { | |
2956 | ||
2957 | case SIGCHLD: | |
2958 | r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON); | |
2959 | if (r < 0) | |
2960 | log_warning_errno(r, "Failed to enable SIGCHLD event source, ignoring: %m"); | |
2961 | ||
2962 | break; | |
2963 | ||
2964 | case SIGTERM: | |
2965 | if (MANAGER_IS_SYSTEM(m)) { | |
2966 | /* This is for compatibility with the original sysvinit */ | |
2967 | m->objective = MANAGER_REEXECUTE; | |
2968 | break; | |
2969 | } | |
2970 | ||
2971 | _fallthrough_; | |
2972 | case SIGINT: | |
2973 | if (MANAGER_IS_SYSTEM(m)) | |
2974 | manager_handle_ctrl_alt_del(m); | |
2975 | else | |
2976 | manager_start_special(m, SPECIAL_EXIT_TARGET, JOB_REPLACE_IRREVERSIBLY); | |
2977 | break; | |
2978 | ||
2979 | case SIGWINCH: | |
2980 | /* This is a nop on non-init */ | |
2981 | if (MANAGER_IS_SYSTEM(m)) | |
2982 | manager_start_special(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE); | |
2983 | ||
2984 | break; | |
2985 | ||
2986 | case SIGPWR: | |
2987 | /* This is a nop on non-init */ | |
2988 | if (MANAGER_IS_SYSTEM(m)) | |
2989 | manager_start_special(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE); | |
2990 | ||
2991 | break; | |
2992 | ||
2993 | case SIGUSR1: | |
2994 | if (manager_dbus_is_running(m, false)) { | |
2995 | log_info("Trying to reconnect to bus..."); | |
2996 | ||
2997 | (void) bus_init_api(m); | |
2998 | ||
2999 | if (MANAGER_IS_SYSTEM(m)) | |
3000 | (void) bus_init_system(m); | |
3001 | } else | |
3002 | manager_start_special(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE); | |
3003 | ||
3004 | break; | |
3005 | ||
3006 | case SIGUSR2: { | |
3007 | _cleanup_free_ char *dump = NULL; | |
3008 | ||
3009 | r = manager_get_dump_string(m, /* patterns= */ NULL, &dump); | |
3010 | if (r < 0) { | |
3011 | log_warning_errno(r, "Failed to acquire manager dump: %m"); | |
3012 | break; | |
3013 | } | |
3014 | ||
3015 | log_dump(LOG_INFO, dump); | |
3016 | break; | |
3017 | } | |
3018 | ||
3019 | case SIGHUP: | |
3020 | m->objective = MANAGER_RELOAD; | |
3021 | break; | |
3022 | ||
3023 | default: { | |
3024 | ||
3025 | if (MANAGER_IS_SYSTEM(m)) { | |
3026 | /* Starting SIGRTMIN+0 */ | |
3027 | static const struct { | |
3028 | const char *target; | |
3029 | JobMode mode; | |
3030 | } target_table[] = { | |
3031 | [0] = { SPECIAL_DEFAULT_TARGET, JOB_ISOLATE }, | |
3032 | [1] = { SPECIAL_RESCUE_TARGET, JOB_ISOLATE }, | |
3033 | [2] = { SPECIAL_EMERGENCY_TARGET, JOB_ISOLATE }, | |
3034 | [3] = { SPECIAL_HALT_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3035 | [4] = { SPECIAL_POWEROFF_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3036 | [5] = { SPECIAL_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3037 | [6] = { SPECIAL_KEXEC_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3038 | [7] = { SPECIAL_SOFT_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY }, | |
3039 | }; | |
3040 | ||
3041 | /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */ | |
3042 | static const ManagerObjective objective_table[] = { | |
3043 | [0] = MANAGER_HALT, | |
3044 | [1] = MANAGER_POWEROFF, | |
3045 | [2] = MANAGER_REBOOT, | |
3046 | [3] = MANAGER_KEXEC, | |
3047 | [4] = MANAGER_SOFT_REBOOT, | |
3048 | }; | |
3049 | ||
3050 | if ((int) sfsi.ssi_signo >= SIGRTMIN+0 && | |
3051 | (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) { | |
3052 | int idx = (int) sfsi.ssi_signo - SIGRTMIN; | |
3053 | manager_start_special(m, target_table[idx].target, target_table[idx].mode); | |
3054 | break; | |
3055 | } | |
3056 | ||
3057 | if ((int) sfsi.ssi_signo >= SIGRTMIN+13 && | |
3058 | (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(objective_table)) { | |
3059 | m->objective = objective_table[sfsi.ssi_signo - SIGRTMIN - 13]; | |
3060 | break; | |
3061 | } | |
3062 | } | |
3063 | ||
3064 | switch (sfsi.ssi_signo - SIGRTMIN) { | |
3065 | ||
3066 | case 18: { | |
3067 | bool generic = false; | |
3068 | ||
3069 | if (sfsi.ssi_code != SI_QUEUE) | |
3070 | generic = true; | |
3071 | else { | |
3072 | /* Override a few select commands by our own PID1-specific logic */ | |
3073 | ||
3074 | switch (sfsi.ssi_int) { | |
3075 | ||
3076 | case _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE..._COMMON_SIGNAL_COMMAND_LOG_LEVEL_END: | |
3077 | manager_override_log_level(m, sfsi.ssi_int - _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE); | |
3078 | break; | |
3079 | ||
3080 | case COMMON_SIGNAL_COMMAND_CONSOLE: | |
3081 | manager_override_log_target(m, LOG_TARGET_CONSOLE); | |
3082 | break; | |
3083 | ||
3084 | case COMMON_SIGNAL_COMMAND_JOURNAL: | |
3085 | manager_override_log_target(m, LOG_TARGET_JOURNAL); | |
3086 | break; | |
3087 | ||
3088 | case COMMON_SIGNAL_COMMAND_KMSG: | |
3089 | manager_override_log_target(m, LOG_TARGET_KMSG); | |
3090 | break; | |
3091 | ||
3092 | case COMMON_SIGNAL_COMMAND_NULL: | |
3093 | manager_override_log_target(m, LOG_TARGET_NULL); | |
3094 | break; | |
3095 | ||
3096 | case MANAGER_SIGNAL_COMMAND_DUMP_JOBS: { | |
3097 | _cleanup_free_ char *dump_jobs = NULL; | |
3098 | ||
3099 | r = manager_get_dump_jobs_string(m, /* patterns= */ NULL, " ", &dump_jobs); | |
3100 | if (r < 0) { | |
3101 | log_warning_errno(r, "Failed to acquire manager jobs dump: %m"); | |
3102 | break; | |
3103 | } | |
3104 | ||
3105 | log_dump(LOG_INFO, dump_jobs); | |
3106 | break; | |
3107 | } | |
3108 | ||
3109 | default: | |
3110 | generic = true; | |
3111 | } | |
3112 | } | |
3113 | ||
3114 | if (generic) | |
3115 | return sigrtmin18_handler(source, &sfsi, NULL); | |
3116 | ||
3117 | break; | |
3118 | } | |
3119 | ||
3120 | case 20: | |
3121 | manager_override_show_status(m, SHOW_STATUS_YES, "signal"); | |
3122 | break; | |
3123 | ||
3124 | case 21: | |
3125 | manager_override_show_status(m, SHOW_STATUS_NO, "signal"); | |
3126 | break; | |
3127 | ||
3128 | case 22: | |
3129 | manager_override_log_level(m, LOG_DEBUG); | |
3130 | break; | |
3131 | ||
3132 | case 23: | |
3133 | manager_restore_original_log_level(m); | |
3134 | break; | |
3135 | ||
3136 | case 24: | |
3137 | if (MANAGER_IS_USER(m)) { | |
3138 | m->objective = MANAGER_EXIT; | |
3139 | return 0; | |
3140 | } | |
3141 | ||
3142 | /* This is a nop on init */ | |
3143 | break; | |
3144 | ||
3145 | case 25: | |
3146 | m->objective = MANAGER_REEXECUTE; | |
3147 | break; | |
3148 | ||
3149 | case 26: | |
3150 | case 29: /* compatibility: used to be mapped to LOG_TARGET_SYSLOG_OR_KMSG */ | |
3151 | manager_restore_original_log_target(m); | |
3152 | break; | |
3153 | ||
3154 | case 27: | |
3155 | manager_override_log_target(m, LOG_TARGET_CONSOLE); | |
3156 | break; | |
3157 | ||
3158 | case 28: | |
3159 | manager_override_log_target(m, LOG_TARGET_KMSG); | |
3160 | break; | |
3161 | ||
3162 | default: | |
3163 | log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo)); | |
3164 | } | |
3165 | }} | |
3166 | ||
3167 | return 0; | |
3168 | } | |
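 | ||
 | /* For illustration: the SIGRTMIN+18 commands dispatched above arrive via sigqueue(), which | |
 | * sets ssi_code to SI_QUEUE and carries the command in ssi_int. A minimal client sketch | |
 | * (hypothetical, not part of this file): | |
 | * | |
 | *     #include <signal.h> | |
 | * | |
 | *     union sigval sv = { .sival_int = COMMON_SIGNAL_COMMAND_KMSG }; | |
 | *     (void) sigqueue(1, SIGRTMIN+18, sv); | |
 | * | |
 | * A plain kill(1, SIGRTMIN+18) carries no SI_QUEUE payload and hence takes the generic | |
 | * sigrtmin18_handler() path instead. */ | |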
3169 | ||
3170 | static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
3171 | Manager *m = ASSERT_PTR(userdata); | |
3172 | Unit *u; | |
3173 | ||
3174 | log_struct(LOG_DEBUG, | |
3175 | LOG_MESSAGE_ID(SD_MESSAGE_TIME_CHANGE_STR), | |
3176 | LOG_MESSAGE("Time has been changed")); | |
3177 | ||
3178 | /* Restart the watch */ | |
3179 | (void) manager_setup_time_change(m); | |
3180 | ||
3181 | HASHMAP_FOREACH(u, m->units) | |
3182 | if (UNIT_VTABLE(u)->time_change) | |
3183 | UNIT_VTABLE(u)->time_change(u); | |
3184 | ||
3185 | return 0; | |
3186 | } | |
3187 | ||
3188 | static int manager_dispatch_timezone_change( | |
3189 | sd_event_source *source, | |
3190 | const struct inotify_event *e, | |
3191 | void *userdata) { | |
3192 | ||
3193 | Manager *m = ASSERT_PTR(userdata); | |
3194 | int changed; | |
3195 | Unit *u; | |
3196 | ||
3197 | log_debug("inotify event for /etc/localtime"); | |
3198 | ||
3199 | changed = manager_read_timezone_stat(m); | |
3200 | if (changed <= 0) | |
3201 | return changed; | |
3202 | ||
3203 | /* Something changed, restart the watch, to ensure we watch the new /etc/localtime if it changed */ | |
3204 | (void) manager_setup_timezone_change(m); | |
3205 | ||
3206 | /* Read the new timezone */ | |
3207 | tzset(); | |
3208 | ||
3209 | log_debug("Timezone has been changed (now: %s).", tzname[daylight]); | |
3210 | ||
3211 | HASHMAP_FOREACH(u, m->units) | |
3212 | if (UNIT_VTABLE(u)->timezone_change) | |
3213 | UNIT_VTABLE(u)->timezone_change(u); | |
3214 | ||
3215 | return 0; | |
3216 | } | |
3217 | ||
3218 | static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) { | |
3219 | Manager *m = ASSERT_PTR(userdata); | |
3220 | ||
3221 | assert(m->idle_pipe[2] == fd); | |
3222 | ||
3223 | /* There's at least one Type=idle child that just gave up on us waiting for the boot process to | |
3224 | * complete. Let's now turn off any further console output if there's at least one service that needs | |
3225 | * console access, so that from now on our own output should not spill into that service's output | |
3226 | * anymore. After all, we support Type=idle only to beautify console output and it generally is set | |
3227 | * on services that want to own the console exclusively without our interference. */ | |
3228 | m->no_console_output = m->n_on_console > 0; | |
3229 | ||
3230 | /* Acknowledge the child's request, and let all other children know too that they shouldn't wait | |
3231 | * any longer by closing the pipes towards them, which is what they are waiting for. */ | |
3232 | manager_close_idle_pipe(m); | |
3233 | ||
3234 | return 0; | |
3235 | } | |
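 | ||
 | /* Rough sketch of the child side of this handshake, as implied by the handler above: a | |
 | * Type=idle child first waits (with a timeout) for PID1 to close the pipe, and signals that | |
 | * it gave up waiting by writing a byte on the acknowledgement end, which is what wakes up | |
 | * this handler: | |
 | * | |
 | *     if (fd_wait_for_event(idle_pipe_read_fd, POLLHUP, IDLE_TIMEOUT_USEC) <= 0) | |
 | *             (void) write(idle_pipe_ack_fd, "x", 1); | |
 | * | |
 | * The fd names and the exact byte written are placeholders for this sketch. */ | |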
3236 | ||
3237 | static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata) { | |
3238 | Manager *m = ASSERT_PTR(userdata); | |
3239 | int r; | |
3240 | ||
3241 | assert(source); | |
3242 | ||
3243 | manager_print_jobs_in_progress(m); | |
3244 | ||
3245 | r = sd_event_source_set_time_relative(source, JOBS_IN_PROGRESS_PERIOD_USEC); | |
3246 | if (r < 0) | |
3247 | return r; | |
3248 | ||
3249 | return sd_event_source_set_enabled(source, SD_EVENT_ONESHOT); | |
3250 | } | |
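 | ||
 | /* The handler above re-arms itself: sd-event time sources fire once and then stay disabled | |
 | * unless re-enabled. A generic sketch of this periodic-timer pattern: | |
 | * | |
 | *     static int on_timer(sd_event_source *s, usec_t usec, void *userdata) { | |
 | *             do_periodic_work(userdata); | |
 | *             (void) sd_event_source_set_time_relative(s, PERIOD_USEC); | |
 | *             return sd_event_source_set_enabled(s, SD_EVENT_ONESHOT); | |
 | *     } | |
 | * | |
 | * where do_periodic_work() and PERIOD_USEC are placeholders for this sketch. */ | |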
3251 | ||
3252 | int manager_loop(Manager *m) { | |
3253 | RateLimit rl = { .interval = 1*USEC_PER_SEC, .burst = 50000 }; | |
3254 | int r; | |
3255 | ||
3256 | assert(m); | |
3257 | assert(m->objective == MANAGER_OK); /* Ensure manager_startup() has been called */ | |
3258 | ||
3259 | manager_check_finished(m); | |
3260 | ||
3261 | /* There might still be some zombies hanging around from before we were exec()'ed. Let's reap them. */ | |
3262 | r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON); | |
3263 | if (r < 0) | |
3264 | return log_error_errno(r, "Failed to enable SIGCHLD event source: %m"); | |
3265 | ||
3266 | while (m->objective == MANAGER_OK) { | |
3267 | ||
3268 | if (!ratelimit_below(&rl)) { | |
3269 | /* Yay, something is going seriously wrong, pause a little */ | |
3270 | log_warning("Looping too fast. Throttling execution a little."); | |
3271 | sleep(1); | |
3272 | } | |
3273 | ||
3274 | (void) watchdog_ping(); | |
3275 | ||
3276 | if (manager_dispatch_load_queue(m) > 0) | |
3277 | continue; | |
3278 | ||
3279 | if (manager_dispatch_gc_job_queue(m) > 0) | |
3280 | continue; | |
3281 | ||
3282 | if (manager_dispatch_gc_unit_queue(m) > 0) | |
3283 | continue; | |
3284 | ||
3285 | if (manager_dispatch_cleanup_queue(m) > 0) | |
3286 | continue; | |
3287 | ||
3288 | if (manager_dispatch_cgroup_realize_queue(m) > 0) | |
3289 | continue; | |
3290 | ||
3291 | if (manager_dispatch_start_when_upheld_queue(m) > 0) | |
3292 | continue; | |
3293 | ||
3294 | if (manager_dispatch_stop_when_bound_queue(m) > 0) | |
3295 | continue; | |
3296 | ||
3297 | if (manager_dispatch_stop_when_unneeded_queue(m) > 0) | |
3298 | continue; | |
3299 | ||
3300 | if (manager_dispatch_release_resources_queue(m) > 0) | |
3301 | continue; | |
3302 | ||
3303 | if (manager_dispatch_stop_notify_queue(m) > 0) | |
3304 | continue; | |
3305 | ||
3306 | if (manager_dispatch_dbus_queue(m) > 0) | |
3307 | continue; | |
3308 | ||
3309 | /* Sleep for watchdog runtime wait time */ | |
3310 | r = sd_event_run(m->event, watchdog_runtime_wait(/* divisor= */ 2)); | |
3311 | if (r < 0) | |
3312 | return log_error_errno(r, "Failed to run event loop: %m"); | |
3313 | } | |
3314 | ||
3315 | return m->objective; | |
3316 | } | |
3317 | ||
3318 | int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u) { | |
3319 | _cleanup_free_ char *n = NULL; | |
3320 | sd_id128_t invocation_id; | |
3321 | Unit *u; | |
3322 | int r; | |
3323 | ||
3324 | assert(m); | |
3325 | assert(s); | |
3326 | assert(_u); | |
3327 | ||
3328 | r = unit_name_from_dbus_path(s, &n); | |
3329 | if (r < 0) | |
3330 | return r; | |
3331 | ||
3332 | /* Permit addressing units by invocation ID: if the passed bus path is suffixed by a 128-bit ID then | |
3333 | * we use it as invocation ID. */ | |
3334 | r = sd_id128_from_string(n, &invocation_id); | |
3335 | if (r >= 0) { | |
3336 | u = hashmap_get(m->units_by_invocation_id, &invocation_id); | |
3337 | if (u) { | |
3338 | *_u = u; | |
3339 | return 0; | |
3340 | } | |
3341 | ||
3342 | return sd_bus_error_setf(e, BUS_ERROR_NO_UNIT_FOR_INVOCATION_ID, | |
3343 | "No unit with the specified invocation ID " SD_ID128_FORMAT_STR " known.", | |
3344 | SD_ID128_FORMAT_VAL(invocation_id)); | |
3345 | } | |
3346 | ||
3347 | /* If this didn't work, we check if this is a unit name */ | |
3348 | if (!unit_name_is_valid(n, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) { | |
3349 | _cleanup_free_ char *nn = NULL; | |
3350 | ||
3351 | nn = cescape(n); | |
3352 | return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, | |
3353 | "Unit name %s is neither a valid invocation ID nor unit name.", strnull(nn)); | |
3354 | } | |
3355 | ||
3356 | r = manager_load_unit(m, n, NULL, e, &u); | |
3357 | if (r < 0) | |
3358 | return r; | |
3359 | ||
3360 | *_u = u; | |
3361 | return 0; | |
3362 | } | |
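 | ||
 | /* Example inputs accepted above (illustrative values): a regular unit path such as | |
 | * "/org/freedesktop/systemd1/unit/ssh_2eservice" decodes to "ssh.service" via | |
 | * unit_name_from_dbus_path(), while a path whose last label parses as a 128-bit ID, e.g. | |
 | * ".../unit/60ba7bbb3ee2441c8fd84e8b32e42e57", is resolved through the invocation ID | |
 | * hashmap instead. */ | |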
3363 | ||
3364 | int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) { | |
3365 | const char *p; | |
3366 | unsigned id; | |
3367 | Job *j; | |
3368 | int r; | |
3369 | ||
3370 | assert(m); | |
3371 | assert(s); | |
3372 | assert(_j); | |
3373 | ||
3374 | p = startswith(s, "/org/freedesktop/systemd1/job/"); | |
3375 | if (!p) | |
3376 | return -EINVAL; | |
3377 | ||
3378 | r = safe_atou(p, &id); | |
3379 | if (r < 0) | |
3380 | return r; | |
3381 | ||
3382 | j = manager_get_job(m, id); | |
3383 | if (!j) | |
3384 | return -ENOENT; | |
3385 | ||
3386 | *_j = j; | |
3387 | ||
3388 | return 0; | |
3389 | } | |
3390 | ||
3391 | void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) { | |
3392 | ||
3393 | #if HAVE_AUDIT | |
3394 | _cleanup_free_ char *p = NULL; | |
3395 | const char *msg; | |
3396 | int audit_fd, r; | |
3397 | ||
3398 | assert(m); | |
3399 | assert(u); | |
3400 | ||
3401 | if (!MANAGER_IS_SYSTEM(m)) | |
3402 | return; | |
3403 | ||
3404 | /* Don't generate audit events if the service was already started and we're just deserializing */ | |
3405 | if (MANAGER_IS_RELOADING(m)) | |
3406 | return; | |
3407 | ||
3408 | audit_fd = get_core_audit_fd(); | |
3409 | if (audit_fd < 0) | |
3410 | return; | |
3411 | ||
3412 | r = unit_name_to_prefix_and_instance(u->id, &p); | |
3413 | if (r < 0) { | |
3414 | log_warning_errno(r, "Failed to extract prefix and instance of unit name, ignoring: %m"); | |
3415 | return; | |
3416 | } | |
3417 | ||
3418 | msg = strjoina("unit=", p); | |
3419 | if (audit_log_user_comm_message(audit_fd, type, msg, "systemd", NULL, NULL, NULL, success) < 0) { | |
3420 | if (ERRNO_IS_PRIVILEGE(errno)) { | |
3421 | /* We aren't allowed to send audit messages? Then let's not retry again. */ | |
3422 | log_debug_errno(errno, "Failed to send audit message, closing audit socket: %m"); | |
3423 | close_core_audit_fd(); | |
3424 | } else | |
3425 | log_warning_errno(errno, "Failed to send audit message, ignoring: %m"); | |
3426 | } | |
3427 | #endif | |
3428 | } | |
3429 | ||
3430 | void manager_send_unit_plymouth(Manager *m, Unit *u) { | |
3431 | _cleanup_free_ char *message = NULL; | |
3432 | int c, r; | |
3433 | ||
3434 | assert(m); | |
3435 | assert(u); | |
3436 | ||
3437 | if (!MANAGER_IS_SYSTEM(m)) | |
3438 | return; | |
3439 | ||
3440 | /* Don't generate plymouth events if the service was already started and we're just deserializing */ | |
3441 | if (MANAGER_IS_RELOADING(m)) | |
3442 | return; | |
3443 | ||
3444 | if (detect_container() > 0) | |
3445 | return; | |
3446 | ||
3447 | if (!UNIT_VTABLE(u)->notify_plymouth) | |
3448 | return; | |
3449 | ||
3450 | c = asprintf(&message, "U\x02%c%s%c", (int) (strlen(u->id) + 1), u->id, '\x00'); | |
3451 | if (c < 0) | |
3452 | return (void) log_oom(); | |
3453 | ||
3454 | /* We set SOCK_NONBLOCK here so that we rather drop the message than wait for plymouth */ | |
3455 | r = plymouth_send_raw(message, c, SOCK_NONBLOCK); | |
3456 | if (r < 0) | |
3457 | log_full_errno(ERRNO_IS_NO_PLYMOUTH(r) ? LOG_DEBUG : LOG_WARNING, r, | |
3458 | "Failed to communicate with plymouth: %m"); | |
3459 | } | |
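 | ||
 | /* Byte-level view of the message built above (worked example): for u->id "foo.service" | |
 | * (11 bytes), the asprintf() yields 'U', 0x02, 0x0c (the name length plus its trailing | |
 | * NUL), "foo.service", 0x00. */ | |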
3460 | ||
3461 | void manager_send_unit_supervisor(Manager *m, Unit *u, bool active) { | |
3462 | assert(m); | |
3463 | assert(u); | |
3464 | ||
3465 | /* Notify a "supervisor" process about our progress, i.e. a container manager, hypervisor, or | |
3466 | * surrounding service manager. */ | |
3467 | ||
3468 | if (MANAGER_IS_RELOADING(m)) | |
3469 | return; | |
3470 | ||
3471 | if (!UNIT_VTABLE(u)->notify_supervisor) | |
3472 | return; | |
3473 | ||
3474 | if (in_initrd()) /* Only send these once we left the initrd */ | |
3475 | return; | |
3476 | ||
3477 | (void) sd_notifyf(/* unset_environment= */ false, | |
3478 | active ? "X_SYSTEMD_UNIT_ACTIVE=%s" : "X_SYSTEMD_UNIT_INACTIVE=%s", | |
3479 | u->id); | |
3480 | } | |
3481 | ||
3482 | usec_t manager_get_watchdog(Manager *m, WatchdogType t) { | |
3483 | assert(m); | |
3484 | ||
3485 | if (MANAGER_IS_USER(m)) | |
3486 | return USEC_INFINITY; | |
3487 | ||
3488 | if (m->watchdog_overridden[t] != USEC_INFINITY) | |
3489 | return m->watchdog_overridden[t]; | |
3490 | ||
3491 | return m->watchdog[t]; | |
3492 | } | |
3493 | ||
3494 | void manager_set_watchdog(Manager *m, WatchdogType t, usec_t timeout) { | |
3495 | ||
3496 | assert(m); | |
3497 | ||
3498 | if (MANAGER_IS_USER(m)) | |
3499 | return; | |
3500 | ||
3501 | if (m->watchdog_overridden[t] == USEC_INFINITY) { | |
3502 | if (t == WATCHDOG_RUNTIME) | |
3503 | (void) watchdog_setup(timeout); | |
3504 | else if (t == WATCHDOG_PRETIMEOUT) | |
3505 | (void) watchdog_setup_pretimeout(timeout); | |
3506 | } | |
3507 | ||
3508 | m->watchdog[t] = timeout; | |
3509 | } | |
3510 | ||
3511 | void manager_override_watchdog(Manager *m, WatchdogType t, usec_t timeout) { | |
3512 | usec_t usec; | |
3513 | ||
3514 | assert(m); | |
3515 | ||
3516 | if (MANAGER_IS_USER(m)) | |
3517 | return; | |
3518 | ||
3519 | usec = timeout == USEC_INFINITY ? m->watchdog[t] : timeout; | |
3520 | if (t == WATCHDOG_RUNTIME) | |
3521 | (void) watchdog_setup(usec); | |
3522 | else if (t == WATCHDOG_PRETIMEOUT) | |
3523 | (void) watchdog_setup_pretimeout(usec); | |
3524 | ||
3525 | m->watchdog_overridden[t] = timeout; | |
3526 | } | |
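 | ||
 | /* Illustrative semantics of the override above: passing USEC_INFINITY drops the runtime | |
 | * override again and re-applies the configured m->watchdog[t] value, e.g.: | |
 | * | |
 | *     manager_override_watchdog(m, WATCHDOG_RUNTIME, 10 * USEC_PER_SEC);   (tighten to 10s) | |
 | *     manager_override_watchdog(m, WATCHDOG_RUNTIME, USEC_INFINITY);       (back to config) | |
 | */ | |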
3527 | ||
3528 | int manager_set_watchdog_pretimeout_governor(Manager *m, const char *governor) { | |
3529 | _cleanup_free_ char *p = NULL; | |
3530 | int r; | |
3531 | ||
3532 | assert(m); | |
3533 | ||
3534 | if (MANAGER_IS_USER(m)) | |
3535 | return 0; | |
3536 | ||
3537 | if (streq_ptr(m->watchdog_pretimeout_governor, governor)) | |
3538 | return 0; | |
3539 | ||
3540 | p = strdup(governor); | |
3541 | if (!p) | |
3542 | return -ENOMEM; | |
3543 | ||
3544 | r = watchdog_setup_pretimeout_governor(governor); | |
3545 | if (r < 0) | |
3546 | return r; | |
3547 | ||
3548 | return free_and_replace(m->watchdog_pretimeout_governor, p); | |
3549 | } | |
3550 | ||
3551 | int manager_override_watchdog_pretimeout_governor(Manager *m, const char *governor) { | |
3552 | _cleanup_free_ char *p = NULL; | |
3553 | int r; | |
3554 | ||
3555 | assert(m); | |
3556 | ||
3557 | if (MANAGER_IS_USER(m)) | |
3558 | return 0; | |
3559 | ||
3560 | if (streq_ptr(m->watchdog_pretimeout_governor_overridden, governor)) | |
3561 | return 0; | |
3562 | ||
3563 | p = strdup(governor); | |
3564 | if (!p) | |
3565 | return -ENOMEM; | |
3566 | ||
3567 | r = watchdog_setup_pretimeout_governor(governor); | |
3568 | if (r < 0) | |
3569 | return r; | |
3570 | ||
3571 | return free_and_replace(m->watchdog_pretimeout_governor_overridden, p); | |
3572 | } | |
3573 | ||
3574 | int manager_reload(Manager *m) { | |
3575 | _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL; | |
3576 | _cleanup_fdset_free_ FDSet *fds = NULL; | |
3577 | _cleanup_fclose_ FILE *f = NULL; | |
3578 | int r; | |
3579 | ||
3580 | assert(m); | |
3581 | ||
3582 | r = manager_open_serialization(m, &f); | |
3583 | if (r < 0) | |
3584 | return log_error_errno(r, "Failed to create serialization file: %m"); | |
3585 | ||
3586 | fds = fdset_new(); | |
3587 | if (!fds) | |
3588 | return log_oom(); | |
3589 | ||
3590 | /* We are officially in reload mode from here on. */ | |
3591 | reloading = manager_reloading_start(m); | |
3592 | ||
3593 | r = manager_serialize(m, f, fds, false); | |
3594 | if (r < 0) | |
3595 | return r; | |
3596 | ||
3597 | r = finish_serialization_file(f); | |
3598 | if (r < 0) | |
3599 | return log_error_errno(r, "Failed to finish serialization: %m"); | |
3600 | ||
3601 | /* 💀 This is the point of no return, from here on there is no way back. 💀 */ | |
3602 | reloading = NULL; | |
3603 | ||
3604 | bus_manager_send_reloading(m, true); | |
3605 | ||
3606 | /* Start by flushing out all jobs and units, all generated units, all runtime environments, all dynamic users | |
3607 | * and everything else that is worth flushing out. We'll get it all back from the serialization — if we need | |
3608 | * it. */ | |
3609 | ||
3610 | manager_clear_jobs_and_units(m); | |
3611 | lookup_paths_flush_generator(&m->lookup_paths); | |
3612 | exec_shared_runtime_vacuum(m); | |
3613 | dynamic_user_vacuum(m, false); | |
3614 | m->uid_refs = hashmap_free(m->uid_refs); | |
3615 | m->gid_refs = hashmap_free(m->gid_refs); | |
3616 | ||
3617 | (void) manager_run_environment_generators(m); | |
3618 | (void) manager_run_generators(m); | |
3619 | ||
3620 | /* We flushed out generated files, for which we don't watch mtime, so we should flush the old map. */ | |
3621 | manager_free_unit_name_maps(m); | |
3622 | m->unit_file_state_outdated = false; | |
3623 | ||
3624 | /* First, enumerate what we can from kernel and suchlike */ | |
3625 | manager_enumerate_perpetual(m); | |
3626 | manager_enumerate(m); | |
3627 | ||
3628 | /* Second, deserialize our stored data */ | |
3629 | r = manager_deserialize(m, f, fds); | |
3630 | if (r < 0) | |
3631 | log_warning_errno(r, "Deserialization failed, proceeding anyway: %m"); | |
3632 | ||
3633 | /* We don't need the serialization anymore */ | |
3634 | f = safe_fclose(f); | |
3635 | ||
3636 | /* Re-register notify_fd as event source, and set up other sockets/communication channels we might need */ | |
3637 | (void) manager_setup_notify(m); | |
3638 | (void) manager_setup_user_lookup_fd(m); | |
3639 | (void) manager_setup_handoff_timestamp_fd(m); | |
3640 | (void) manager_setup_pidref_transport_fd(m); | |
3641 | ||
3642 | /* Clean up deserialized bus track information. They're never consumed during reload (as opposed to | |
3643 | * reexec) since we do not disconnect from the bus. */ | |
3644 | m->subscribed_as_strv = strv_free(m->subscribed_as_strv); | |
3645 | m->deserialized_bus_id = SD_ID128_NULL; | |
3646 | ||
3647 | /* Third, fire things up! */ | |
3648 | manager_coldplug(m); | |
3649 | ||
3650 | /* Clean up runtime objects no longer referenced */ | |
3651 | manager_vacuum(m); | |
3652 | ||
3653 | /* Consider the reload process complete now. */ | |
3654 | assert(m->n_reloading > 0); | |
3655 | m->n_reloading--; | |
3656 | ||
3657 | manager_ready(m); | |
3658 | ||
3659 | m->send_reloading_done = true; | |
3660 | return 0; | |
3661 | } | |
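 | ||
 | /* Usage note (an inference from the SIGHUP case in manager_dispatch_signal_fd() above): | |
 | * "systemctl daemon-reload" reaches this function via the D-Bus Reload() method, and a | |
 | * plain "kill -HUP 1" sets the same MANAGER_RELOAD objective. */ | |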
3662 | ||
3663 | void manager_reset_failed(Manager *m) { | |
3664 | Unit *u; | |
3665 | ||
3666 | assert(m); | |
3667 | ||
3668 | HASHMAP_FOREACH(u, m->units) | |
3669 | unit_reset_failed(u); | |
3670 | } | |
3671 | ||
3672 | bool manager_unit_inactive_or_pending(Manager *m, const char *name) { | |
3673 | Unit *u; | |
3674 | ||
3675 | assert(m); | |
3676 | assert(name); | |
3677 | ||
3678 | /* Returns true if the unit is inactive or going down */ | |
3679 | u = manager_get_unit(m, name); | |
3680 | if (!u) | |
3681 | return true; | |
3682 | ||
3683 | return unit_inactive_or_pending(u); | |
3684 | } | |
3685 | ||
3686 | static void log_taint_string(Manager *m) { | |
3687 | assert(m); | |
3688 | ||
3689 | if (MANAGER_IS_USER(m) || m->taint_logged) | |
3690 | return; | |
3691 | ||
3692 | m->taint_logged = true; /* only check for taint once */ | |
3693 | ||
3694 | _cleanup_free_ char *taint = taint_string(); | |
3695 | if (isempty(taint)) | |
3696 | return; | |
3697 | ||
3698 | log_struct(LOG_NOTICE, | |
3699 | LOG_MESSAGE("System is tainted: %s", taint), | |
3700 | LOG_ITEM("TAINT=%s", taint), | |
3701 | LOG_MESSAGE_ID(SD_MESSAGE_TAINTED_STR)); | |
3702 | } | |
3703 | ||
3704 | static void manager_notify_finished(Manager *m) { | |
3705 | usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec; | |
3706 | ||
3707 | if (MANAGER_IS_TEST_RUN(m)) | |
3708 | return; | |
3709 | ||
3710 | if (MANAGER_IS_SYSTEM(m) && m->soft_reboots_count > 0) { | |
3711 | /* The soft-reboot case, where we only report data for the last reboot */ | |
3712 | firmware_usec = loader_usec = initrd_usec = kernel_usec = 0; | |
3713 | total_usec = userspace_usec = usec_sub_unsigned(m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic, | |
3714 | m->timestamps[MANAGER_TIMESTAMP_SHUTDOWN_START].monotonic); | |
3715 | ||
3716 | log_struct(LOG_INFO, | |
3717 | LOG_MESSAGE_ID(SD_MESSAGE_STARTUP_FINISHED_STR), | |
3718 | LOG_ITEM("USERSPACE_USEC="USEC_FMT, userspace_usec), | |
3719 | LOG_MESSAGE("Soft-reboot finished in %s, counter is now at %u.", | |
3720 | FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC), | |
3721 | m->soft_reboots_count)); | |
3722 | } else if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) { | |
3723 | char buf[FORMAT_TIMESPAN_MAX + STRLEN(" (firmware) + ") + FORMAT_TIMESPAN_MAX + STRLEN(" (loader) + ")] | |
3724 | = {}; | |
3725 | char *p = buf; | |
3726 | size_t size = sizeof buf; | |
3727 | ||
3728 | /* Note that MANAGER_TIMESTAMP_KERNEL's monotonic value is always at 0, and | |
3729 | * MANAGER_TIMESTAMP_FIRMWARE's and MANAGER_TIMESTAMP_LOADER's monotonic values should be considered | |
3730 | * negative. */ | |
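 | ||
 | /* Worked example of the arithmetic below, with made-up numbers: the pre-kernel stamps | |
 | * behave as positive distances before the kernel clock's zero (hence the addition for | |
 | * total_usec). With FIRMWARE=4s, LOADER=1s, KERNEL=0, USERSPACE=3s, FINISH=9s: | |
 | * | |
 | *     firmware_usec  = 4s - 1s = 3s   (time spent in the firmware) | |
 | *     loader_usec    = 1s - 0s = 1s   (time spent in the boot loader) | |
 | *     userspace_usec = 9s - 3s = 6s | |
 | *     total_usec     = 4s + 9s = 13s  (firmware handoff until boot finished) | |
 | */ | |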
3731 | ||
3732 | firmware_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic - m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic; | |
3733 | loader_usec = m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic; | |
3734 | userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic; | |
3735 | total_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic + m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic; | |
3736 | ||
3737 | if (firmware_usec > 0) | |
3738 | size = strpcpyf(&p, size, "%s (firmware) + ", FORMAT_TIMESPAN(firmware_usec, USEC_PER_MSEC)); | |
3739 | if (loader_usec > 0) | |
3740 | size = strpcpyf(&p, size, "%s (loader) + ", FORMAT_TIMESPAN(loader_usec, USEC_PER_MSEC)); | |
3741 | ||
3742 | if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) { | |
3743 | ||
3744 | /* The initrd case on bare-metal */ | |
3745 | kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic; | |
3746 | initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic; | |
3747 | ||
3748 | log_struct(LOG_INFO, | |
3749 | LOG_MESSAGE_ID(SD_MESSAGE_STARTUP_FINISHED_STR), | |
3750 | LOG_ITEM("KERNEL_USEC="USEC_FMT, kernel_usec), | |
3751 | LOG_ITEM("INITRD_USEC="USEC_FMT, initrd_usec), | |
3752 | LOG_ITEM("USERSPACE_USEC="USEC_FMT, userspace_usec), | |
3753 | LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (initrd) + %s (userspace) = %s.", | |
3754 | buf, | |
3755 | FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC), | |
3756 | FORMAT_TIMESPAN(initrd_usec, USEC_PER_MSEC), | |
3757 | FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC), | |
3758 | FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC))); | |
3759 | } else { | |
3760 | /* The initrd-less case on bare-metal */ | |
3761 | ||
3762 | kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic; | |
3763 | initrd_usec = 0; | |
3764 | ||
3765 | log_struct(LOG_INFO, | |
3766 | LOG_MESSAGE_ID(SD_MESSAGE_STARTUP_FINISHED_STR), | |
3767 | LOG_ITEM("KERNEL_USEC="USEC_FMT, kernel_usec), | |
3768 | LOG_ITEM("USERSPACE_USEC="USEC_FMT, userspace_usec), | |
3769 | LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (userspace) = %s.", | |
3770 | buf, | |
3771 | FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC), | |
3772 | FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC), | |
3773 | FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC))); | |
3774 | } | |
3775 | } else { | |
3776 | /* The container and --user case */ | |
3777 | firmware_usec = loader_usec = initrd_usec = kernel_usec = 0; | |
3778 | total_usec = userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic; | |
3779 | ||
3780 | log_struct(LOG_INFO, | |
3781 | LOG_MESSAGE_ID(SD_MESSAGE_USER_STARTUP_FINISHED_STR), | |
3782 | LOG_ITEM("USERSPACE_USEC="USEC_FMT, userspace_usec), | |
3783 | LOG_MESSAGE("Startup finished in %s.", | |
3784 | FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC))); | |
3785 | } | |
3786 | ||
3787 | bus_manager_send_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec); | |
3788 | ||
3789 | if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) | |
3790 | watchdog_report_if_missing(); | |
3791 | ||
3792 | log_taint_string(m); | |
3793 | } | |
3794 | ||
3795 | static void manager_send_ready_on_basic_target(Manager *m) { | |
3796 | int r; | |
3797 | ||
3798 | assert(m); | |
3799 | ||
3800 | /* We send READY=1 on reaching basic.target only when running in --user mode. */ | |
3801 | if (!MANAGER_IS_USER(m) || m->ready_sent) | |
3802 | return; | |
3803 | ||
3804 | r = sd_notify(/* unset_environment= */ false, | |
3805 | "READY=1\n" | |
3806 | "STATUS=Reached " SPECIAL_BASIC_TARGET "."); | |
3807 | if (r < 0) | |
3808 | log_warning_errno(r, "Failed to send readiness notification, ignoring: %m"); | |
3809 | ||
3810 | m->ready_sent = true; | |
3811 | m->status_ready = false; | |
3812 | } | |
3813 | ||
3814 | static void manager_send_ready_on_idle(Manager *m) { | |
3815 | int r; | |
3816 | ||
3817 | assert(m); | |
3818 | ||
3819 | /* Skip the notification if nothing changed. */ | |
3820 | if (m->ready_sent && m->status_ready) | |
3821 | return; | |
3822 | ||
3823 | /* Note that for user managers, we might have already sent READY=1 in manager_send_ready_on_basic_target(). | |
3824 | * But we still need to flush STATUS=. The second READY=1 will be treated as a noop so it doesn't | |
3825 | * hurt to send it twice. */ | |
3826 | r = sd_notify(/* unset_environment= */ false, | |
3827 | "READY=1\n" | |
3828 | "STATUS=Ready."); | |
3829 | if (r < 0) | |
3830 | log_full_errno(m->ready_sent ? LOG_DEBUG : LOG_WARNING, r, | |
3831 | "Failed to send readiness notification, ignoring: %m"); | |
3832 | ||
3833 | m->ready_sent = m->status_ready = true; | |
3834 | } | |
3835 | ||
3836 | static void manager_check_basic_target(Manager *m) { | |
3837 | Unit *u; | |
3838 | ||
3839 | assert(m); | |
3840 | ||
3841 | /* Small shortcut */ | |
3842 | if (m->ready_sent && m->taint_logged) | |
3843 | return; | |
3844 | ||
3845 | u = manager_get_unit(m, SPECIAL_BASIC_TARGET); | |
3846 | if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) | |
3847 | return; | |
3848 | ||
3849 | /* For user managers, send out READY=1 as soon as we reach basic.target */ | |
3850 | manager_send_ready_on_basic_target(m); | |
3851 | ||
3852 | /* Log the taint string as soon as we reach basic.target */ | |
3853 | log_taint_string(m); | |
3854 | } | |
3855 | ||
3856 | void manager_check_finished(Manager *m) { | |
3857 | assert(m); | |
3858 | ||
3859 | if (MANAGER_IS_RELOADING(m)) | |
3860 | return; | |
3861 | ||
3862 | /* Verify that we have entered the event loop already, and not left it again. */ | |
3863 | if (!MANAGER_IS_RUNNING(m)) | |
3864 | return; | |
3865 | ||
3866 | manager_check_basic_target(m); | |
3867 | ||
3868 | if (!hashmap_isempty(m->jobs)) { | |
3869 | if (m->jobs_in_progress_event_source) | |
3870 | /* Ignore any failure, this is only for feedback */ | |
3871 | (void) sd_event_source_set_time(m->jobs_in_progress_event_source, | |
3872 | manager_watch_jobs_next_time(m)); | |
3873 | return; | |
3874 | } | |
3875 | ||
3876 | /* The jobs hashmap tends to grow a lot during boot, and then it's not reused until shutdown. Let's | |
3877 | * kill the hashmap if it is relatively large. */ | |
3878 | if (hashmap_buckets(m->jobs) > hashmap_size(m->units) / 10) | |
3879 | m->jobs = hashmap_free(m->jobs); | |
3880 | ||
3881 | manager_send_ready_on_idle(m); | |
3882 | ||
3883 | /* Notify Type=idle units that we are done now */ | |
3884 | manager_close_idle_pipe(m); | |
3885 | ||
3886 | if (MANAGER_IS_FINISHED(m)) | |
3887 | return; | |
3888 | ||
3889 | manager_flip_auto_status(m, false, "boot finished"); | |
3890 | ||
3891 | /* Turn off confirm spawn now */ | |
3892 | m->confirm_spawn = NULL; | |
3893 | ||
3894 | /* No need to update ask password status when we're going non-interactive */ | |
3895 | manager_close_ask_password(m); | |
3896 | ||
3897 | /* This is no longer the first boot */ | |
3898 | manager_set_first_boot(m, false); | |
3899 | ||
3900 | dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_FINISH); | |
3901 | ||
3902 | manager_notify_finished(m); | |
3903 | ||
3904 | manager_invalidate_startup_units(m); | |
3905 | } | |
3906 | ||
3907 | void manager_send_reloading(Manager *m) { | |
3908 | assert(m); | |
3909 | ||
3910 | /* Let whoever invoked us know that we are now reloading */ | |
3911 | (void) notify_reloading_full(/* status = */ NULL); | |
3912 | ||
3913 | /* And ensure that we'll send READY=1 again as soon as we are ready again */ | |
3914 | m->ready_sent = false; | |
3915 | } | |
3916 | ||
3917 | static bool generator_path_any(char * const *paths) { | |
3918 | ||
3919 | /* Optimization: skip the whole process, and don't create output directories, if no generators are found. */ | |
3920 | ||
3921 | STRV_FOREACH(i, paths) { | |
3922 | if (access(*i, F_OK) >= 0) | |
3923 | return true; | |
3924 | if (errno != ENOENT) | |
3925 | log_warning_errno(errno, "Failed to check if generator dir '%s' exists, assuming not: %m", *i); | |
3926 | } | |
3927 | ||
3928 | return false; | |
3929 | } | |
3930 | ||
3931 | static int manager_run_environment_generators(Manager *m) { | |
3932 | _cleanup_strv_free_ char **paths = NULL; | |
3933 | int r; | |
3934 | ||
3935 | assert(m); | |
3936 | ||
3937 | if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_ENV_GENERATORS)) | |
3938 | return 0; | |
3939 | ||
3940 | paths = env_generator_binary_paths(m->runtime_scope); | |
3941 | if (!paths) | |
3942 | return log_oom(); | |
3943 | ||
3944 | if (!generator_path_any(paths)) | |
3945 | return 0; | |
3946 | ||
3947 | char **tmp = NULL; /* this is only used in the forked process, no cleanup here */ | |
3948 | void *args[_STDOUT_CONSUME_MAX] = { | |
3949 | [STDOUT_GENERATE] = &tmp, | |
3950 | [STDOUT_COLLECT] = &tmp, | |
3951 | [STDOUT_CONSUME] = &m->transient_environment, | |
3952 | }; | |
3953 | ||
3954 | WITH_UMASK(0022) | |
3955 | r = execute_directories( | |
3956 | (const char* const*) paths, | |
3957 | DEFAULT_TIMEOUT_USEC, | |
3958 | gather_environment, | |
3959 | args, | |
3960 | /* argv[]= */ NULL, | |
3961 | m->transient_environment, | |
3962 | EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID); | |
3963 | return r; | |
3964 | } | |
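 | ||
 | /* For illustration: an environment generator is any executable in one of the paths | |
 | * returned by env_generator_binary_paths() that prints KEY=VALUE lines on stdout, which | |
 | * gather_environment() folds into m->transient_environment. A hypothetical example, with a | |
 | * made-up file name: | |
 | * | |
 | *     $ cat /usr/lib/systemd/user-environment-generators/50-example | |
 | *     #!/bin/sh | |
 | *     echo "EXAMPLE_VAR=1" | |
 | */ | |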
3965 | ||
3966 | static int build_generator_environment(Manager *m, char ***ret) { | |
3967 | _cleanup_strv_free_ char **nl = NULL; | |
3968 | Virtualization v; | |
3969 | ConfidentialVirtualization cv; | |
3970 | int r; | |
3971 | ||
3972 | assert(m); | |
3973 | assert(ret); | |
3974 | ||
3975 | /* Generators oftentimes want to know some basic facts about the environment they run in, in order to | |
3976 | * adjust generated units to that. Let's pass down some bits of information that are easy for us to | |
3977 | * determine (but a bit harder for generator scripts to determine), as environment variables. */ | |
3978 | ||
3979 | nl = strv_copy(m->transient_environment); | |
3980 | if (!nl) | |
3981 | return -ENOMEM; | |
3982 | ||
3983 | r = strv_env_assign(&nl, "SYSTEMD_SCOPE", runtime_scope_to_string(m->runtime_scope)); | |
3984 | if (r < 0) | |
3985 | return r; | |
3986 | ||
3987 | if (MANAGER_IS_SYSTEM(m)) { | |
3988 | /* Note that $SYSTEMD_IN_INITRD may be used to override the initrd detection in much of our | |
3989 | * codebase. This is hence more than purely informational. It will shortcut detection of the | |
3990 | * initrd state if generators invoke our own tools. But that's OK, as it would come to the | |
3991 | * same results (hopefully). */ | |
3992 | r = strv_env_assign(&nl, "SYSTEMD_IN_INITRD", one_zero(in_initrd())); | |
3993 | if (r < 0) | |
3994 | return r; | |
3995 | ||
3996 | if (m->soft_reboots_count > 0) { | |
3997 | r = strv_env_assignf(&nl, "SYSTEMD_SOFT_REBOOTS_COUNT", "%u", m->soft_reboots_count); | |
3998 | if (r < 0) | |
3999 | return r; | |
4000 | } | |
4001 | ||
4002 | if (m->first_boot >= 0) { | |
4003 | r = strv_env_assign(&nl, "SYSTEMD_FIRST_BOOT", one_zero(m->first_boot)); | |
4004 | if (r < 0) | |
4005 | return r; | |
4006 | } | |
4007 | } | |
4008 | ||
4009 | v = detect_virtualization(); | |
4010 | if (v < 0) | |
4011 | log_debug_errno(v, "Failed to detect virtualization, ignoring: %m"); | |
4012 | else if (v > 0) { | |
4013 | const char *s; | |
4014 | ||
4015 | s = strjoina(VIRTUALIZATION_IS_VM(v) ? "vm:" : | |
4016 | VIRTUALIZATION_IS_CONTAINER(v) ? "container:" : ":", | |
4017 | virtualization_to_string(v)); | |
4018 | ||
4019 | r = strv_env_assign(&nl, "SYSTEMD_VIRTUALIZATION", s); | |
4020 | if (r < 0) | |
4021 | return r; | |
4022 | } | |
4023 | ||
4024 | cv = detect_confidential_virtualization(); | |
4025 | if (cv < 0) | |
4026 | log_debug_errno(cv, "Failed to detect confidential virtualization, ignoring: %m"); | |
4027 | else if (cv > 0) { | |
4028 | r = strv_env_assign(&nl, "SYSTEMD_CONFIDENTIAL_VIRTUALIZATION", confidential_virtualization_to_string(cv)); | |
4029 | if (r < 0) | |
4030 | return r; | |
4031 | } | |
4032 | ||
4033 | r = strv_env_assign(&nl, "SYSTEMD_ARCHITECTURE", architecture_to_string(uname_architecture())); | |
4034 | if (r < 0) | |
4035 | return r; | |
4036 | ||
4037 | *ret = TAKE_PTR(nl); | |
4038 | return 0; | |
4039 | } | |
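 | ||
 | /* Illustrative result: on a typical KVM guest the environment assembled above might | |
 | * contain, in addition to the transient environment, entries such as: | |
 | * | |
 | *     SYSTEMD_SCOPE=system | |
 | *     SYSTEMD_IN_INITRD=0 | |
 | *     SYSTEMD_FIRST_BOOT=0 | |
 | *     SYSTEMD_VIRTUALIZATION=vm:kvm | |
 | *     SYSTEMD_ARCHITECTURE=x86-64 | |
 | */ | |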
4040 | ||
4041 | static int manager_execute_generators(Manager *m, char * const *paths, bool remount_ro) { | |
4042 | _cleanup_strv_free_ char **ge = NULL; | |
4043 | int r; | |
4044 | ||
4045 | assert(m); | |
4046 | ||
4047 | r = build_generator_environment(m, &ge); | |
4048 | if (r < 0) | |
4049 | return log_error_errno(r, "Failed to build generator environment: %m"); | |
4050 | ||
4051 | if (remount_ro) { | |
4052 | /* Remount most of the filesystem tree read-only. We leave /sys/ as-is, because our code | |
4053 | * checks whether it is read-only to detect containerized execution environments. We leave | |
4054 | * /run/ as-is too, because that's where our output goes. We also leave /proc/ and /dev/shm/ | |
4055 | * because they're API, and /tmp/, which safe_fork() mounted for us. | |
4056 | */ | |
4057 | r = bind_remount_recursive("/", MS_RDONLY, MS_RDONLY, | |
4058 | STRV_MAKE("/sys", "/run", "/proc", "/dev/shm", "/tmp")); | |
4059 | if (r < 0) | |
4060 | log_warning_errno(r, "Read-only bind remount failed, ignoring: %m"); | |
4061 | } | |
4062 | ||
4063 | const char *argv[] = { | |
4064 | NULL, /* Leave this empty, execute_directory() will fill something in */ | |
4065 | m->lookup_paths.generator, | |
4066 | m->lookup_paths.generator_early, | |
4067 | m->lookup_paths.generator_late, | |
4068 | NULL, | |
4069 | }; | |
4070 | ||
4071 | BLOCK_WITH_UMASK(0022); | |
4072 | return execute_directories( | |
4073 | (const char* const*) paths, | |
4074 | DEFAULT_TIMEOUT_USEC, | |
4075 | /* callbacks= */ NULL, /* callback_args= */ NULL, | |
4076 | (char**) argv, | |
4077 | ge, | |
4078 | EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID | EXEC_DIR_WARN_WORLD_WRITABLE); | |
4079 | } | |
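 | ||
 | /* For illustration, each generator binary ends up invoked by execute_directories() with | |
 | * the three output directories as argv[1..3], e.g. in the system scope: | |
 | * | |
 | *     /usr/lib/systemd/system-generators/zz-example \ | |
 | *             /run/systemd/generator \ | |
 | *             /run/systemd/generator.early \ | |
 | *             /run/systemd/generator.late | |
 | * | |
 | * where "zz-example" is a made-up name for this sketch. */ | |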
4080 | ||
4081 | static int manager_run_generators(Manager *m) { | |
4082 | ForkFlags flags = FORK_RESET_SIGNALS | FORK_WAIT | FORK_NEW_MOUNTNS | FORK_MOUNTNS_SLAVE; | |
4083 | _cleanup_strv_free_ char **paths = NULL; | |
4084 | int r; | |
4085 | ||
4086 | assert(m); | |
4087 | ||
4088 | if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_GENERATORS)) | |
4089 | return 0; | |
4090 | ||
4091 | paths = generator_binary_paths(m->runtime_scope); | |
4092 | if (!paths) | |
4093 | return log_oom(); | |
4094 | ||
4095 | if (!generator_path_any(paths)) | |
4096 | return 0; | |
4097 | ||
4098 | r = lookup_paths_mkdir_generator(&m->lookup_paths); | |
4099 | if (r < 0) { | |
4100 | log_error_errno(r, "Failed to create generator directories: %m"); | |
4101 | goto finish; | |
4102 | } | |
4103 | ||
4104 | /* If we are the system manager, we fork and invoke the generators in a sanitized mount namespace. If | |
4105 | * we are the user manager, let's just execute the generators directly. We might not have the | |
4106 | * necessary privileges, and the system manager has already mounted /tmp/ and everything else for us. | |
4107 | */ | |
4108 | if (MANAGER_IS_USER(m)) { | |
4109 | r = manager_execute_generators(m, paths, /* remount_ro= */ false); | |
4110 | goto finish; | |
4111 | } | |
4112 | ||
4113 | /* On some systems /tmp/ doesn't exist, and on some other systems we cannot create it at all. Avoid | |
4114 | * trying to mount a private tmpfs on it as there's no one-size-fits-all solution. */ | |
4115 | if (is_dir("/tmp", /* follow= */ false) > 0 && !MANAGER_IS_TEST_RUN(m)) | |
4116 | flags |= FORK_PRIVATE_TMP; | |
4117 | ||
4118 | r = safe_fork("(sd-gens)", flags, NULL); | |
4119 | if (r == 0) { | |
4120 | r = manager_execute_generators(m, paths, /* remount_ro= */ true); | |
4121 | _exit(r >= 0 ? EXIT_SUCCESS : EXIT_FAILURE); | |
4122 | } | |
4123 | if (r < 0) { | |
4124 | if (!ERRNO_IS_PRIVILEGE(r) && r != -EINVAL) { | |
4125 | log_error_errno(r, "Failed to fork off sandboxing environment for executing generators: %m"); | |
4126 | goto finish; | |
4127 | } | |
4128 | ||
4129 | /* Failed to fork with a new mount namespace? Maybe we are running in a container environment | |
4130 | * with seccomp, or without the necessary capability. | |
4131 | * | |
4132 | * We also allow -EINVAL to allow running without CLONE_NEWNS. | |
4133 | * | |
4134 | * Also, when running on non-native userland architecture via systemd-nspawn and | |
4135 | * qemu-user-static QEMU-emulator, clone() with CLONE_NEWNS fails with EINVAL, see | |
4136 | * https://github.com/systemd/systemd/issues/28901. | |
4137 | */ | |
4138 | log_debug_errno(r, | |
4139 | "Failed to fork off sandboxing environment for executing generators. " | |
4140 | "Falling back to execute generators without sandboxing: %m"); | |
4141 | r = manager_execute_generators(m, paths, /* remount_ro= */ false); | |
4142 | } | |
4143 | ||
4144 | finish: | |
4145 | lookup_paths_trim_generator(&m->lookup_paths); | |
4146 | return r; | |
4147 | } | |
4148 | ||
4149 | int manager_transient_environment_add(Manager *m, char **plus) { | |
4150 | char **a; | |
4151 | ||
4152 | assert(m); | |
4153 | ||
4154 | if (strv_isempty(plus)) | |
4155 | return 0; | |
4156 | ||
4157 | a = strv_env_merge(m->transient_environment, plus); | |
4158 | if (!a) | |
4159 | return log_oom(); | |
4160 | ||
4161 | sanitize_environment(a); | |
4162 | ||
4163 | return strv_free_and_replace(m->transient_environment, a); | |
4164 | } | |
4165 | ||
4166 | int manager_client_environment_modify( | |
4167 | Manager *m, | |
4168 | char **minus, | |
4169 | char **plus) { | |
4170 | ||
4171 | char **a = NULL, **b = NULL, **l; | |
4172 | ||
4173 | assert(m); | |
4174 | ||
4175 | if (strv_isempty(minus) && strv_isempty(plus)) | |
4176 | return 0; | |
4177 | ||
4178 | l = m->client_environment; | |
4179 | ||
4180 | if (!strv_isempty(minus)) { | |
4181 | a = strv_env_delete(l, 1, minus); | |
4182 | if (!a) | |
4183 | return -ENOMEM; | |
4184 | ||
4185 | l = a; | |
4186 | } | |
4187 | ||
4188 | if (!strv_isempty(plus)) { | |
4189 | b = strv_env_merge(l, plus); | |
4190 | if (!b) { | |
4191 | strv_free(a); | |
4192 | return -ENOMEM; | |
4193 | } | |
4194 | ||
4195 | l = b; | |
4196 | } | |
4197 | ||
4198 | if (m->client_environment != l) | |
4199 | strv_free(m->client_environment); | |
4200 | ||
4201 | if (a != l) | |
4202 | strv_free(a); | |
4203 | if (b != l) | |
4204 | strv_free(b); | |
4205 | ||
4206 | m->client_environment = sanitize_environment(l); | |
4207 | return 0; | |
4208 | } | |
4209 | ||
4210 | int manager_get_effective_environment(Manager *m, char ***ret) { | |
4211 | char **l; | |
4212 | ||
4213 | assert(m); | |
4214 | assert(ret); | |
4215 | ||
4216 | l = strv_env_merge(m->transient_environment, m->client_environment); | |
4217 | if (!l) | |
4218 | return -ENOMEM; | |
4219 | ||
4220 | *ret = l; | |
4221 | return 0; | |
4222 | } | |
4223 | ||
4224 | int manager_set_unit_defaults(Manager *m, const UnitDefaults *defaults) { | |
4225 | _cleanup_free_ char *label = NULL; | |
4226 | struct rlimit *rlimit[_RLIMIT_MAX]; | |
4227 | int r; | |
4228 | ||
4229 | assert(m); | |
4230 | assert(defaults); | |
4231 | ||
4232 | if (streq_ptr(defaults->smack_process_label, "/")) | |
4233 | label = NULL; | |
4234 | else { | |
4235 | const char *l = defaults->smack_process_label; | |
4236 | #ifdef SMACK_DEFAULT_PROCESS_LABEL | |
4237 | if (!l) | |
4238 | l = SMACK_DEFAULT_PROCESS_LABEL; | |
4239 | #endif | |
4240 | if (l) { | |
4241 | label = strdup(l); | |
4242 | if (!label) | |
4243 | return -ENOMEM; | |
4244 | } else | |
4245 | label = NULL; | |
4246 | } | |
4247 | ||
4248 | r = rlimit_copy_all(rlimit, defaults->rlimit); | |
4249 | if (r < 0) | |
4250 | return r; | |
4251 | ||
4252 | m->defaults.std_output = defaults->std_output; | |
4253 | m->defaults.std_error = defaults->std_error; | |
4254 | ||
4255 | m->defaults.restart_usec = defaults->restart_usec; | |
4256 | m->defaults.timeout_start_usec = defaults->timeout_start_usec; | |
4257 | m->defaults.timeout_stop_usec = defaults->timeout_stop_usec; | |
4258 | m->defaults.timeout_abort_usec = defaults->timeout_abort_usec; | |
4259 | m->defaults.timeout_abort_set = defaults->timeout_abort_set; | |
4260 | m->defaults.device_timeout_usec = defaults->device_timeout_usec; | |
4261 | ||
4262 | m->defaults.start_limit = defaults->start_limit; | |
4263 | ||
4264 | m->defaults.memory_accounting = defaults->memory_accounting; | |
4265 | m->defaults.io_accounting = defaults->io_accounting; | |
4266 | m->defaults.tasks_accounting = defaults->tasks_accounting; | |
4267 | m->defaults.ip_accounting = defaults->ip_accounting; | |
4268 | ||
4269 | m->defaults.tasks_max = defaults->tasks_max; | |
4270 | m->defaults.timer_accuracy_usec = defaults->timer_accuracy_usec; | |
4271 | ||
4272 | m->defaults.oom_policy = defaults->oom_policy; | |
4273 | m->defaults.oom_score_adjust = defaults->oom_score_adjust; | |
4274 | m->defaults.oom_score_adjust_set = defaults->oom_score_adjust_set; | |
4275 | ||
4276 | m->defaults.memory_pressure_watch = defaults->memory_pressure_watch; | |
4277 | m->defaults.memory_pressure_threshold_usec = defaults->memory_pressure_threshold_usec; | |
4278 | ||
4279 | free_and_replace(m->defaults.smack_process_label, label); | |
4280 | rlimit_free_all(m->defaults.rlimit); | |
4281 | memcpy(m->defaults.rlimit, rlimit, sizeof(struct rlimit*) * _RLIMIT_MAX); | |
4282 | ||
4283 | return 0; | |
4284 | } | |
4285 | ||
4286 | void manager_recheck_dbus(Manager *m) { | |
4287 | assert(m); | |
4288 | ||
4289 | /* Connects to the bus if the dbus service and socket are running. If we are running in user mode | |
4290 | * this is all it does. In system mode we'll also connect to the system bus (which will most likely | |
4291 | * just reuse the connection of the API bus). That's because the system bus after all runs as a service | |
4292 | * of the system instance, while in the user instance we can assume it's already there. */ | |
4293 | ||
4294 | if (MANAGER_IS_RELOADING(m)) | |
4295 | return; /* don't check while we are reloading… */ | |
4296 | ||
4297 | if (manager_dbus_is_running(m, false)) { | |
4298 | (void) bus_init_api(m); | |
4299 | ||
4300 | if (MANAGER_IS_SYSTEM(m)) | |
4301 | (void) bus_init_system(m); | |
4302 | } else { | |
4303 | (void) bus_done_api(m); | |
4304 | ||
4305 | if (MANAGER_IS_SYSTEM(m)) | |
4306 | (void) bus_done_system(m); | |
4307 | } | |
4308 | } | |
4309 | ||
4310 | static bool manager_journal_is_running(Manager *m) { | |
4311 | Unit *u; | |
4312 | ||
4313 | assert(m); | |
4314 | ||
4315 | if (MANAGER_IS_TEST_RUN(m)) | |
4316 | return false; | |
4317 | ||
4318 | /* If we are the user manager we can safely assume that the journal is up */ | |
4319 | if (!MANAGER_IS_SYSTEM(m)) | |
4320 | return true; | |
4321 | ||
4322 | /* Check that the socket is not only up, but in RUNNING state */ | |
4323 | u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET); | |
4324 | if (!u) | |
4325 | return false; | |
4326 | if (SOCKET(u)->state != SOCKET_RUNNING) | |
4327 | return false; | |
4328 | ||
4329 | /* Similar, check if the daemon itself is fully up, too */ | |
4330 | u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE); | |
4331 | if (!u) | |
4332 | return false; | |
4333 | if (!IN_SET(SERVICE(u)->state, SERVICE_RELOAD, SERVICE_RUNNING)) | |
4334 | return false; | |
4335 | ||
4336 | return true; | |
4337 | } | |
4338 | ||
4339 | void disable_printk_ratelimit(void) { | |
4340 | /* Disable kernel's printk ratelimit. | |
4341 | * | |
4342 | * Logging to /dev/kmsg is most useful during early boot and shutdown, where normal logging | |
4343 | * mechanisms are not available. The semantics of this sysctl are such that any kernel command-line | |
4344 | * setting takes precedence. */ | |
4345 | int r; | |
4346 | ||
4347 | r = sysctl_write("kernel/printk_devkmsg", "on"); | |
4348 | if (r < 0) | |
4349 | log_debug_errno(r, "Failed to set sysctl kernel.printk_devkmsg=on: %m"); | |
4350 | } | |
4351 | ||
4352 | void manager_recheck_journal(Manager *m) { | |
4353 | ||
4354 | assert(m); | |
4355 | ||
4356 | /* Don't bother with this unless we are in the special situation of being PID 1 */ | |
4357 | if (getpid_cached() != 1) | |
4358 | return; | |
4359 | ||
4360 | /* Don't check this while we are reloading, things might still change */ | |
4361 | if (MANAGER_IS_RELOADING(m)) | |
4362 | return; | |
4363 | ||
4364 | /* The journal is fully and entirely up? If so, let's permit logging to it, if that's configured. If | |
4365 | * the journal is down, don't ever log to it, otherwise we might end up deadlocking ourselves as we | |
4366 | * might trigger an activation that we ourselves cannot fulfill. */ | |
4367 | log_set_prohibit_ipc(!manager_journal_is_running(m)); | |
4368 | log_open(); | |
4369 | } | |
4370 | ||
4371 | static ShowStatus manager_get_show_status(Manager *m) { | |
4372 | assert(m); | |
4373 | ||
4374 | if (MANAGER_IS_USER(m)) | |
4375 | return _SHOW_STATUS_INVALID; | |
4376 | ||
4377 | if (m->show_status_overridden != _SHOW_STATUS_INVALID) | |
4378 | return m->show_status_overridden; | |
4379 | ||
4380 | return m->show_status; | |
4381 | } | |
4382 | ||
4383 | bool manager_get_show_status_on(Manager *m) { | |
4384 | assert(m); | |
4385 | ||
4386 | return show_status_on(manager_get_show_status(m)); | |
4387 | } | |
4388 | ||
4389 | static void set_show_status_marker(bool b) { | |
4390 | if (b) | |
4391 | (void) touch("/run/systemd/show-status"); | |
4392 | else | |
4393 | (void) unlink("/run/systemd/show-status"); | |
4394 | } | |
4395 | ||
4396 | void manager_set_show_status(Manager *m, ShowStatus mode, const char *reason) { | |
4397 | assert(m); | |
4398 | assert(reason); | |
4399 | assert(mode >= 0 && mode < _SHOW_STATUS_MAX); | |
4400 | ||
4401 | if (MANAGER_IS_USER(m)) | |
4402 | return; | |
4403 | ||
4404 | if (mode == m->show_status) | |
4405 | return; | |
4406 | ||
4407 | if (m->show_status_overridden == _SHOW_STATUS_INVALID) { | |
4408 | bool enabled; | |
4409 | ||
4410 | enabled = show_status_on(mode); | |
4411 | log_debug("%s (%s) showing of status (%s).", | |
4412 | enabled ? "Enabling" : "Disabling", | |
4413 | strna(show_status_to_string(mode)), | |
4414 | reason); | |
4415 | ||
4416 | set_show_status_marker(enabled); | |
4417 | } | |
4418 | ||
4419 | m->show_status = mode; | |
4420 | } | |
4421 | ||
4422 | void manager_override_show_status(Manager *m, ShowStatus mode, const char *reason) { | |
4423 | assert(m); | |
4424 | assert(mode < _SHOW_STATUS_MAX); | |
4425 | ||
4426 | if (MANAGER_IS_USER(m)) | |
4427 | return; | |
4428 | ||
4429 | if (mode == m->show_status_overridden) | |
4430 | return; | |
4431 | ||
4432 | m->show_status_overridden = mode; | |
4433 | ||
4434 | if (mode == _SHOW_STATUS_INVALID) | |
4435 | mode = m->show_status; | |
4436 | ||
4437 | log_debug("%s (%s) showing of status (%s).", | |
4438 | m->show_status_overridden != _SHOW_STATUS_INVALID ? "Overriding" : "Restoring", | |
4439 | strna(show_status_to_string(mode)), | |
4440 | reason); | |
4441 | ||
4442 | set_show_status_marker(show_status_on(mode)); | |
4443 | } | |
4444 | ||
4445 | const char* manager_get_confirm_spawn(Manager *m) { | |
4446 | static int last_errno = 0; | |
4447 | struct stat st; | |
4448 | int r; | |
4449 | ||
4450 | assert(m); | |
4451 | ||
4452 | /* Here's the deal: we want to test the validity of the console but don't want | |
4453 | * PID1 to go through the whole console process which might block. But we also | |
4454 | * want to warn the user only once if something is wrong with the console so we | |
4455 | * cannot do the sanity checks after spawning our children. So here we simply do | |
4456 | * really basic tests to hopefully trap common errors. | |
4457 | * | |
4458 | * If the console suddenly disappears at the time our children really need it, | |
4459 | * then they will simply fail to acquire it and a positive answer will be | |
4460 | * assumed. New children will fall back to /dev/console though. | |
4461 | * | |
4462 | * Note: TTYs are devices that can come and go any time, and frequently aren't | |
4463 | * available yet during early boot (consider a USB rs232 dongle...). If for any | |
4464 | * reason the configured console is not ready, we fall back to the default | |
4465 | * console. */ | |
4466 | ||
4467 | if (!m->confirm_spawn || path_equal(m->confirm_spawn, "/dev/console")) | |
4468 | return m->confirm_spawn; | |
4469 | ||
4470 | if (stat(m->confirm_spawn, &st) < 0) { | |
4471 | r = -errno; | |
4472 | goto fail; | |
4473 | } | |
4474 | ||
4475 | if (!S_ISCHR(st.st_mode)) { | |
4476 | r = -ENOTTY; | |
4477 | goto fail; | |
4478 | } | |
4479 | ||
4480 | last_errno = 0; | |
4481 | return m->confirm_spawn; | |
4482 | ||
4483 | fail: | |
4484 | if (last_errno != r) | |
4485 | last_errno = log_warning_errno(r, "Failed to open %s, using default console: %m", m->confirm_spawn); | |
4486 | ||
4487 | return "/dev/console"; | |
4488 | } | |
4489 | ||
4490 | void manager_set_first_boot(Manager *m, bool b) { | |
4491 | assert(m); | |
4492 | ||
4493 | if (!MANAGER_IS_SYSTEM(m)) | |
4494 | return; | |
4495 | ||
4496 | if (m->first_boot != (int) b) { | |
4497 | if (b) | |
4498 | (void) touch("/run/systemd/first-boot"); | |
4499 | else | |
4500 | (void) unlink("/run/systemd/first-boot"); | |
4501 | } | |
4502 | ||
4503 | m->first_boot = b; | |
4504 | } | |
4505 | ||
4506 | void manager_disable_confirm_spawn(void) { | |
4507 | (void) touch("/run/systemd/confirm_spawn_disabled"); | |
4508 | } | |

static bool manager_should_show_status(Manager *m, StatusType type) {
        assert(m);

        if (!MANAGER_IS_SYSTEM(m))
                return false;

        if (m->no_console_output)
                return false;

        if (!IN_SET(manager_state(m), MANAGER_INITIALIZING, MANAGER_STARTING, MANAGER_STOPPING))
                return false;

        /* If we cannot find out the status properly, just proceed. */
        if (type != STATUS_TYPE_EMERGENCY && manager_check_ask_password(m) > 0)
                return false;

        if (type == STATUS_TYPE_NOTICE && m->show_status != SHOW_STATUS_NO)
                return true;

        return manager_get_show_status_on(m);
}

void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) {
        va_list ap;

        /* If m is NULL, assume we're after shutdown and let the messages through. */

        if (m && !manager_should_show_status(m, type))
                return;

        /* XXX We should totally drop the check for ephemeral here
         * and thus effectively make 'Type=idle' pointless. */
        if (type == STATUS_TYPE_EPHEMERAL && m && m->n_on_console > 0)
                return;

        va_start(ap, format);
        status_vprintf(status, SHOW_STATUS_ELLIPSIZE|(type == STATUS_TYPE_EPHEMERAL ? SHOW_STATUS_EPHEMERAL : 0), format, ap);
        va_end(ap);
}
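
/* Illustrative usage only (not part of the original source; the unit names and status strings below are
 * made up). An ephemeral status line is overwritten by whatever is printed next, while a normal one
 * scrolls:
 *
 *     manager_status_printf(m, STATUS_TYPE_EPHEMERAL, NULL, "Waiting for %s...", "dev-sda1.device");
 *     manager_status_printf(m, STATUS_TYPE_NORMAL, "  OK  ", "Started %s.", "ssh.service");
 */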

Set* manager_get_units_needing_mounts_for(Manager *m, const char *path, UnitMountDependencyType t) {
        assert(m);
        assert(path);
        assert(t >= 0 && t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);

        if (path_equal(path, "/"))
                path = "";

        return hashmap_get(m->units_needing_mounts_for[t], path);
}

int manager_update_failed_units(Manager *m, Unit *u, bool failed) {
        unsigned size;
        int r;

        assert(m);
        assert(u->manager == m);

        size = set_size(m->failed_units);

        if (failed) {
                r = set_ensure_put(&m->failed_units, NULL, u);
                if (r < 0)
                        return log_oom();
        } else
                (void) set_remove(m->failed_units, u);

        if (set_size(m->failed_units) != size)
                bus_manager_send_change_signal(m);

        return 0;
}

ManagerState manager_state(Manager *m) {
        Unit *u;

        assert(m);

        /* Is the special shutdown target active or queued? If so, we are in shutdown state */
        u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
        if (u && unit_active_or_pending(u))
                return MANAGER_STOPPING;

        /* Did we ever finish booting? If not then we are still starting up */
        if (!MANAGER_IS_FINISHED(m)) {

                u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
                if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                        return MANAGER_INITIALIZING;

                return MANAGER_STARTING;
        }

        if (MANAGER_IS_SYSTEM(m)) {
                /* Are the rescue or emergency targets active or queued? If so we are in maintenance state */
                u = manager_get_unit(m, SPECIAL_RESCUE_TARGET);
                if (u && unit_active_or_pending(u))
                        return MANAGER_MAINTENANCE;

                u = manager_get_unit(m, SPECIAL_EMERGENCY_TARGET);
                if (u && unit_active_or_pending(u))
                        return MANAGER_MAINTENANCE;
        }

        /* Are there any failed units? If so, we are in degraded mode */
        if (!set_isempty(m->failed_units))
                return MANAGER_DEGRADED;

        return MANAGER_RUNNING;
}

static void manager_unref_uid_internal(
                Hashmap *uid_refs,
                uid_t uid,
                bool destroy_now,
                int (*_clean_ipc)(uid_t uid)) {

        uint32_t c, n;

        assert(uid_is_valid(uid));
        assert(_clean_ipc);

        /* A generic implementation, covering both manager_unref_uid() and manager_unref_gid(), under the
         * assumption that uid_t and gid_t are actually defined the same way, with the same validity rules.
         *
         * We store a hashmap where the key is the UID/GID and the value is a 32-bit reference counter whose
         * highest bit is used as a flag, marking UIDs/GIDs whose IPC objects shall be removed when the last
         * reference to the UID/GID is dropped. The flag is set as soon as at least one reference from a
         * unit with RemoveIPC= enabled is added for a UID/GID, and is reset when the UID's/GID's reference
         * counter drops to 0 again. */
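
        /* A worked example of the encoding (illustrative values; this assumes DESTROY_IPC_FLAG is the
         * topmost bit, i.e. UINT32_C(1) << 31, per the description above):
         *
         *     c = 3 | DESTROY_IPC_FLAG      -> 0x80000003: three references, IPC cleanup requested
         *     n = c & ~DESTROY_IPC_FLAG     -> 3: the plain reference count
         *     n - 1 > 0                     -> entry is updated to 0x80000002, flag preserved
         *     n - 1 == 0 (and destroy_now)  -> entry is removed and _clean_ipc() is invoked
         */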

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (uid == 0) /* We don't keep track of root, and will never destroy it */
                return;

        c = PTR_TO_UINT32(hashmap_get(uid_refs, UID_TO_PTR(uid)));

        n = c & ~DESTROY_IPC_FLAG;
        assert(n > 0);
        n--;

        if (destroy_now && n == 0) {
                hashmap_remove(uid_refs, UID_TO_PTR(uid));

                if (c & DESTROY_IPC_FLAG) {
                        log_debug("%s " UID_FMT " is no longer referenced, cleaning up its IPC.",
                                  _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
                                  uid);
                        (void) _clean_ipc(uid);
                }
        } else {
                c = n | (c & DESTROY_IPC_FLAG);
                assert_se(hashmap_update(uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0);
        }
}

void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now) {
        manager_unref_uid_internal(m->uid_refs, uid, destroy_now, clean_ipc_by_uid);
}

void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now) {
        manager_unref_uid_internal(m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid);
}

static int manager_ref_uid_internal(
                Hashmap **uid_refs,
                uid_t uid,
                bool clean_ipc) {

        uint32_t c, n;
        int r;

        assert(uid_refs);
        assert(uid_is_valid(uid));

        /* A generic implementation, covering both manager_ref_uid() and manager_ref_gid(), under the
         * assumption that uid_t and gid_t are actually defined the same way, with the same validity
         * rules. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (uid == 0) /* We don't keep track of root, and will never destroy it */
                return 0;

        r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
        if (r < 0)
                return r;

        c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));

        n = c & ~DESTROY_IPC_FLAG;
        n++;

        if (n & DESTROY_IPC_FLAG) /* check for overflow */
                return -EOVERFLOW;

        c = n | (c & DESTROY_IPC_FLAG) | (clean_ipc ? DESTROY_IPC_FLAG : 0);

        return hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
}

int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc) {
        return manager_ref_uid_internal(&m->uid_refs, uid, clean_ipc);
}

int manager_ref_gid(Manager *m, gid_t gid, bool clean_ipc) {
        return manager_ref_uid_internal(&m->gid_refs, (uid_t) gid, clean_ipc);
}

static void manager_vacuum_uid_refs_internal(
                Hashmap *uid_refs,
                int (*_clean_ipc)(uid_t uid)) {

        void *p, *k;

        assert(_clean_ipc);

        HASHMAP_FOREACH_KEY(p, k, uid_refs) {
                uint32_t c, n;
                uid_t uid;

                uid = PTR_TO_UID(k);
                c = PTR_TO_UINT32(p);

                n = c & ~DESTROY_IPC_FLAG;
                if (n > 0)
                        continue;

                if (c & DESTROY_IPC_FLAG) {
                        log_debug("Found unreferenced %s " UID_FMT " after reload/reexec. Cleaning up.",
                                  _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
                                  uid);
                        (void) _clean_ipc(uid);
                }

                assert_se(hashmap_remove(uid_refs, k) == p);
        }
}

static void manager_vacuum_uid_refs(Manager *m) {
        manager_vacuum_uid_refs_internal(m->uid_refs, clean_ipc_by_uid);
}

static void manager_vacuum_gid_refs(Manager *m) {
        manager_vacuum_uid_refs_internal(m->gid_refs, clean_ipc_by_gid);
}

static void manager_vacuum(Manager *m) {
        assert(m);

        /* Release any dynamic users no longer referenced */
        dynamic_user_vacuum(m, true);

        /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
        manager_vacuum_uid_refs(m);
        manager_vacuum_gid_refs(m);

        /* Release any runtimes no longer referenced */
        exec_shared_runtime_vacuum(m);
}

static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        struct buffer {
                uid_t uid;
                gid_t gid;
                char unit_name[UNIT_NAME_MAX+1];
        } _packed_ buffer;

        Manager *m = ASSERT_PTR(userdata);
        ssize_t l;
        size_t n;
        Unit *u;

        assert(source);

        /* Invoked whenever a child process has succeeded in resolving the user/group to use and has sent
         * us the resulting UID/GID in a datagram. We parse the datagram here and pass it off to the unit,
         * so that it can add a reference to the UID/GID, allowing it to destroy the UID's/GID's IPC
         * objects when the reference counter drops to 0. */
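
        /* Expected wire format, matching 'struct buffer' above (a sketch for orientation only; the sender
         * side lives in the child execution code, outside this file):
         *
         *     | uid_t uid | gid_t gid | unit name (no trailing NUL) |
         *
         * i.e. a fixed header of UID and GID followed by 1..UNIT_NAME_MAX bytes of unit name, which is
         * exactly what the length checks below enforce. */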

        l = recv(fd, &buffer, sizeof(buffer), MSG_DONTWAIT);
        if (l < 0) {
                if (ERRNO_IS_TRANSIENT(errno))
                        return 0;

                return log_error_errno(errno, "Failed to read from user lookup fd: %m");
        }

        if ((size_t) l <= offsetof(struct buffer, unit_name)) {
                log_warning("Received too short user lookup message, ignoring.");
                return 0;
        }

        if ((size_t) l > offsetof(struct buffer, unit_name) + UNIT_NAME_MAX) {
                log_warning("Received too long user lookup message, ignoring.");
                return 0;
        }

        if (!uid_is_valid(buffer.uid) && !gid_is_valid(buffer.gid)) {
                log_warning("Got user lookup message with invalid UID/GID pair, ignoring.");
                return 0;
        }

        n = (size_t) l - offsetof(struct buffer, unit_name);
        if (memchr(buffer.unit_name, 0, n)) {
                log_warning("Received lookup message with embedded NUL character, ignoring.");
                return 0;
        }

        buffer.unit_name[n] = 0;
        u = manager_get_unit(m, buffer.unit_name);
        if (!u) {
                log_debug("Got user lookup message but unit doesn't exist, ignoring.");
                return 0;
        }

        log_unit_debug(u, "User lookup succeeded: uid=" UID_FMT " gid=" GID_FMT, buffer.uid, buffer.gid);

        unit_notify_user_lookup(u, buffer.uid, buffer.gid);
        return 0;
}

static int manager_dispatch_handoff_timestamp_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        Manager *m = ASSERT_PTR(userdata);
        usec_t ts[2] = {};
        CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred))) control;
        struct msghdr msghdr = {
                .msg_iov = &IOVEC_MAKE(ts, sizeof(ts)),
                .msg_iovlen = 1,
                .msg_control = &control,
                .msg_controllen = sizeof(control),
        };
        ssize_t n;

        assert(source);

        n = recvmsg_safe(m->handoff_timestamp_fds[0], &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC);
        if (ERRNO_IS_NEG_TRANSIENT(n))
                return 0; /* Spurious wakeup, try again */
        if (n == -ECHRNG) {
                log_warning_errno(n, "Got message with truncated control data (unexpected fds sent?), ignoring.");
                return 0;
        }
        if (n == -EXFULL) {
                log_warning_errno(n, "Got message with truncated payload data, ignoring.");
                return 0;
        }
        if (n < 0)
                return log_error_errno(n, "Failed to receive handoff timestamp message: %m");

        cmsg_close_all(&msghdr);

        if (n != sizeof(ts)) {
                log_warning("Got handoff timestamp message of unexpected size %zi (expected %zu), ignoring.", n, sizeof(ts));
                return 0;
        }

        struct ucred *ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred);
        if (!ucred || !pid_is_valid(ucred->pid)) {
                log_warning("Received handoff timestamp message without valid credentials. Ignoring.");
                return 0;
        }

        log_debug("Got handoff timestamp event for PID " PID_FMT ".", ucred->pid);

        _cleanup_free_ Unit **units = NULL;
        int n_units = manager_get_units_for_pidref(m, &PIDREF_MAKE_FROM_PID(ucred->pid), &units);
        if (n_units < 0) {
                log_warning_errno(n_units, "Unable to determine units for PID " PID_FMT ", ignoring: %m", ucred->pid);
                return 0;
        }
        if (n_units == 0) {
                log_debug("Got handoff timestamp for process " PID_FMT " we are not interested in, ignoring.", ucred->pid);
                return 0;
        }

        dual_timestamp dt = {
                .realtime = ts[0],
                .monotonic = ts[1],
        };

        FOREACH_ARRAY(u, units, n_units) {
                if (!UNIT_VTABLE(*u)->notify_handoff_timestamp)
                        continue;

                UNIT_VTABLE(*u)->notify_handoff_timestamp(*u, ucred, &dt);
        }

        return 0;
}

static int manager_dispatch_pidref_transport_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        Manager *m = ASSERT_PTR(userdata);
        _cleanup_(pidref_done) PidRef child_pidref = PIDREF_NULL, parent_pidref = PIDREF_NULL;
        _cleanup_close_ int child_pidfd = -EBADF, parent_pidfd = -EBADF;
        struct ucred *ucred = NULL;
        CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred)) + CMSG_SPACE(sizeof(int)) * 2) control;
        pid_t child_pid = 0; /* silence false-positive warning by coverity */
        struct msghdr msghdr = {
                .msg_iov = &IOVEC_MAKE(&child_pid, sizeof(child_pid)),
                .msg_iovlen = 1,
                .msg_control = &control,
                .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cmsg;
        ssize_t n;
        int r;

        assert(source);

        /* The server expects:
         * - Parent PID in ucreds, enabled via SO_PASSCRED
         * - Parent PIDFD in an SCM_PIDFD message, enabled via SO_PASSPIDFD
         * - Child PIDFD in SCM_RIGHTS in the message body
         * - Child PID in the message IOV
         *
         * SO_PASSPIDFD may not be supported by the kernel (it is supported since v6.5), so we fall back to
         * using the parent PID from ucreds and accept some raciness. */
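
        /* A sketch of the matching sender (illustrative only; the real sender lives in the process
         * spawning code, not here, and 'transport_fd' is a stand-in name). After fork(), the intermediary
         * parent hands the new child's PID over to PID1 roughly like this:
         *
         *     pid_t child_pid = ...;                       // the freshly forked child
         *     int child_pidfd = pidfd_open(child_pid, 0);  // may fail on old kernels; then omit SCM_RIGHTS
         *
         *     struct iovec iov = IOVEC_MAKE(&child_pid, sizeof(child_pid));
         *     CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(int))) ctrl = {};
         *     struct msghdr mh = {
         *             .msg_iov = &iov,
         *             .msg_iovlen = 1,
         *             .msg_control = &ctrl,
         *             .msg_controllen = sizeof(ctrl),
         *     };
         *     struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
         *     c->cmsg_level = SOL_SOCKET;
         *     c->cmsg_type = SCM_RIGHTS;
         *     c->cmsg_len = CMSG_LEN(sizeof(int));
         *     memcpy(CMSG_DATA(c), &child_pidfd, sizeof(int));
         *
         *     (void) sendmsg(transport_fd, &mh, MSG_NOSIGNAL);
         *
         * The parent's own credentials (SCM_CREDENTIALS) and pidfd (SCM_PIDFD) are attached by the kernel
         * automatically, because the receiving end enables SO_PASSCRED/SO_PASSPIDFD. */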
        n = recvmsg_safe(m->pidref_transport_fds[0], &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC|MSG_TRUNC);
        if (ERRNO_IS_NEG_TRANSIENT(n))
                return 0; /* Spurious wakeup, try again */
        if (n == -ECHRNG) {
                log_warning_errno(n, "Got message with truncated control data (unexpected fds sent?), ignoring.");
                return 0;
        }
        if (n == -EXFULL) {
                log_warning_errno(n, "Got message with truncated payload data, ignoring.");
                return 0;
        }
        if (n < 0)
                return log_error_errno(n, "Failed to receive pidref message: %m");

        if (n != sizeof(child_pid)) {
                log_warning("Got pidref message of unexpected size %zi (expected %zu), ignoring.", n, sizeof(child_pid));
                return 0;
        }

        CMSG_FOREACH(cmsg, &msghdr) {
                if (cmsg->cmsg_level != SOL_SOCKET)
                        continue;

                if (cmsg->cmsg_type == SCM_CREDENTIALS && cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) {
                        assert(!ucred);
                        ucred = CMSG_TYPED_DATA(cmsg, struct ucred);
                } else if (cmsg->cmsg_type == SCM_PIDFD) {
                        assert(parent_pidfd < 0);
                        parent_pidfd = *CMSG_TYPED_DATA(cmsg, int);
                } else if (cmsg->cmsg_type == SCM_RIGHTS) {
                        assert(child_pidfd < 0);
                        child_pidfd = *CMSG_TYPED_DATA(cmsg, int);
                }
        }

        /* Verify and set parent pidref. */
        if (!ucred || !pid_is_valid(ucred->pid)) {
                log_warning("Received pidref message without valid credentials. Ignoring.");
                return 0;
        }

        /* Need to handle kernels without SO_PASSPIDFD where SCM_PIDFD will not be set. */
        if (parent_pidfd >= 0)
                r = pidref_set_pidfd_consume(&parent_pidref, TAKE_FD(parent_pidfd));
        else
                r = pidref_set_pid(&parent_pidref, ucred->pid);
        if (r < 0) {
                if (r == -ESRCH)
                        log_debug_errno(r, "PidRef parent process died before the message was processed. Ignoring.");
                else
                        log_warning_errno(r, "Failed to pin pidref parent process, ignoring message: %m");
                return 0;
        }

        if (parent_pidref.pid != ucred->pid) {
                assert(parent_pidref.fd >= 0);
                log_warning("Got SCM_PIDFD for parent process " PID_FMT " but got SCM_CREDENTIALS for parent process " PID_FMT ". Ignoring.",
                            parent_pidref.pid, ucred->pid);
                return 0;
        }

        /* Verify and set child pidref. */
        if (!pid_is_valid(child_pid)) {
                log_warning("Received pidref message without valid child PID. Ignoring.");
                return 0;
        }

        /* Need to handle kernels without PIDFD support. */
        if (child_pidfd >= 0)
                r = pidref_set_pidfd_consume(&child_pidref, TAKE_FD(child_pidfd));
        else
                r = pidref_set_pid(&child_pidref, child_pid);
        if (r < 0) {
                if (r == -ESRCH)
                        log_debug_errno(r, "PidRef child process died before the message was processed. Ignoring.");
                else
                        log_warning_errno(r, "Failed to pin pidref child process, ignoring message: %m");
                return 0;
        }

        if (child_pidref.pid != child_pid) {
                assert(child_pidref.fd >= 0);
                log_warning("Got SCM_RIGHTS for child process " PID_FMT " but PID in IOV message is " PID_FMT ". Ignoring.",
                            child_pidref.pid, child_pid);
                return 0;
        }

        log_debug("Got pidref event with parent PID " PID_FMT " and child PID " PID_FMT ".", parent_pidref.pid, child_pidref.pid);

        /* Try to find the cgroup of the parent process. If the parent process has exited and we're not
         * using pidfds, this may return NULL; in that case fall back to the cgroup of the child process. */
        Unit *u = manager_get_unit_by_pidref_cgroup(m, &parent_pidref);
        if (!u)
                u = manager_get_unit_by_pidref_cgroup(m, &child_pidref);
        if (!u) {
                log_debug("Got pidref for parent process " PID_FMT " and child process " PID_FMT " we are not interested in, ignoring.", parent_pidref.pid, child_pidref.pid);
                return 0;
        }

        if (!UNIT_VTABLE(u)->notify_pidref) {
                log_unit_warning(u, "Received pidref event from unexpected unit type '%s'.", unit_type_to_string(u->type));
                return 0;
        }

        UNIT_VTABLE(u)->notify_pidref(u, &parent_pidref, &child_pidref);

        return 0;
}

void manager_ref_console(Manager *m) {
        assert(m);

        m->n_on_console++;
}

void manager_unref_console(Manager *m) {
        assert(m);
        assert(m->n_on_console > 0);

        m->n_on_console--;

        if (m->n_on_console == 0)
                m->no_console_output = false; /* unset no_console_output flag, since the console is definitely free now */
}

void manager_override_log_level(Manager *m, int level) {
        _cleanup_free_ char *s = NULL;
        assert(m);

        if (!m->log_level_overridden) {
                m->original_log_level = log_get_max_level();
                m->log_level_overridden = true;
        }

        (void) log_level_to_string_alloc(level, &s);
        log_info("Setting log level to %s.", strna(s));

        log_set_max_level(level);
}

void manager_restore_original_log_level(Manager *m) {
        _cleanup_free_ char *s = NULL;
        assert(m);

        if (!m->log_level_overridden)
                return;

        (void) log_level_to_string_alloc(m->original_log_level, &s);
        log_info("Restoring log level to original (%s).", strna(s));

        log_set_max_level(m->original_log_level);
        m->log_level_overridden = false;
}

void manager_override_log_target(Manager *m, LogTarget target) {
        assert(m);

        if (!m->log_target_overridden) {
                m->original_log_target = log_get_target();
                m->log_target_overridden = true;
        }

        log_info("Setting log target to %s.", log_target_to_string(target));
        log_set_target(target);
}

void manager_restore_original_log_target(Manager *m) {
        assert(m);

        if (!m->log_target_overridden)
                return;

        log_info("Restoring log target to original %s.", log_target_to_string(m->original_log_target));

        log_set_target(m->original_log_target);
        m->log_target_overridden = false;
}

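/* A worked example of the remapping below (it relies on the generic and the INITRD_* timestamp ranges
 * being laid out in parallel in the ManagerTimestamp enum, which the string table further down reflects):
 *
 *     MANAGER_TIMESTAMP_SECURITY_START    -> MANAGER_TIMESTAMP_INITRD_SECURITY_START
 *     MANAGER_TIMESTAMP_GENERATORS_START  -> MANAGER_TIMESTAMP_INITRD_GENERATORS_START
 *     MANAGER_TIMESTAMP_UNITS_LOAD_FINISH -> MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_FINISH
 *
 * i.e. while in the initrd, s keeps its offset within the [SECURITY_START, UNITS_LOAD_FINISH] range but
 * is rebased onto the INITRD_* range; everything else is passed through unmodified. */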
ManagerTimestamp manager_timestamp_initrd_mangle(ManagerTimestamp s) {
        if (in_initrd() &&
            s >= MANAGER_TIMESTAMP_SECURITY_START &&
            s <= MANAGER_TIMESTAMP_UNITS_LOAD_FINISH)
                return s - MANAGER_TIMESTAMP_SECURITY_START + MANAGER_TIMESTAMP_INITRD_SECURITY_START;
        return s;
}

int manager_allocate_idle_pipe(Manager *m) {
        int r;

        assert(m);

        if (m->idle_pipe[0] >= 0) {
                assert(m->idle_pipe[1] >= 0);
                assert(m->idle_pipe[2] >= 0);
                assert(m->idle_pipe[3] >= 0);
                return 0;
        }

        assert(m->idle_pipe[1] < 0);
        assert(m->idle_pipe[2] < 0);
        assert(m->idle_pipe[3] < 0);

        r = RET_NERRNO(pipe2(m->idle_pipe + 0, O_NONBLOCK|O_CLOEXEC));
        if (r < 0)
                return r;

        r = RET_NERRNO(pipe2(m->idle_pipe + 2, O_NONBLOCK|O_CLOEXEC));
        if (r < 0) {
                safe_close_pair(m->idle_pipe + 0);
                return r;
        }

        return 1;
}

void unit_defaults_init(UnitDefaults *defaults, RuntimeScope scope) {
        assert(defaults);
        assert(scope >= 0);
        assert(scope < _RUNTIME_SCOPE_MAX);

        *defaults = (UnitDefaults) {
                .std_output = EXEC_OUTPUT_JOURNAL,
                .std_error = EXEC_OUTPUT_INHERIT,
                .restart_usec = DEFAULT_RESTART_USEC,
                .timeout_start_usec = manager_default_timeout(scope),
                .timeout_stop_usec = manager_default_timeout(scope),
                .timeout_abort_usec = manager_default_timeout(scope),
                .timeout_abort_set = false,
                .device_timeout_usec = manager_default_timeout(scope),
                .start_limit = { DEFAULT_START_LIMIT_INTERVAL, DEFAULT_START_LIMIT_BURST },

                .memory_accounting = MEMORY_ACCOUNTING_DEFAULT,
                .io_accounting = false,
                .tasks_accounting = true,
                .ip_accounting = false,

                .tasks_max = DEFAULT_TASKS_MAX,
                .timer_accuracy_usec = 1 * USEC_PER_MINUTE,

                .memory_pressure_watch = CGROUP_PRESSURE_WATCH_AUTO,
                .memory_pressure_threshold_usec = MEMORY_PRESSURE_DEFAULT_THRESHOLD_USEC,

                .oom_policy = OOM_STOP,
                .oom_score_adjust_set = false,
        };
}

void unit_defaults_done(UnitDefaults *defaults) {
        assert(defaults);

        defaults->smack_process_label = mfree(defaults->smack_process_label);
        rlimit_free_all(defaults->rlimit);
}

LogTarget manager_get_executor_log_target(Manager *m) {
        assert(m);

        /* If journald is not available tell sd-executor to go to kmsg, as it might be starting journald */
        if (!MANAGER_IS_TEST_RUN(m) && !manager_journal_is_running(m))
                return LOG_TARGET_KMSG;

        return log_get_target();
}

static const char* const manager_state_table[_MANAGER_STATE_MAX] = {
        [MANAGER_INITIALIZING] = "initializing",
        [MANAGER_STARTING]     = "starting",
        [MANAGER_RUNNING]      = "running",
        [MANAGER_DEGRADED]     = "degraded",
        [MANAGER_MAINTENANCE]  = "maintenance",
        [MANAGER_STOPPING]     = "stopping",
};

DEFINE_STRING_TABLE_LOOKUP(manager_state, ManagerState);

static const char* const manager_objective_table[_MANAGER_OBJECTIVE_MAX] = {
        [MANAGER_OK]          = "ok",
        [MANAGER_EXIT]        = "exit",
        [MANAGER_RELOAD]      = "reload",
        [MANAGER_REEXECUTE]   = "reexecute",
        [MANAGER_REBOOT]      = "reboot",
        [MANAGER_SOFT_REBOOT] = "soft-reboot",
        [MANAGER_POWEROFF]    = "poweroff",
        [MANAGER_HALT]        = "halt",
        [MANAGER_KEXEC]       = "kexec",
        [MANAGER_SWITCH_ROOT] = "switch-root",
};

DEFINE_STRING_TABLE_LOOKUP(manager_objective, ManagerObjective);

static const char* const manager_timestamp_table[_MANAGER_TIMESTAMP_MAX] = {
        [MANAGER_TIMESTAMP_FIRMWARE]                 = "firmware",
        [MANAGER_TIMESTAMP_LOADER]                   = "loader",
        [MANAGER_TIMESTAMP_KERNEL]                   = "kernel",
        [MANAGER_TIMESTAMP_INITRD]                   = "initrd",
        [MANAGER_TIMESTAMP_USERSPACE]                = "userspace",
        [MANAGER_TIMESTAMP_FINISH]                   = "finish",
        [MANAGER_TIMESTAMP_SECURITY_START]           = "security-start",
        [MANAGER_TIMESTAMP_SECURITY_FINISH]          = "security-finish",
        [MANAGER_TIMESTAMP_GENERATORS_START]         = "generators-start",
        [MANAGER_TIMESTAMP_GENERATORS_FINISH]        = "generators-finish",
        [MANAGER_TIMESTAMP_UNITS_LOAD_START]         = "units-load-start",
        [MANAGER_TIMESTAMP_UNITS_LOAD_FINISH]        = "units-load-finish",
        [MANAGER_TIMESTAMP_UNITS_LOAD]               = "units-load",
        [MANAGER_TIMESTAMP_INITRD_SECURITY_START]    = "initrd-security-start",
        [MANAGER_TIMESTAMP_INITRD_SECURITY_FINISH]   = "initrd-security-finish",
        [MANAGER_TIMESTAMP_INITRD_GENERATORS_START]  = "initrd-generators-start",
        [MANAGER_TIMESTAMP_INITRD_GENERATORS_FINISH] = "initrd-generators-finish",
        [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_START]  = "initrd-units-load-start",
        [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_FINISH] = "initrd-units-load-finish",
        [MANAGER_TIMESTAMP_SHUTDOWN_START]           = "shutdown-start",
};

DEFINE_STRING_TABLE_LOOKUP(manager_timestamp, ManagerTimestamp);