]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/udev/udevd.c
time-util: assume CLOCK_BOOTTIME always exists
[thirdparty/systemd.git] / src / udev / udevd.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
6 */
7
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <getopt.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <sys/epoll.h>
16 #include <sys/file.h>
17 #include <sys/inotify.h>
18 #include <sys/ioctl.h>
19 #include <sys/mount.h>
20 #include <sys/prctl.h>
21 #include <sys/signalfd.h>
22 #include <sys/stat.h>
23 #include <sys/time.h>
24 #include <sys/wait.h>
25 #include <unistd.h>
26
27 #include "sd-daemon.h"
28 #include "sd-event.h"
29
30 #include "alloc-util.h"
31 #include "cgroup-setup.h"
32 #include "cgroup-util.h"
33 #include "cpu-set-util.h"
34 #include "dev-setup.h"
35 #include "device-monitor-private.h"
36 #include "device-private.h"
37 #include "device-util.h"
38 #include "event-util.h"
39 #include "fd-util.h"
40 #include "fileio.h"
41 #include "format-util.h"
42 #include "fs-util.h"
43 #include "hashmap.h"
44 #include "inotify-util.h"
45 #include "io-util.h"
46 #include "limits-util.h"
47 #include "list.h"
48 #include "main-func.h"
49 #include "mkdir.h"
50 #include "netlink-util.h"
51 #include "parse-util.h"
52 #include "path-util.h"
53 #include "pretty-print.h"
54 #include "proc-cmdline.h"
55 #include "process-util.h"
56 #include "selinux-util.h"
57 #include "signal-util.h"
58 #include "socket-util.h"
59 #include "string-util.h"
60 #include "strv.h"
61 #include "strxcpyx.h"
62 #include "syslog-util.h"
63 #include "udevd.h"
64 #include "udev-builtin.h"
65 #include "udev-ctrl.h"
66 #include "udev-event.h"
67 #include "udev-util.h"
68 #include "udev-watch.h"
69 #include "user-util.h"
70 #include "version.h"
71
72 #define WORKER_NUM_MAX 2048U
73 #define EVENT_RETRY_INTERVAL_USEC (200 * USEC_PER_MSEC)
74 #define EVENT_RETRY_TIMEOUT_USEC (3 * USEC_PER_MINUTE)
75
76 static bool arg_debug = false;
77 static int arg_daemonize = false;
78 static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
79 static unsigned arg_children_max = 0;
80 static usec_t arg_exec_delay_usec = 0;
81 static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
82 static int arg_timeout_signal = SIGKILL;
83 static bool arg_blockdev_read_only = false;
84
typedef struct Event Event;
typedef struct Worker Worker;

/* State owned by the main daemon; a forked worker keeps a stripped-down copy
 * (see manager_clear_for_worker()). */
typedef struct Manager {
        sd_event *event;
        Hashmap *workers;                 /* PID_TO_PTR(pid) -> Worker */
        LIST_HEAD(Event, events);         /* queued/running events, appended in uevent arrival order */
        char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */
        int log_level;

        UdevRules *rules;                 /* loaded lazily by event_queue_start(), dropped on reload */
        Hashmap *properties;

        sd_netlink *rtnl;

        sd_device_monitor *monitor;       /* receives kernel uevents */
        UdevCtrl *ctrl;                   /* "udevadm control" connection */
        int worker_watch[2];              /* socketpair: workers write EventResult, main daemon reads */

        /* used by udev-watch */
        int inotify_fd;
        sd_event_source *inotify_event;

        sd_event_source *kill_workers_event; /* timer that terminates idle workers */

        usec_t last_usec;                 /* when rules/builtin timestamps were last checked */

        bool stop_exec_queue;
        bool exit;
} Manager;

typedef enum EventState {
        EVENT_UNDEF,   /* also used as a wildcard by event_queue_cleanup() */
        EVENT_QUEUED,
        EVENT_RUNNING,
} EventState;

typedef struct Event {
        Manager *manager;
        Worker *worker;                   /* set while EVENT_RUNNING */
        EventState state;

        sd_device *dev;

        sd_device_action_t action;
        uint64_t seqnum;
        uint64_t blocker_seqnum;          /* last known blocking event; == seqnum means "no blocker" */
        usec_t retry_again_next_usec;     /* earliest retry time for TRY_AGAIN events; 0 = not requeued */
        usec_t retry_again_timeout_usec;  /* give-up deadline for repeatedly locked events */

        sd_event_source *timeout_warning_event;
        sd_event_source *timeout_event;

        LIST_FIELDS(Event, event);
} Event;

typedef enum WorkerState {
        WORKER_UNDEF,
        WORKER_RUNNING,  /* currently processing an event */
        WORKER_IDLE,     /* waiting for the next event */
        WORKER_KILLED,   /* SIGTERM already sent */
        WORKER_KILLING,  /* terminate once the current event finishes */
} WorkerState;

typedef struct Worker {
        Manager *manager;
        pid_t pid;
        sd_device_monitor *monitor;  /* unicast channel the main daemon sends devices over */
        WorkerState state;
        Event *event;                /* event currently assigned to this worker */
} Worker;

/* passed from worker to main process */
typedef enum EventResult {
        EVENT_RESULT_SUCCESS,
        EVENT_RESULT_FAILED,
        EVENT_RESULT_TRY_AGAIN, /* when the block device is locked by another process. */
        _EVENT_RESULT_MAX,
        _EVENT_RESULT_INVALID = -EINVAL,
} EventResult;
166
167 static Event *event_free(Event *event) {
168 if (!event)
169 return NULL;
170
171 assert(event->manager);
172
173 LIST_REMOVE(event, event->manager->events, event);
174 sd_device_unref(event->dev);
175
176 sd_event_source_disable_unref(event->timeout_warning_event);
177 sd_event_source_disable_unref(event->timeout_event);
178
179 if (event->worker)
180 event->worker->event = NULL;
181
182 return mfree(event);
183 }
184
/* Free all queued events whose state matches 'match_state'; EVENT_UNDEF acts
 * as a wildcard and flushes the whole queue. Relies on LIST_FOREACH being
 * safe against removal of the current entry (event_free() unlinks it). */
static void event_queue_cleanup(Manager *manager, EventState match_state) {
        LIST_FOREACH(event, event, manager->events) {
                if (match_state != EVENT_UNDEF && match_state != event->state)
                        continue;

                event_free(event);
        }
}
193
194 static Worker *worker_free(Worker *worker) {
195 if (!worker)
196 return NULL;
197
198 assert(worker->manager);
199
200 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
201 sd_device_monitor_unref(worker->monitor);
202 event_free(worker->event);
203
204 return mfree(worker);
205 }
206
/* Cleanup helper for _cleanup_(worker_freep). */
DEFINE_TRIVIAL_CLEANUP_FUNC(Worker*, worker_free);
/* Hash ops for Manager.workers: keyed by PID_TO_PTR(pid), values destroyed with worker_free(). */
DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(worker_hash_op, void, trivial_hash_func, trivial_compare_func, Worker, worker_free);
209
/* Drop the Manager state only the main daemon needs. Called from
 * manager_free(), and in a freshly forked worker to shed resources inherited
 * from the main process. Event sources are released before the event loop
 * itself; the workers hashmap (freeing each Worker and its event) is freed
 * before event_queue_cleanup() flushes the remaining unassigned events. */
static void manager_clear_for_worker(Manager *manager) {
        assert(manager);

        manager->inotify_event = sd_event_source_disable_unref(manager->inotify_event);
        manager->kill_workers_event = sd_event_source_disable_unref(manager->kill_workers_event);

        manager->event = sd_event_unref(manager->event);

        manager->workers = hashmap_free(manager->workers);
        event_queue_cleanup(manager, EVENT_UNDEF);

        manager->monitor = sd_device_monitor_unref(manager->monitor);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        /* Only the read end is closed here; a worker still needs the write end
         * to report results back to the main daemon. */
        manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
}
226
/* Destructor for the Manager. NULL-tolerant; returns NULL. */
static Manager* manager_free(Manager *manager) {
        if (!manager)
                return NULL;

        udev_builtin_exit();

        manager_clear_for_worker(manager);

        sd_netlink_unref(manager->rtnl);

        /* Keys and values of 'properties' are separately allocated strings. */
        hashmap_free_free_free(manager->properties);
        udev_rules_free(manager->rules);

        safe_close(manager->inotify_fd);
        /* The read end was already closed (and reset to -1) in
         * manager_clear_for_worker(); this closes the write end. */
        safe_close_pair(manager->worker_watch);

        free(manager->cgroup);
        return mfree(manager);
}
246
247 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
248
249 static int worker_new(Worker **ret, Manager *manager, sd_device_monitor *worker_monitor, pid_t pid) {
250 _cleanup_(worker_freep) Worker *worker = NULL;
251 int r;
252
253 assert(ret);
254 assert(manager);
255 assert(worker_monitor);
256 assert(pid > 1);
257
258 /* close monitor, but keep address around */
259 device_monitor_disconnect(worker_monitor);
260
261 worker = new(Worker, 1);
262 if (!worker)
263 return -ENOMEM;
264
265 *worker = (Worker) {
266 .manager = manager,
267 .monitor = sd_device_monitor_ref(worker_monitor),
268 .pid = pid,
269 };
270
271 r = hashmap_ensure_put(&manager->workers, &worker_hash_op, PID_TO_PTR(pid), worker);
272 if (r < 0)
273 return r;
274
275 *ret = TAKE_PTR(worker);
276
277 return 0;
278 }
279
280 static void manager_kill_workers(Manager *manager, bool force) {
281 Worker *worker;
282
283 assert(manager);
284
285 HASHMAP_FOREACH(worker, manager->workers) {
286 if (worker->state == WORKER_KILLED)
287 continue;
288
289 if (worker->state == WORKER_RUNNING && !force) {
290 worker->state = WORKER_KILLING;
291 continue;
292 }
293
294 worker->state = WORKER_KILLED;
295 (void) kill(worker->pid, SIGTERM);
296 }
297 }
298
/* Initiate daemon shutdown: stop accepting new events, flush the queue and
 * forcibly terminate all workers. */
static void manager_exit(Manager *manager) {
        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_disable_unref(manager->inotify_event);
        manager->inotify_fd = safe_close(manager->inotify_fd);

        manager->monitor = sd_device_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager, true);
}
320
321 static void notify_ready(void) {
322 int r;
323
324 r = sd_notifyf(false,
325 "READY=1\n"
326 "STATUS=Processing with %u children at max", arg_children_max);
327 if (r < 0)
328 log_warning_errno(r, "Failed to send readiness notification, ignoring: %m");
329 }
330
/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {
        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        /* Terminate idle workers so future events are handled by workers forked
         * with the new configuration; busy ones finish their current event
         * first (force=false). */
        manager_kill_workers(manager, false);

        /* Rules are dropped here and lazily re-loaded by event_queue_start(). */
        manager->rules = udev_rules_free(manager->rules);
        udev_builtin_exit();

        notify_ready();
}
345
/* Timer callback: terminate workers that have been idle for a while. */
static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_debug("Cleanup idle workers");
        manager_kill_workers(manager, false);

        return 1;
}
356
/* Forward a fully processed device to all libudev listeners via 'monitor'.
 * Failures are logged but otherwise ignored (best-effort broadcast). */
static void device_broadcast(sd_device_monitor *monitor, sd_device *dev) {
        int r;

        assert(dev);

        /* On exit, manager->monitor is already NULL. */
        if (!monitor)
                return;

        r = device_monitor_send_device(monitor, NULL, dev);
        if (r < 0)
                log_device_warning_errno(dev, r,
                                         "Failed to broadcast event to libudev listeners, ignoring: %m");
}
371
/* Worker side: report the EventResult of the just-processed event to the main
 * daemon over the shared worker_watch channel. */
static int worker_send_result(Manager *manager, EventResult result) {
        assert(manager);
        assert(manager->worker_watch[WRITE_END] >= 0);

        return loop_write(manager->worker_watch[WRITE_END], &result, sizeof(result), false);
}
378
/* Resolve the device node path to lock for 'dev'.
 * Returns 1 and sets *ret to the devname of the whole block device (for a
 * partition: of its parent disk). Returns 0 and sets *ret to NULL when
 * locking is irrelevant: remove events, non-"block" devices, synthetic
 * dm-/md/drbd devices, or devices without a device node. Negative errno on
 * error. *ret points into 'dev' (or its parent) and must not be freed. */
static int device_get_block_device(sd_device *dev, const char **ret) {
        const char *val;
        int r;

        assert(dev);
        assert(ret);

        if (device_for_action(dev, SD_DEVICE_REMOVE))
                goto irrelevant;

        r = sd_device_get_subsystem(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");

        if (!streq(val, "block"))
                goto irrelevant;

        r = sd_device_get_sysname(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get sysname: %m");

        /* Exclude device-mapper, md and drbd devices from locking. */
        if (STARTSWITH_SET(val, "dm-", "md", "drbd"))
                goto irrelevant;

        r = sd_device_get_devtype(dev, &val);
        if (r < 0 && r != -ENOENT)
                return log_device_debug_errno(dev, r, "Failed to get devtype: %m");
        if (r >= 0 && streq(val, "partition")) {
                /* Partitions are serialized via their parent disk. */
                r = sd_device_get_parent(dev, &dev);
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to get parent device: %m");
        }

        r = sd_device_get_devname(dev, &val);
        if (r == -ENOENT)
                goto irrelevant;
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devname: %m");

        *ret = val;
        return 1;

irrelevant:
        *ret = NULL;
        return 0;
}
425
/* Returns 1 with *ret_fd holding a shared lock on the device node, 0 with
 * *ret_fd = -1 when no locking applies, or a negative errno. A lock held
 * elsewhere surfaces as -EAGAIN (EWOULDBLOCK) from the non-blocking flock(),
 * which the caller maps to EVENT_RESULT_TRY_AGAIN. */
static int worker_lock_block_device(sd_device *dev, int *ret_fd) {
        _cleanup_close_ int fd = -1;
        const char *val;
        int r;

        assert(dev);
        assert(ret_fd);

        /* Take a shared lock on the device node; this establishes a concept of device "ownership" to
         * serialize device access. External processes holding an exclusive lock will cause udev to skip the
         * event handling; in the case udev acquired the lock, the external process can block until udev has
         * finished its event handling. */

        r = device_get_block_device(dev, &val);
        if (r < 0)
                return r;
        if (r == 0)
                goto nolock;

        fd = open(val, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
        if (fd < 0) {
                /* A vanished device node is not an error; the device may already be gone. */
                bool ignore = ERRNO_IS_DEVICE_ABSENT(errno);

                log_device_debug_errno(dev, errno, "Failed to open '%s'%s: %m", val, ignore ? ", ignoring" : "");
                if (!ignore)
                        return -errno;

                goto nolock;
        }

        if (flock(fd, LOCK_SH|LOCK_NB) < 0)
                return log_device_debug_errno(dev, errno, "Failed to flock(%s): %m", val);

        *ret_fd = TAKE_FD(fd);
        return 1;

nolock:
        *ret_fd = -1;
        return 0;
}
466
/* If read-only mode was requested on the kernel command line, mark newly
 * appearing (ADD) physical block devices read-only via BLKROSET. Best-effort:
 * callers ignore the return value. */
static int worker_mark_block_device_read_only(sd_device *dev) {
        _cleanup_close_ int fd = -1;
        const char *val;
        int state = 1, r;  /* state=1: set the read-only flag */

        assert(dev);

        if (!arg_blockdev_read_only)
                return 0;

        /* Do this only once, when the block device is new. If the device is later retriggered let's not
         * toggle the bit again, so that people can boot up with full read-only mode and then unset the bit
         * for specific devices only. */
        if (!device_for_action(dev, SD_DEVICE_ADD))
                return 0;

        r = sd_device_get_subsystem(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");

        if (!streq(val, "block"))
                return 0;

        r = sd_device_get_sysname(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get sysname: %m");

        /* Exclude synthetic devices for now, this is supposed to be a safety feature to avoid modification
         * of physical devices, and what sits on top of those doesn't really matter if we don't allow the
         * underlying block devices to receive changes. */
        if (STARTSWITH_SET(val, "dm-", "md", "drbd", "loop", "nbd", "zram"))
                return 0;

        r = sd_device_get_devname(dev, &val);
        if (r == -ENOENT)
                return 0;
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devname: %m");

        fd = open(val, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
        if (fd < 0)
                return log_device_debug_errno(dev, errno, "Failed to open '%s', ignoring: %m", val);

        if (ioctl(fd, BLKROSET, &state) < 0)
                return log_device_warning_errno(dev, errno, "Failed to mark block device '%s' read-only: %m", val);

        log_device_info(dev, "Successfully marked block device '%s' read-only.", val);
        return 0;
}
516
/* Apply udev rules to one device inside a worker. Returns 0 on success,
 * -EAGAIN (via worker_lock_block_device()) when the underlying block device is
 * exclusively locked by another process, or another negative errno on error. */
static int worker_process_device(Manager *manager, sd_device *dev) {
        _cleanup_(udev_event_freep) UdevEvent *udev_event = NULL;
        _cleanup_close_ int fd_lock = -1;
        int r;

        assert(manager);
        assert(dev);

        log_device_uevent(dev, "Processing device");

        udev_event = udev_event_new(dev, arg_exec_delay_usec, manager->rtnl, manager->log_level);
        if (!udev_event)
                return -ENOMEM;

        /* If this is a block device and the device is locked currently via the BSD advisory locks,
         * someone else is using it exclusively. We don't run our udev rules now to not interfere.
         * Instead of processing the event, we requeue the event and will try again after a delay.
         *
         * The user-facing side of this: https://systemd.io/BLOCK_DEVICE_LOCKING */
        r = worker_lock_block_device(dev, &fd_lock);
        if (r < 0)
                return r;

        (void) worker_mark_block_device_read_only(dev);

        /* apply rules, create node, symlinks */
        r = udev_event_execute_rules(
                        udev_event,
                        manager->inotify_fd,
                        arg_event_timeout_usec,
                        arg_timeout_signal,
                        manager->properties,
                        manager->rules);
        if (r < 0)
                return r;

        udev_event_execute_run(udev_event, arg_event_timeout_usec, arg_timeout_signal);

        /* If rule processing opened an rtnl connection, keep a reference so
         * later events handled by this worker can reuse it. */
        if (!manager->rtnl)
                manager->rtnl = sd_netlink_ref(udev_event->rtnl);

        r = udev_event_process_inotify_watch(udev_event, manager->inotify_fd);
        if (r < 0)
                return r;

        log_device_uevent(dev, "Device processed");
        return 0;
}
566
/* Worker-side monitor callback: process one device, broadcast the result to
 * libudev listeners (unless it will be retried), and report the EventResult
 * back to the main daemon. Always returns 1 to keep the loop running. */
static int worker_device_monitor_handler(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
        Manager *manager = userdata;
        EventResult result;
        int r;

        assert(dev);
        assert(manager);

        r = worker_process_device(manager, dev);
        if (r == -EAGAIN) {
                /* if we couldn't acquire the flock(), then requeue the event */
                result = EVENT_RESULT_TRY_AGAIN;
                log_device_debug_errno(dev, r, "Block device is currently locked, requeueing the event.");
        } else if (r < 0) {
                result = EVENT_RESULT_FAILED;
                log_device_warning_errno(dev, r, "Failed to process device, ignoring: %m");
        } else
                result = EVENT_RESULT_SUCCESS;

        if (result != EVENT_RESULT_TRY_AGAIN)
                /* send processed event back to libudev listeners */
                device_broadcast(monitor, dev);

        /* send udevd the result of the event execution */
        r = worker_send_result(manager, result);
        if (r < 0)
                log_device_warning_errno(dev, r, "Failed to send signal to main daemon, ignoring: %m");

        /* Reset the log level, as it might be changed by "OPTIONS=log_level=". */
        log_set_max_level(manager->log_level);

        return 1;
}
600
/* Entry point of a forked worker process. Takes ownership of 'first_device'
 * (processed immediately) and of the Manager copy, then serves further
 * devices sent by the main daemon over 'monitor' until SIGTERM ends the
 * event loop. */
static int worker_main(Manager *_manager, sd_device_monitor *monitor, sd_device *first_device) {
        _cleanup_(sd_device_unrefp) sd_device *dev = first_device;
        _cleanup_(manager_freep) Manager *manager = _manager;
        int r;

        assert(manager);
        assert(monitor);
        assert(dev);

        /* Workers must not talk to the service manager. */
        assert_se(unsetenv("NOTIFY_SOCKET") == 0);

        /* Block SIGTERM so it is delivered via the signalfd-based event source below. */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, -1) >= 0);

        /* Reset OOM score, we only protect the main daemon. */
        r = set_oom_score_adjust(0);
        if (r < 0)
                log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

        /* Clear unnecessary data in Manager object. */
        manager_clear_for_worker(manager);

        r = sd_event_new(&manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to allocate event loop: %m");

        /* NULL handler: default behavior is to exit the event loop on SIGTERM. */
        r = sd_event_add_signal(manager->event, NULL, SIGTERM, NULL, NULL);
        if (r < 0)
                return log_error_errno(r, "Failed to set SIGTERM event: %m");

        r = sd_device_monitor_attach_event(monitor, manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to attach event loop to device monitor: %m");

        r = sd_device_monitor_start(monitor, worker_device_monitor_handler, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to start device monitor: %m");

        (void) sd_event_source_set_description(sd_device_monitor_get_event_source(monitor), "worker-device-monitor");

        /* Process first device */
        (void) worker_device_monitor_handler(monitor, dev, manager);

        r = sd_event_loop(manager->event);
        if (r < 0)
                return log_error_errno(r, "Event loop failed: %m");

        return 0;
}
649
650 static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
651 Event *event = userdata;
652
653 assert(event);
654 assert(event->worker);
655
656 kill_and_sigcont(event->worker->pid, arg_timeout_signal);
657 event->worker->state = WORKER_KILLED;
658
659 log_device_error(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" killed", event->worker->pid, event->seqnum);
660
661 return 1;
662 }
663
664 static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
665 Event *event = userdata;
666
667 assert(event);
668 assert(event->worker);
669
670 log_device_warning(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" is taking a long time", event->worker->pid, event->seqnum);
671
672 return 1;
673 }
674
/* Hand 'event' to 'worker': mark both as running, link them to each other,
 * and arm the per-event warning and kill timers on the manager's loop. */
static void worker_attach_event(Worker *worker, Event *event) {
        sd_event *e;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        /* Timer setup failures are ignored; the event then simply runs without
         * a timeout guard. */
        (void) sd_event_add_time_relative(e, &event->timeout_warning_event, CLOCK_MONOTONIC,
                                          udev_warn_timeout(arg_event_timeout_usec), USEC_PER_SEC,
                                          on_event_timeout_warning, event);

        (void) sd_event_add_time_relative(e, &event->timeout_event, CLOCK_MONOTONIC,
                                          arg_event_timeout_usec, USEC_PER_SEC,
                                          on_event_timeout, event);
}
699
/* Fork a new worker process to handle 'event'. The child runs worker_main()
 * and exits there; the parent registers a Worker object and attaches the
 * event to it. */
static int worker_spawn(Manager *manager, Event *event) {
        _cleanup_(sd_device_monitor_unrefp) sd_device_monitor *worker_monitor = NULL;
        Worker *worker;
        pid_t pid;
        int r;

        /* listen for new events */
        r = device_monitor_new_full(&worker_monitor, MONITOR_GROUP_NONE, -1);
        if (r < 0)
                return r;

        /* allow the main daemon netlink address to send devices to the worker */
        r = device_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        if (r < 0)
                return log_error_errno(r, "Worker: Failed to set unicast sender: %m");

        r = device_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                return log_error_errno(r, "Worker: Failed to enable receiving of device: %m");

        r = safe_fork(NULL, FORK_DEATHSIG, &pid);
        if (r < 0) {
                /* Put the event back into the queue so it is retried later. */
                event->state = EVENT_QUEUED;
                return log_error_errno(r, "Failed to fork() worker: %m");
        }
        if (r == 0) {
                DEVICE_TRACE_POINT(worker_spawned, event->dev, getpid());

                /* Worker process */
                r = worker_main(manager, worker_monitor, sd_device_ref(event->dev));
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }

        r = worker_new(&worker, manager, worker_monitor, pid);
        if (r < 0)
                return log_error_errno(r, "Failed to create worker object: %m");

        worker_attach_event(worker, event);

        log_device_debug(event->dev, "Worker ["PID_FMT"] is forked for processing SEQNUM=%"PRIu64".", pid, event->seqnum);
        return 0;
}
743
/* Try to dispatch 'event': first to an already idle worker, otherwise by
 * spawning a new one if below arg_children_max. Returns 1 when the event is
 * now being processed, 0 when no worker slot is free, negative errno when
 * spawning failed. */
static int event_run(Event *event) {
        /* starts true so the "limit reached" message is logged once per batch */
        static bool log_children_max_reached = true;
        Manager *manager;
        Worker *worker;
        int r;

        assert(event);
        assert(event->manager);

        log_device_uevent(event->dev, "Device ready for processing");

        manager = event->manager;
        HASHMAP_FOREACH(worker, manager->workers) {
                if (worker->state != WORKER_IDLE)
                        continue;

                r = device_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (r < 0) {
                        log_device_error_errno(event->dev, r, "Worker ["PID_FMT"] did not accept message, killing the worker: %m",
                                               worker->pid);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return 1; /* event is now processing. */
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                /* Avoid spamming the debug logs if the limit is already reached and
                 * many events still need to be processed */
                if (log_children_max_reached && arg_children_max > 1) {
                        log_debug("Maximum number (%u) of children reached.", hashmap_size(manager->workers));
                        log_children_max_reached = false;
                }
                return 0; /* no free worker */
        }

        /* Re-enable the debug message for the next batch of events */
        log_children_max_reached = true;

        /* start new worker and pass initial device */
        r = worker_spawn(manager, event);
        if (r < 0)
                return r;

        return 1; /* event is now processing. */
}
792
793 static int event_is_blocked(Event *event) {
794 const char *subsystem, *devpath, *devpath_old = NULL;
795 dev_t devnum = makedev(0, 0);
796 Event *loop_event = NULL;
797 size_t devpath_len;
798 int r, ifindex = 0;
799 bool is_block;
800
801 /* lookup event for identical, parent, child device */
802
803 assert(event);
804 assert(event->manager);
805 assert(event->blocker_seqnum <= event->seqnum);
806
807 if (event->retry_again_next_usec > 0) {
808 usec_t now_usec;
809
810 r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
811 if (r < 0)
812 return r;
813
814 if (event->retry_again_next_usec <= now_usec)
815 return true;
816 }
817
818 if (event->blocker_seqnum == event->seqnum)
819 /* we have checked previously and no blocker found */
820 return false;
821
822 LIST_FOREACH(event, e, event->manager->events) {
823 loop_event = e;
824
825 /* we already found a later event, earlier cannot block us, no need to check again */
826 if (loop_event->seqnum < event->blocker_seqnum)
827 continue;
828
829 /* event we checked earlier still exists, no need to check again */
830 if (loop_event->seqnum == event->blocker_seqnum)
831 return true;
832
833 /* found ourself, no later event can block us */
834 if (loop_event->seqnum >= event->seqnum)
835 goto no_blocker;
836
837 /* found event we have not checked */
838 break;
839 }
840
841 assert(loop_event);
842 assert(loop_event->seqnum > event->blocker_seqnum &&
843 loop_event->seqnum < event->seqnum);
844
845 r = sd_device_get_subsystem(event->dev, &subsystem);
846 if (r < 0)
847 return r;
848
849 is_block = streq(subsystem, "block");
850
851 r = sd_device_get_devpath(event->dev, &devpath);
852 if (r < 0)
853 return r;
854
855 devpath_len = strlen(devpath);
856
857 r = sd_device_get_property_value(event->dev, "DEVPATH_OLD", &devpath_old);
858 if (r < 0 && r != -ENOENT)
859 return r;
860
861 r = sd_device_get_devnum(event->dev, &devnum);
862 if (r < 0 && r != -ENOENT)
863 return r;
864
865 r = sd_device_get_ifindex(event->dev, &ifindex);
866 if (r < 0 && r != -ENOENT)
867 return r;
868
869 /* check if queue contains events we depend on */
870 LIST_FOREACH(event, e, loop_event) {
871 size_t loop_devpath_len, common;
872 const char *loop_devpath;
873
874 loop_event = e;
875
876 /* found ourself, no later event can block us */
877 if (loop_event->seqnum >= event->seqnum)
878 goto no_blocker;
879
880 /* check major/minor */
881 if (major(devnum) != 0) {
882 const char *s;
883 dev_t d;
884
885 if (sd_device_get_subsystem(loop_event->dev, &s) < 0)
886 continue;
887
888 if (sd_device_get_devnum(loop_event->dev, &d) >= 0 &&
889 devnum == d && is_block == streq(s, "block"))
890 break;
891 }
892
893 /* check network device ifindex */
894 if (ifindex > 0) {
895 int i;
896
897 if (sd_device_get_ifindex(loop_event->dev, &i) >= 0 &&
898 ifindex == i)
899 break;
900 }
901
902 if (sd_device_get_devpath(loop_event->dev, &loop_devpath) < 0)
903 continue;
904
905 /* check our old name */
906 if (devpath_old && streq(devpath_old, loop_devpath))
907 break;
908
909 loop_devpath_len = strlen(loop_devpath);
910
911 /* compare devpath */
912 common = MIN(devpath_len, loop_devpath_len);
913
914 /* one devpath is contained in the other? */
915 if (!strneq(devpath, loop_devpath, common))
916 continue;
917
918 /* identical device event found */
919 if (devpath_len == loop_devpath_len)
920 break;
921
922 /* parent device event found */
923 if (devpath[common] == '/')
924 break;
925
926 /* child device event found */
927 if (loop_devpath[common] == '/')
928 break;
929 }
930
931 assert(loop_event);
932
933 log_device_debug(event->dev, "SEQNUM=%" PRIu64 " blocked by SEQNUM=%" PRIu64,
934 event->seqnum, loop_event->seqnum);
935
936 event->blocker_seqnum = loop_event->seqnum;
937 return true;
938
939 no_blocker:
940 event->blocker_seqnum = event->seqnum;
941 return false;
942 }
943
/* Dispatch queued events to workers, reloading configuration when rules or
 * builtins changed. Returns 0 when the queue was drained or no worker slot is
 * free, negative errno when starting a worker failed. */
static int event_queue_start(Manager *manager) {
        usec_t usec;
        int r;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return 0;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            usec > usec_add(manager->last_usec, 3 * USEC_PER_SEC)) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        /* There is work to do; hold off reaping idle workers for now. */
        r = event_source_disable(manager->kill_workers_event);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        udev_builtin_init();

        /* Rules are loaded lazily: on the first batch after startup or after a reload dropped them. */
        if (!manager->rules) {
                r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
                if (r < 0)
                        return log_warning_errno(r, "Failed to read udev rules: %m");
        }

        /* fork with up-to-date SELinux label database, so the child inherits the up-to-date db
         * and, until the next SELinux policy changes, we save further reloads in future children */
        mac_selinux_maybe_reload();

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running or queued */
                r = event_is_blocked(event);
                if (r > 0)
                        continue;
                if (r < 0)
                        log_device_warning_errno(event->dev, r,
                                                 "Failed to check dependencies for event (SEQNUM=%"PRIu64", ACTION=%s), "
                                                 "assuming there is no blocking event, ignoring: %m",
                                                 event->seqnum,
                                                 strna(device_action_to_string(event->action)));

                r = event_run(event);
                if (r <= 0) /* 0 means there are no idle workers. Let's escape from the loop. */
                        return r;
        }

        return 0;
}
1003
/* Put a TRY_AGAIN event back into the queue with a retry delay, or give up
 * and report ETIMEDOUT once it has been retried for longer than
 * EVENT_RETRY_TIMEOUT_USEC. */
static int event_requeue(Event *event) {
        usec_t now_usec;
        int r;

        assert(event);
        assert(event->manager);
        assert(event->manager->event);

        event->timeout_warning_event = sd_event_source_disable_unref(event->timeout_warning_event);
        event->timeout_event = sd_event_source_disable_unref(event->timeout_event);

        /* add a short delay to suppress busy loop */
        r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
        if (r < 0)
                return log_device_warning_errno(event->dev, r,
                                                "Failed to get current time, "
                                                "skipping event (SEQNUM=%"PRIu64", ACTION=%s): %m",
                                                event->seqnum, strna(device_action_to_string(event->action)));

        if (event->retry_again_timeout_usec > 0 && event->retry_again_timeout_usec <= now_usec)
                return log_device_warning_errno(event->dev, SYNTHETIC_ERRNO(ETIMEDOUT),
                                                "The underlying block device is locked by a process more than %s, "
                                                "skipping event (SEQNUM=%"PRIu64", ACTION=%s).",
                                                FORMAT_TIMESPAN(EVENT_RETRY_TIMEOUT_USEC, USEC_PER_MINUTE),
                                                event->seqnum, strna(device_action_to_string(event->action)));

        event->retry_again_next_usec = usec_add(now_usec, EVENT_RETRY_INTERVAL_USEC);
        /* Set the give-up deadline only on the first requeue. */
        if (event->retry_again_timeout_usec == 0)
                event->retry_again_timeout_usec = usec_add(now_usec, EVENT_RETRY_TIMEOUT_USEC);

        /* Detach from the worker so any worker may pick the event up later. */
        if (event->worker && event->worker->event == event)
                event->worker->event = NULL;
        event->worker = NULL;

        event->state = EVENT_QUEUED;
        return 0;
}
1041
1042 static int event_queue_assume_block_device_unlocked(Manager *manager, sd_device *dev) {
1043 const char *devname;
1044 int r;
1045
1046 /* When a new event for a block device is queued or we get an inotify event, assume that the
1047 * device is not locked anymore. The assumption may not be true, but that should not cause any
1048 * issues, as in that case events will be requeued soon. */
1049
1050 r = device_get_block_device(dev, &devname);
1051 if (r <= 0)
1052 return r;
1053
1054 LIST_FOREACH(event, event, manager->events) {
1055 const char *event_devname;
1056
1057 if (event->state != EVENT_QUEUED)
1058 continue;
1059
1060 if (event->retry_again_next_usec == 0)
1061 continue;
1062
1063 if (device_get_block_device(event->dev, &event_devname) <= 0)
1064 continue;
1065
1066 if (!streq(devname, event_devname))
1067 continue;
1068
1069 event->retry_again_next_usec = 0;
1070 }
1071
1072 return 0;
1073 }
1074
/* Allocate an Event for 'dev' and append it to the manager's queue. Creates
 * the /run/udev/queue marker file when the queue transitions from empty to
 * non-empty, signalling pending work to other processes (presumably consumed
 * by tools like "udevadm settle" - verify against their implementation). */
static int event_queue_insert(Manager *manager, sd_device *dev) {
        sd_device_action_t action;
        uint64_t seqnum;
        Event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        assert(manager->pid == getpid_cached());

        /* We only accepts devices received by device monitor. */
        r = sd_device_get_seqnum(dev, &seqnum);
        if (r < 0)
                return r;

        r = sd_device_get_action(dev, &action);
        if (r < 0)
                return r;

        event = new(Event, 1);
        if (!event)
                return -ENOMEM;

        *event = (Event) {
                .manager = manager,
                .dev = sd_device_ref(dev),
                .seqnum = seqnum,
                .action = action,
                .state = EVENT_QUEUED,
        };

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "Failed to touch /run/udev/queue, ignoring: %m");
        }

        LIST_APPEND(event, manager->events, event);

        log_device_uevent(dev, "Device is queued");

        return 0;
}
1120
1121 static int on_uevent(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
1122 Manager *manager = userdata;
1123 int r;
1124
1125 assert(manager);
1126
1127 DEVICE_TRACE_POINT(kernel_uevent_received, dev);
1128
1129 device_ensure_usec_initialized(dev, NULL);
1130
1131 r = event_queue_insert(manager, dev);
1132 if (r < 0) {
1133 log_device_error_errno(dev, r, "Failed to insert device into event queue: %m");
1134 return 1;
1135 }
1136
1137 (void) event_queue_assume_block_device_unlocked(manager, dev);
1138
1139 /* we have fresh events, try to schedule them */
1140 event_queue_start(manager);
1141
1142 return 1;
1143 }
1144
/* Event-loop callback for the worker socketpair: drains all pending EventResult messages sent
 * by worker processes, updates per-worker state, and requeues or frees the finished events. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                EventResult result;
                struct iovec iovec = IOVEC_MAKE(&result, sizeof(result));
                CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred))) control;
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                ssize_t size;
                struct ucred *ucred;
                Worker *worker;

                size = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT);
                if (size == -EINTR)
                        continue;
                if (size == -EAGAIN)
                        /* nothing more to read */
                        break;
                if (size < 0)
                        return log_error_errno(size, "Failed to receive message: %m");

                /* Close any file descriptors possibly attached to the message; none are expected. */
                cmsg_close_all(&msghdr);

                if (size != sizeof(EventResult)) {
                        log_warning("Ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                /* SO_PASSCRED is enabled on this socket (see main_loop()), so the kernel attaches
                 * the sender's credentials; use them to identify the worker. */
                ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred);
                if (!ucred || ucred->pid <= 0) {
                        log_warning("Ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("Worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state == WORKER_KILLING) {
                        /* The worker was marked for termination while still busy; now that its
                         * event is done, deliver the deferred SIGTERM. */
                        worker->state = WORKER_KILLED;
                        (void) kill(worker->pid, SIGTERM);
                } else if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                /* NOTE(review): this assumes worker->event is non-NULL when a TRY_AGAIN result
                 * arrives — presumably guaranteed because a result is only sent for an assigned
                 * event; confirm against the worker-side send path. */
                if (result == EVENT_RESULT_TRY_AGAIN &&
                    event_requeue(worker->event) < 0)
                        device_broadcast(manager->monitor, worker->event->dev);

                /* When event_requeue() succeeds, worker->event is NULL, and event_free() handles NULL gracefully. */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
1213
/* receive the udevd message from userspace */
static int on_ctrl_msg(UdevCtrl *uctrl, UdevCtrlMessageType type, const UdevCtrlMessageValue *value, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(value);
        assert(manager);

        switch (type) {
        case UDEV_CTRL_SET_LOG_LEVEL:
                log_debug("Received udev control message (SET_LOG_LEVEL), setting log_level=%i", value->intval);
                log_set_max_level(value->intval);
                manager->log_level = value->intval;
                /* Workers inherit settings on fork; retire the idle ones so replacements pick up
                 * the new log level. */
                manager_kill_workers(manager, false);
                break;
        case UDEV_CTRL_STOP_EXEC_QUEUE:
                log_debug("Received udev control message (STOP_EXEC_QUEUE)");
                manager->stop_exec_queue = true;
                break;
        case UDEV_CTRL_START_EXEC_QUEUE:
                log_debug("Received udev control message (START_EXEC_QUEUE)");
                manager->stop_exec_queue = false;
                /* Dispatch whatever accumulated while the queue was stopped. */
                event_queue_start(manager);
                break;
        case UDEV_CTRL_RELOAD:
                log_debug("Received udev control message (RELOAD)");
                manager_reload(manager);
                break;
        case UDEV_CTRL_SET_ENV: {
                /* value->buf holds "KEY=VAL" (set), or "KEY=" / "KEY" semantics below (unset). */
                _unused_ _cleanup_free_ char *old_val = NULL;
                _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL;
                const char *eq;

                eq = strchr(value->buf, '=');
                if (!eq) {
                        log_error("Invalid key format '%s'", value->buf);
                        return 1;
                }

                key = strndup(value->buf, eq - value->buf);
                if (!key) {
                        log_oom();
                        return 1;
                }

                /* Drop any previous entry for this key; old_key/old_val are freed automatically
                 * on scope exit via the cleanup attributes. */
                old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);

                r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
                if (r < 0) {
                        log_oom();
                        return 1;
                }

                eq++;
                if (isempty(eq)) {
                        log_debug("Received udev control message (ENV), unsetting '%s'", key);

                        /* A NULL value records the property as explicitly unset. */
                        r = hashmap_put(manager->properties, key, NULL);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                } else {
                        val = strdup(eq);
                        if (!val) {
                                log_oom();
                                return 1;
                        }

                        log_debug("Received udev control message (ENV), setting '%s=%s'", key, val);

                        r = hashmap_put(manager->properties, key, val);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                }

                /* Ownership of key/val has moved into the hashmap; clear the pointers so the
                 * cleanup handlers do not free them. */
                key = val = NULL;
                /* Restart idle workers so they inherit the updated property set. */
                manager_kill_workers(manager, false);
                break;
        }
        case UDEV_CTRL_SET_CHILDREN_MAX:
                if (value->intval <= 0) {
                        log_debug("Received invalid udev control message (SET_MAX_CHILDREN, %i), ignoring.", value->intval);
                        return 0;
                }

                log_debug("Received udev control message (SET_MAX_CHILDREN), setting children_max=%i", value->intval);
                arg_children_max = value->intval;

                notify_ready();
                break;
        case UDEV_CTRL_PING:
                /* No action needed: replying happens implicitly once this handler returns. */
                log_debug("Received udev control message (PING)");
                break;
        case UDEV_CTRL_EXIT:
                log_debug("Received udev control message (EXIT)");
                manager_exit(manager);
                break;
        default:
                log_debug("Received unknown udev control message, ignoring");
        }

        return 1;
}
1320
1321 static int synthesize_change_one(sd_device *dev, sd_device *target) {
1322 int r;
1323
1324 if (DEBUG_LOGGING) {
1325 const char *syspath = NULL;
1326 (void) sd_device_get_syspath(target, &syspath);
1327 log_device_debug(dev, "device is closed, synthesising 'change' on %s", strna(syspath));
1328 }
1329
1330 r = sd_device_trigger(target, SD_DEVICE_CHANGE);
1331 if (r < 0)
1332 return log_device_debug_errno(target, r, "Failed to trigger 'change' uevent: %m");
1333
1334 DEVICE_TRACE_POINT(synthetic_change_event, dev);
1335
1336 return 0;
1337 }
1338
/* Synthesize "change" uevents after a device node was closed for writing. For a whole disk,
 * first try to make the kernel re-read the partition table (which generates the events for us);
 * fall back to manually triggering "change" on the disk and each partition. */
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devtype;
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        /* Whole disks (except device-mapper ones, which manage their own events) get the
         * partition-table treatment; everything else just gets a single "change". */
        if (streq_ptr(subsystem, "block") &&
            streq_ptr(devtype, "disk") &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                const char *devname;
                sd_device *d;
                int fd;

                r = sd_device_get_devname(dev, &devname);
                if (r < 0)
                        return r;

                /* Try to re-read the partition table. This only succeeds if none of the devices is
                 * busy. The kernel returns 0 if no partition table is found, and we will not get an
                 * event for the disk. */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        /* LOCK_NB: do not wait if another process holds the BSD lock. */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
                                continue;

                        has_partitions = true;
                        break;
                }

                /* We have partitions and re-read the table, the kernel already sent out a "change"
                 * event for the disk, and "remove/add" for all partitions. */
                if (part_table_read && has_partitions)
                        return 0;

                /* We have partitions but re-reading the partition table did not work, synthesize
                 * "change" for the disk and all partitions. */
                (void) synthesize_change_one(dev, dev);

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
                                continue;

                        (void) synthesize_change_one(dev, d);
                }

        } else
                (void) synthesize_change_one(dev, dev);

        return 0;
}
1432
1433 static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1434 Manager *manager = userdata;
1435 union inotify_event_buffer buffer;
1436 ssize_t l;
1437 int r;
1438
1439 assert(manager);
1440
1441 r = event_source_disable(manager->kill_workers_event);
1442 if (r < 0)
1443 log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
1444
1445 l = read(fd, &buffer, sizeof(buffer));
1446 if (l < 0) {
1447 if (ERRNO_IS_TRANSIENT(errno))
1448 return 1;
1449
1450 return log_error_errno(errno, "Failed to read inotify fd: %m");
1451 }
1452
1453 FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
1454 _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
1455 const char *devnode;
1456
1457 r = device_new_from_watch_handle(&dev, e->wd);
1458 if (r < 0) {
1459 log_debug_errno(r, "Failed to create sd_device object from watch handle, ignoring: %m");
1460 continue;
1461 }
1462
1463 if (sd_device_get_devname(dev, &devnode) < 0)
1464 continue;
1465
1466 log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
1467 if (e->mask & IN_CLOSE_WRITE) {
1468 (void) event_queue_assume_block_device_unlocked(manager, dev);
1469 (void) synthesize_change(dev);
1470 }
1471
1472 /* Do not handle IN_IGNORED here. It should be handled by worker in 'remove' uevent;
1473 * udev_event_execute_rules() -> event_execute_rules_on_remove() -> udev_watch_end(). */
1474 }
1475
1476 return 1;
1477 }
1478
1479 static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1480 Manager *manager = userdata;
1481
1482 assert(manager);
1483
1484 manager_exit(manager);
1485
1486 return 1;
1487 }
1488
1489 static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1490 Manager *manager = userdata;
1491
1492 assert(manager);
1493
1494 manager_reload(manager);
1495
1496 return 1;
1497 }
1498
/* SIGCHLD handler: reap all exited workers, publish the kernel event for any event whose worker
 * died mid-processing, and free the worker bookkeeping. */
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        /* Signals coalesce, so loop until no more children are reapable. */
        for (;;) {
                pid_t pid;
                int status;
                Worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("Worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                /* Decode how the worker ended; stopped/continued children are still alive, so do
                 * not tear down their state. */
                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("Worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("Worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status))
                        log_warning("Worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                else if (WIFSTOPPED(status)) {
                        log_info("Worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("Worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("Worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                /* An abnormal end while an event was assigned: discard the half-written database
                 * state and still forward the kernel event so listeners are not left waiting. */
                if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
                        log_device_error(worker->event->dev, "Worker ["PID_FMT"] failed", pid);

                        /* delete state from disk */
                        device_delete_db(worker->event->dev);
                        device_tag_index(worker->event->dev, NULL, false);

                        /* Forward kernel event to libudev listeners */
                        device_broadcast(manager->monitor, worker->event->dev);
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers)) {
                r = event_source_disable(manager->kill_workers_event);
                if (r < 0)
                        log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
        }

        return 1;
}
1562
1563 static int on_post(sd_event_source *s, void *userdata) {
1564 Manager *manager = userdata;
1565
1566 assert(manager);
1567
1568 if (!LIST_IS_EMPTY(manager->events)) {
1569 /* Try to process pending events if idle workers exist. Why is this necessary?
1570 * When a worker finished an event and became idle, even if there was a pending event,
1571 * the corresponding device might have been locked and the processing of the event
1572 * delayed for a while, preventing the worker from processing the event immediately.
1573 * Now, the device may be unlocked. Let's try again! */
1574 event_queue_start(manager);
1575 return 1;
1576 }
1577
1578 /* There are no queued events. Let's remove /run/udev/queue and clean up the idle processes. */
1579
1580 if (unlink("/run/udev/queue") < 0) {
1581 if (errno != ENOENT)
1582 log_warning_errno(errno, "Failed to unlink /run/udev/queue, ignoring: %m");
1583 } else
1584 log_debug("No events are queued, removing /run/udev/queue.");
1585
1586 if (!hashmap_isempty(manager->workers)) {
1587 /* There are idle workers */
1588 (void) event_reset_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
1589 now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC,
1590 on_kill_workers_event, manager, 0, "kill-workers-event", false);
1591 return 1;
1592 }
1593
1594 /* There are no idle workers. */
1595
1596 if (manager->exit)
1597 return sd_event_exit(manager->event, 0);
1598
1599 if (manager->cgroup)
1600 /* cleanup possible left-over processes in our cgroup */
1601 (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
1602
1603 return 1;
1604 }
1605
1606 static int listen_fds(int *ret_ctrl, int *ret_netlink) {
1607 int ctrl_fd = -1, netlink_fd = -1;
1608 int fd, n;
1609
1610 assert(ret_ctrl);
1611 assert(ret_netlink);
1612
1613 n = sd_listen_fds(true);
1614 if (n < 0)
1615 return n;
1616
1617 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1618 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1) > 0) {
1619 if (ctrl_fd >= 0)
1620 return -EINVAL;
1621 ctrl_fd = fd;
1622 continue;
1623 }
1624
1625 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
1626 if (netlink_fd >= 0)
1627 return -EINVAL;
1628 netlink_fd = fd;
1629 continue;
1630 }
1631
1632 return -EINVAL;
1633 }
1634
1635 *ret_ctrl = ctrl_fd;
1636 *ret_netlink = netlink_fd;
1637
1638 return 0;
1639 }
1640
/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_level=<level>                  syslog priority
 *   udev.children_max=<number of workers>   events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>     delay execution of every executed program
 *   udev.event_timeout=<number of seconds>  seconds to wait before terminating an event
 *   udev.blockdev_read_only=<bool>          mark all block devices read-only when they appear
 */
1649 static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
1650 int r;
1651
1652 assert(key);
1653
1654 if (proc_cmdline_key_streq(key, "udev.log_level") ||
1655 proc_cmdline_key_streq(key, "udev.log_priority")) { /* kept for backward compatibility */
1656
1657 if (proc_cmdline_value_missing(key, value))
1658 return 0;
1659
1660 r = log_level_from_string(value);
1661 if (r >= 0)
1662 log_set_max_level(r);
1663
1664 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1665
1666 if (proc_cmdline_value_missing(key, value))
1667 return 0;
1668
1669 r = parse_sec(value, &arg_event_timeout_usec);
1670
1671 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1672
1673 if (proc_cmdline_value_missing(key, value))
1674 return 0;
1675
1676 r = safe_atou(value, &arg_children_max);
1677
1678 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1679
1680 if (proc_cmdline_value_missing(key, value))
1681 return 0;
1682
1683 r = parse_sec(value, &arg_exec_delay_usec);
1684
1685 } else if (proc_cmdline_key_streq(key, "udev.timeout_signal")) {
1686
1687 if (proc_cmdline_value_missing(key, value))
1688 return 0;
1689
1690 r = signal_from_string(value);
1691 if (r > 0)
1692 arg_timeout_signal = r;
1693
1694 } else if (proc_cmdline_key_streq(key, "udev.blockdev_read_only")) {
1695
1696 if (!value)
1697 arg_blockdev_read_only = true;
1698 else {
1699 r = parse_boolean(value);
1700 if (r < 0)
1701 log_warning_errno(r, "Failed to parse udev.blockdev-read-only argument, ignoring: %s", value);
1702 else
1703 arg_blockdev_read_only = r;
1704 }
1705
1706 if (arg_blockdev_read_only)
1707 log_notice("All physical block devices will be marked read-only.");
1708
1709 return 0;
1710
1711 } else {
1712 if (startswith(key, "udev."))
1713 log_warning("Unknown udev kernel command line option \"%s\", ignoring.", key);
1714
1715 return 0;
1716 }
1717
1718 if (r < 0)
1719 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1720
1721 return 0;
1722 }
1723
1724 static int help(void) {
1725 _cleanup_free_ char *link = NULL;
1726 int r;
1727
1728 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1729 if (r < 0)
1730 return log_oom();
1731
1732 printf("%s [OPTIONS...]\n\n"
1733 "Rule-based manager for device events and files.\n\n"
1734 " -h --help Print this message\n"
1735 " -V --version Print version of the program\n"
1736 " -d --daemon Detach and run in the background\n"
1737 " -D --debug Enable debug output\n"
1738 " -c --children-max=INT Set maximum number of workers\n"
1739 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1740 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1741 " -N --resolve-names=early|late|never\n"
1742 " When to resolve users and groups\n"
1743 "\nSee the %s for details.\n",
1744 program_invocation_short_name,
1745 link);
1746
1747 return 0;
1748 }
1749
1750 static int parse_argv(int argc, char *argv[]) {
1751 enum {
1752 ARG_TIMEOUT_SIGNAL,
1753 };
1754
1755 static const struct option options[] = {
1756 { "daemon", no_argument, NULL, 'd' },
1757 { "debug", no_argument, NULL, 'D' },
1758 { "children-max", required_argument, NULL, 'c' },
1759 { "exec-delay", required_argument, NULL, 'e' },
1760 { "event-timeout", required_argument, NULL, 't' },
1761 { "resolve-names", required_argument, NULL, 'N' },
1762 { "help", no_argument, NULL, 'h' },
1763 { "version", no_argument, NULL, 'V' },
1764 { "timeout-signal", required_argument, NULL, ARG_TIMEOUT_SIGNAL },
1765 {}
1766 };
1767
1768 int c, r;
1769
1770 assert(argc >= 0);
1771 assert(argv);
1772
1773 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
1774 switch (c) {
1775
1776 case 'd':
1777 arg_daemonize = true;
1778 break;
1779 case 'c':
1780 r = safe_atou(optarg, &arg_children_max);
1781 if (r < 0)
1782 log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
1783 break;
1784 case 'e':
1785 r = parse_sec(optarg, &arg_exec_delay_usec);
1786 if (r < 0)
1787 log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
1788 break;
1789 case ARG_TIMEOUT_SIGNAL:
1790 r = signal_from_string(optarg);
1791 if (r <= 0)
1792 log_warning_errno(r, "Failed to parse --timeout-signal= value '%s', ignoring: %m", optarg);
1793 else
1794 arg_timeout_signal = r;
1795
1796 break;
1797 case 't':
1798 r = parse_sec(optarg, &arg_event_timeout_usec);
1799 if (r < 0)
1800 log_warning_errno(r, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg);
1801 break;
1802 case 'D':
1803 arg_debug = true;
1804 break;
1805 case 'N': {
1806 ResolveNameTiming t;
1807
1808 t = resolve_name_timing_from_string(optarg);
1809 if (t < 0)
1810 log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
1811 else
1812 arg_resolve_name_timing = t;
1813 break;
1814 }
1815 case 'h':
1816 return help();
1817 case 'V':
1818 printf("%s\n", GIT_VERSION);
1819 return 0;
1820 case '?':
1821 return -EINVAL;
1822 default:
1823 assert_not_reached();
1824
1825 }
1826 }
1827
1828 return 1;
1829 }
1830
1831 static int create_subcgroup(char **ret) {
1832 _cleanup_free_ char *cgroup = NULL, *subcgroup = NULL;
1833 int r;
1834
1835 if (getppid() != 1)
1836 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "Not invoked by PID1.");
1837
1838 r = sd_booted();
1839 if (r < 0)
1840 return log_debug_errno(r, "Failed to check if systemd is running: %m");
1841 if (r == 0)
1842 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "systemd is not running.");
1843
1844 /* Get our own cgroup, we regularly kill everything udev has left behind.
1845 * We only do this on systemd systems, and only if we are directly spawned
1846 * by PID1. Otherwise we are not guaranteed to have a dedicated cgroup. */
1847
1848 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
1849 if (r < 0) {
1850 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
1851 return log_debug_errno(r, "Dedicated cgroup not found: %m");
1852 return log_debug_errno(r, "Failed to get cgroup: %m");
1853 }
1854
1855 r = cg_get_xattr_bool(SYSTEMD_CGROUP_CONTROLLER, cgroup, "trusted.delegate");
1856 if (IN_SET(r, 0, -ENODATA))
1857 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "The cgroup %s is not delegated to us.", cgroup);
1858 if (r < 0)
1859 return log_debug_errno(r, "Failed to read trusted.delegate attribute: %m");
1860
1861 /* We are invoked with our own delegated cgroup tree, let's move us one level down, so that we
1862 * don't collide with the "no processes in inner nodes" rule of cgroups, when the service
1863 * manager invokes the ExecReload= job in the .control/ subcgroup. */
1864
1865 subcgroup = path_join(cgroup, "/udev");
1866 if (!subcgroup)
1867 return log_oom_debug();
1868
1869 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, subcgroup, 0);
1870 if (r < 0)
1871 return log_debug_errno(r, "Failed to create %s subcgroup: %m", subcgroup);
1872
1873 log_debug("Created %s subcgroup.", subcgroup);
1874 if (ret)
1875 *ret = TAKE_PTR(subcgroup);
1876 return 0;
1877 }
1878
1879 static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent) {
1880 _cleanup_(manager_freep) Manager *manager = NULL;
1881 _cleanup_free_ char *cgroup = NULL;
1882 int r;
1883
1884 assert(ret);
1885
1886 (void) create_subcgroup(&cgroup);
1887
1888 manager = new(Manager, 1);
1889 if (!manager)
1890 return log_oom();
1891
1892 *manager = (Manager) {
1893 .inotify_fd = -1,
1894 .worker_watch = { -1, -1 },
1895 .cgroup = TAKE_PTR(cgroup),
1896 };
1897
1898 r = udev_ctrl_new_from_fd(&manager->ctrl, fd_ctrl);
1899 if (r < 0)
1900 return log_error_errno(r, "Failed to initialize udev control socket: %m");
1901
1902 r = udev_ctrl_enable_receiving(manager->ctrl);
1903 if (r < 0)
1904 return log_error_errno(r, "Failed to bind udev control socket: %m");
1905
1906 r = device_monitor_new_full(&manager->monitor, MONITOR_GROUP_KERNEL, fd_uevent);
1907 if (r < 0)
1908 return log_error_errno(r, "Failed to initialize device monitor: %m");
1909
1910 /* Bump receiver buffer, but only if we are not called via socket activation, as in that
1911 * case systemd sets the receive buffer size for us, and the value in the .socket unit
1912 * should take full effect. */
1913 if (fd_uevent < 0) {
1914 r = sd_device_monitor_set_receive_buffer_size(manager->monitor, 128 * 1024 * 1024);
1915 if (r < 0)
1916 log_warning_errno(r, "Failed to set receive buffer size for device monitor, ignoring: %m");
1917 }
1918
1919 r = device_monitor_enable_receiving(manager->monitor);
1920 if (r < 0)
1921 return log_error_errno(r, "Failed to bind netlink socket: %m");
1922
1923 manager->log_level = log_get_max_level();
1924
1925 *ret = TAKE_PTR(manager);
1926
1927 return 0;
1928 }
1929
1930 static int main_loop(Manager *manager) {
1931 int fd_worker, r;
1932
1933 manager->pid = getpid_cached();
1934
1935 /* unnamed socket from workers to the main daemon */
1936 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1937 if (r < 0)
1938 return log_error_errno(errno, "Failed to create socketpair for communicating with workers: %m");
1939
1940 fd_worker = manager->worker_watch[READ_END];
1941
1942 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
1943 if (r < 0)
1944 return log_error_errno(r, "Failed to enable SO_PASSCRED: %m");
1945
1946 manager->inotify_fd = inotify_init1(IN_CLOEXEC);
1947 if (manager->inotify_fd < 0)
1948 return log_error_errno(errno, "Failed to create inotify descriptor: %m");
1949
1950 udev_watch_restore(manager->inotify_fd);
1951
1952 /* block and listen to all signals on signalfd */
1953 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
1954
1955 r = sd_event_default(&manager->event);
1956 if (r < 0)
1957 return log_error_errno(r, "Failed to allocate event loop: %m");
1958
1959 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1960 if (r < 0)
1961 return log_error_errno(r, "Failed to create SIGINT event source: %m");
1962
1963 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1964 if (r < 0)
1965 return log_error_errno(r, "Failed to create SIGTERM event source: %m");
1966
1967 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1968 if (r < 0)
1969 return log_error_errno(r, "Failed to create SIGHUP event source: %m");
1970
1971 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1972 if (r < 0)
1973 return log_error_errno(r, "Failed to create SIGCHLD event source: %m");
1974
1975 r = sd_event_set_watchdog(manager->event, true);
1976 if (r < 0)
1977 return log_error_errno(r, "Failed to create watchdog event source: %m");
1978
1979 r = udev_ctrl_attach_event(manager->ctrl, manager->event);
1980 if (r < 0)
1981 return log_error_errno(r, "Failed to attach event to udev control: %m");
1982
1983 r = udev_ctrl_start(manager->ctrl, on_ctrl_msg, manager);
1984 if (r < 0)
1985 return log_error_errno(r, "Failed to start device monitor: %m");
1986
1987 /* This needs to be after the inotify and uevent handling, to make sure
1988 * that the ping is send back after fully processing the pending uevents
1989 * (including the synthetic ones we may create due to inotify events).
1990 */
1991 r = sd_event_source_set_priority(udev_ctrl_get_event_source(manager->ctrl), SD_EVENT_PRIORITY_IDLE);
1992 if (r < 0)
1993 return log_error_errno(r, "Failed to set IDLE event priority for udev control event source: %m");
1994
1995 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->inotify_fd, EPOLLIN, on_inotify, manager);
1996 if (r < 0)
1997 return log_error_errno(r, "Failed to create inotify event source: %m");
1998
1999 r = sd_device_monitor_attach_event(manager->monitor, manager->event);
2000 if (r < 0)
2001 return log_error_errno(r, "Failed to attach event to device monitor: %m");
2002
2003 r = sd_device_monitor_start(manager->monitor, on_uevent, manager);
2004 if (r < 0)
2005 return log_error_errno(r, "Failed to start device monitor: %m");
2006
2007 (void) sd_event_source_set_description(sd_device_monitor_get_event_source(manager->monitor), "device-monitor");
2008
2009 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
2010 if (r < 0)
2011 return log_error_errno(r, "Failed to create worker event source: %m");
2012
2013 r = sd_event_add_post(manager->event, NULL, on_post, manager);
2014 if (r < 0)
2015 return log_error_errno(r, "Failed to create post event source: %m");
2016
2017 udev_builtin_init();
2018
2019 r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
2020 if (!manager->rules)
2021 return log_error_errno(r, "Failed to read udev rules: %m");
2022
2023 r = udev_rules_apply_static_dev_perms(manager->rules);
2024 if (r < 0)
2025 log_error_errno(r, "Failed to apply permissions on static device nodes: %m");
2026
2027 notify_ready();
2028
2029 r = sd_event_loop(manager->event);
2030 if (r < 0)
2031 log_error_errno(r, "Event loop failed: %m");
2032
2033 sd_notify(false,
2034 "STOPPING=1\n"
2035 "STATUS=Shutting down...");
2036 return r;
2037 }
2038
/* Entry point (invoked via the project's DEFINE_MAIN_FUNCTION machinery): parse configuration
 * from file, environment, argv and kernel command line, set up the manager, optionally
 * daemonize, then hand control to main_loop(). */
int run_udevd(int argc, char *argv[]) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        log_open();
        udev_parse_config_full(&arg_children_max, &arg_exec_delay_usec, &arg_event_timeout_usec, &arg_resolve_name_timing, &arg_timeout_signal);
        log_parse_environment();
        log_open(); /* Done again to update after reading configuration. */

        /* parse_argv() returns 0 for --help/--version; in that case exit successfully. */
        r = parse_argv(argc, argv);
        if (r <= 0)
                return r;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                return r;

        /* No explicit worker limit configured: derive one from CPU count and physical memory,
         * capped at WORKER_NUM_MAX. */
        if (arg_children_max == 0) {
                unsigned long cpu_limit, mem_limit, cpu_count = 1;

                r = cpus_in_affinity_mask();
                if (r < 0)
                        log_warning_errno(r, "Failed to determine number of local CPUs, ignoring: %m");
                else
                        cpu_count = r;

                cpu_limit = cpu_count * 2 + 16;
                mem_limit = MAX(physical_memory() / (128UL*1024*1024), 10U);

                arg_children_max = MIN(cpu_limit, mem_limit);
                arg_children_max = MIN(WORKER_NUM_MAX, arg_children_max);

                log_debug("Set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        umask(022);

        r = mac_selinux_init();
        if (r < 0)
                return r;

        r = RET_NERRNO(mkdir("/run/udev", 0755));
        if (r < 0 && r != -EEXIST)
                return log_error_errno(r, "Failed to create /run/udev: %m");

        /* Pick up any socket-activation fds handed to us by the service manager. */
        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to listen on fds: %m");

        r = manager_new(&manager, fd_ctrl, fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to create manager: %m");

        if (arg_daemonize) {
                pid_t pid;

                log_info("Starting version " GIT_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                if (pid < 0)
                        return log_error_errno(errno, "Failed to fork daemon: %m");
                if (pid > 0)
                        /* parent */
                        return 0;

                /* child */
                /* Detach from the controlling terminal and become a session leader. */
                (void) setsid();
        }

        return main_loop(manager);
}