/* NOTE(review): the original first three lines were gitweb page chrome from
 * git.ipfire.org (thirdparty/systemd.git, src/udev/udevd.c) — a scrape
 * artifact, not source code — replaced by this comment. */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
6 */
7
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <getopt.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <sys/epoll.h>
16 #include <sys/file.h>
17 #include <sys/inotify.h>
18 #include <sys/ioctl.h>
19 #include <sys/mount.h>
20 #include <sys/prctl.h>
21 #include <sys/signalfd.h>
22 #include <sys/stat.h>
23 #include <sys/time.h>
24 #include <sys/wait.h>
25 #include <unistd.h>
26
27 #include "sd-daemon.h"
28 #include "sd-event.h"
29
30 #include "alloc-util.h"
31 #include "cgroup-setup.h"
32 #include "cgroup-util.h"
33 #include "cpu-set-util.h"
34 #include "dev-setup.h"
35 #include "device-monitor-private.h"
36 #include "device-private.h"
37 #include "device-util.h"
38 #include "errno-list.h"
39 #include "event-util.h"
40 #include "fd-util.h"
41 #include "fileio.h"
42 #include "format-util.h"
43 #include "fs-util.h"
44 #include "hashmap.h"
45 #include "inotify-util.h"
46 #include "io-util.h"
47 #include "limits-util.h"
48 #include "list.h"
49 #include "main-func.h"
50 #include "mkdir.h"
51 #include "netlink-util.h"
52 #include "parse-util.h"
53 #include "path-util.h"
54 #include "pretty-print.h"
55 #include "proc-cmdline.h"
56 #include "process-util.h"
57 #include "selinux-util.h"
58 #include "signal-util.h"
59 #include "socket-util.h"
60 #include "string-util.h"
61 #include "strv.h"
62 #include "strxcpyx.h"
63 #include "syslog-util.h"
64 #include "udevd.h"
65 #include "udev-builtin.h"
66 #include "udev-ctrl.h"
67 #include "udev-event.h"
68 #include "udev-util.h"
69 #include "udev-watch.h"
70 #include "user-util.h"
71 #include "version.h"
72
73 #define WORKER_NUM_MAX 2048U
74 #define EVENT_RETRY_INTERVAL_USEC (200 * USEC_PER_MSEC)
75 #define EVENT_RETRY_TIMEOUT_USEC (3 * USEC_PER_MINUTE)
76
77 static bool arg_debug = false;
78 static int arg_daemonize = false;
79 static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
80 static unsigned arg_children_max = 0;
81 static usec_t arg_exec_delay_usec = 0;
82 static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
83 static int arg_timeout_signal = SIGKILL;
84 static bool arg_blockdev_read_only = false;
85
typedef struct Event Event;
typedef struct Worker Worker;

/* Global daemon state; a worker inherits a copy via fork() and then trims it
 * with manager_clear_for_worker(). */
typedef struct Manager {
        sd_event *event;                 /* the main event loop */
        Hashmap *workers;                /* PID -> Worker, owns the Worker objects */
        LIST_HEAD(Event, events);        /* queue of pending/running events */
        char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */
        int log_level;

        UdevRules *rules;                /* currently loaded udev rules */
        Hashmap *properties;             /* global properties applied to events */

        sd_netlink *rtnl;

        sd_device_monitor *monitor;      /* uevent monitor (source of new events) */
        UdevCtrl *ctrl;                  /* control connection (udevadm control) */
        int worker_watch[2];             /* socketpair; workers write EventResult to WRITE_END */

        /* used by udev-watch */
        int inotify_fd;
        sd_event_source *inotify_event;

        sd_event_source *kill_workers_event;  /* timer that SIGTERMs idle workers */

        usec_t last_usec;                /* timestamp of the last reload check (rate limiting) */

        bool stop_exec_queue;            /* when set, event_queue_start() dispatches nothing */
        bool exit;                       /* shutdown requested; see manager_exit() */
} Manager;
117
/* Lifecycle of an Event: freshly allocated (UNDEF) -> waiting in the queue
 * (QUEUED) -> assigned to a worker (RUNNING). */
typedef enum EventState {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
} EventState;
123
/* One queued uevent; linked into Manager.events in seqnum order. */
typedef struct Event {
        Manager *manager;
        Worker *worker;          /* the worker currently processing this event, if any */
        EventState state;

        sd_device *dev;          /* referenced device object */

        sd_device_action_t action;
        uint64_t seqnum;         /* kernel uevent sequence number */
        uint64_t blocker_seqnum; /* cached result of event_is_blocked(): last known blocker */
        const char *id;          /* borrowed from dev; used for dependency checks */
        const char *devpath;
        const char *devpath_old;
        const char *devnode;
        usec_t retry_again_next_usec;    /* requeued (locked block device): do not retry before this time */
        usec_t retry_again_timeout_usec; /* requeued: give up entirely after this time */

        sd_event_source *timeout_warning_event;  /* logs "taking a long time" */
        sd_event_source *timeout_event;          /* kills the worker on hard timeout */

        LIST_FIELDS(Event, event);
} Event;
146
/* Worker lifecycle. KILLING marks a busy worker that should be terminated once
 * its current event completes; KILLED means SIGTERM was already sent (see
 * manager_kill_workers()). */
typedef enum WorkerState {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
        WORKER_KILLING,
} WorkerState;
154
/* Bookkeeping for one forked worker process; owned by Manager.workers. */
typedef struct Worker {
        Manager *manager;
        pid_t pid;
        sd_event_source *child_event_source;  /* SIGCHLD watch (on_sigchld) */
        sd_device_monitor *monitor;           /* unicast monitor used to pass devices to this worker */
        WorkerState state;
        Event *event;                         /* the event currently assigned to this worker */
} Worker;
163
/* passed from worker to main process */
typedef enum EventResult {
        /* Negative errno values (processing failed with an error). */
        EVENT_RESULT_NERRNO_MIN = -ERRNO_MAX,
        EVENT_RESULT_NERRNO_MAX = -1,
        EVENT_RESULT_SUCCESS = 0,
        /* Exit statuses of the worker process (0..255). */
        EVENT_RESULT_EXIT_STATUS_BASE = 0,
        EVENT_RESULT_EXIT_STATUS_MAX = 255,
        EVENT_RESULT_TRY_AGAIN = 256, /* when the block device is locked by another process. */
        /* Signal numbers that terminated the worker, offset by the base. */
        EVENT_RESULT_SIGNAL_BASE = 257,
        EVENT_RESULT_SIGNAL_MAX = EVENT_RESULT_SIGNAL_BASE + _NSIG,
        _EVENT_RESULT_MAX,
        _EVENT_RESULT_INVALID = -EINVAL,
} EventResult;
177
178 static Event *event_free(Event *event) {
179 if (!event)
180 return NULL;
181
182 assert(event->manager);
183
184 LIST_REMOVE(event, event->manager->events, event);
185 sd_device_unref(event->dev);
186
187 /* Do not use sd_event_source_disable_unref() here, as this is called by both workers and the
188 * main process. */
189 sd_event_source_unref(event->timeout_warning_event);
190 sd_event_source_unref(event->timeout_event);
191
192 if (event->worker)
193 event->worker->event = NULL;
194
195 return mfree(event);
196 }
197
198 static void event_queue_cleanup(Manager *manager, EventState match_state) {
199 LIST_FOREACH(event, event, manager->events) {
200 if (match_state != EVENT_UNDEF && match_state != event->state)
201 continue;
202
203 event_free(event);
204 }
205 }
206
207 static Worker *worker_free(Worker *worker) {
208 if (!worker)
209 return NULL;
210
211 if (worker->manager)
212 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
213
214 sd_event_source_unref(worker->child_event_source);
215 sd_device_monitor_unref(worker->monitor);
216 event_free(worker->event);
217
218 return mfree(worker);
219 }
220
/* Hashmap ops for Manager.workers: removing/destroying an entry frees the Worker. */
DEFINE_TRIVIAL_CLEANUP_FUNC(Worker*, worker_free);
DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(worker_hash_op, void, trivial_hash_func, trivial_compare_func, Worker, worker_free);
223
/* Release the state a freshly forked worker must not share with the main
 * daemon: event sources, the event loop, the worker table, queued events, and
 * the main-process communication channels. Also reused by manager_free(). */
static void manager_clear_for_worker(Manager *manager) {
        assert(manager);

        /* Do not use sd_event_source_disable_unref() here, as this is called by both workers and the
         * main process. */
        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);

        /* Sources must go before the loop they are attached to. */
        manager->event = sd_event_unref(manager->event);

        manager->workers = hashmap_free(manager->workers);
        event_queue_cleanup(manager, EVENT_UNDEF); /* EVENT_UNDEF == drop all queued events */

        manager->monitor = sd_device_monitor_unref(manager->monitor);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        /* Close only the read end; workers still write results to WRITE_END
         * (see worker_send_result()). */
        manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
}
242
/* Full destructor for a Manager; safe to call with NULL, always returns NULL. */
static Manager* manager_free(Manager *manager) {
        if (!manager)
                return NULL;

        udev_builtin_exit();

        /* Frees the event loop, workers, queue, monitor, control and READ_END. */
        manager_clear_for_worker(manager);

        sd_netlink_unref(manager->rtnl);

        hashmap_free_free_free(manager->properties); /* frees keys and values as well */
        udev_rules_free(manager->rules);

        safe_close(manager->inotify_fd);
        safe_close_pair(manager->worker_watch);

        free(manager->cgroup);
        return mfree(manager);
}
262
263 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
264
265 static int on_sigchld(sd_event_source *s, const siginfo_t *si, void *userdata);
266
/* Allocate a Worker object for the already-forked process 'pid' and register
 * it with the manager. On success stores the new worker in *ret and returns 0;
 * on failure returns a negative errno and cleans up via worker_freep. */
static int worker_new(Worker **ret, Manager *manager, sd_device_monitor *worker_monitor, pid_t pid) {
        _cleanup_(worker_freep) Worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        /* close monitor, but keep address around */
        device_monitor_disconnect(worker_monitor);

        worker = new(Worker, 1);
        if (!worker)
                return -ENOMEM;

        *worker = (Worker) {
                .monitor = sd_device_monitor_ref(worker_monitor),
                .pid = pid,
        };

        r = sd_event_add_child(manager->event, &worker->child_event_source, pid, WEXITED, on_sigchld, worker);
        if (r < 0)
                return r;

        r = hashmap_ensure_put(&manager->workers, &worker_hash_op, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        /* Set the back-pointer only after the hashmap owns the worker, so that
         * worker_free() on an earlier failure path does not try to remove an
         * entry that was never inserted. */
        worker->manager = manager;

        *ret = TAKE_PTR(worker);
        return 0;
}
301
302 static void manager_kill_workers(Manager *manager, bool force) {
303 Worker *worker;
304
305 assert(manager);
306
307 HASHMAP_FOREACH(worker, manager->workers) {
308 if (worker->state == WORKER_KILLED)
309 continue;
310
311 if (worker->state == WORKER_RUNNING && !force) {
312 worker->state = WORKER_KILLING;
313 continue;
314 }
315
316 worker->state = WORKER_KILLED;
317 (void) kill(worker->pid, SIGTERM);
318 }
319 }
320
/* Begin daemon shutdown: stop accepting new events, drop pending ones, and
 * forcibly terminate all workers. */
static void manager_exit(Manager *manager) {
        assert(manager);

        /* Makes event_queue_start() a no-op from now on. */
        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_disable_unref(manager->inotify_event);
        manager->inotify_fd = safe_close(manager->inotify_fd);

        manager->monitor = sd_device_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager, /* force = */ true);
}
342
343 static void notify_ready(void) {
344 int r;
345
346 r = sd_notifyf(false,
347 "READY=1\n"
348 "STATUS=Processing with %u children at max", arg_children_max);
349 if (r < 0)
350 log_warning_errno(r, "Failed to send readiness notification, ignoring: %m");
351 }
352
/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager, bool force) {
        _cleanup_(udev_rules_freep) UdevRules *rules = NULL;
        usec_t now_usec;
        int r;

        assert(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &now_usec) >= 0);
        if (!force && now_usec < usec_add(manager->last_usec, 3 * USEC_PER_SEC))
                /* check for changed config, every 3 seconds at most */
                return;
        manager->last_usec = now_usec;

        /* Reload SELinux label database, to make the child inherit the up-to-date database. */
        mac_selinux_maybe_reload();

        /* Nothing changed. It is not necessary to reload. */
        if (!udev_rules_should_reload(manager->rules) && !udev_builtin_validate())
                return;

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        /* Idle workers still carry the old rules; retire them (busy ones finish first). */
        manager_kill_workers(manager, false);

        udev_builtin_exit();
        udev_builtin_init();

        /* Keep the previously loaded rules if the new set fails to parse. */
        r = udev_rules_load(&rules, arg_resolve_name_timing);
        if (r < 0)
                log_warning_errno(r, "Failed to read udev rules, using the previously loaded rules, ignoring: %m");
        else
                udev_rules_free_and_replace(manager->rules, rules);

        /* Ends the RELOADING=1 state announced above. */
        notify_ready();
}
391
/* Timer callback (Manager.kill_workers_event): retire workers that are
 * sitting idle. Busy workers are only marked and killed once they finish. */
static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_debug("Cleanup idle workers");
        manager_kill_workers(manager, false);

        return 1;
}
402
403 static void device_broadcast(sd_device_monitor *monitor, sd_device *dev, EventResult result) {
404 int r;
405
406 assert(dev);
407
408 /* On exit, manager->monitor is already NULL. */
409 if (!monitor)
410 return;
411
412 if (result != EVENT_RESULT_SUCCESS) {
413 (void) device_add_property(dev, "UDEV_WORKER_FAILED", "1");
414
415 switch (result) {
416 case EVENT_RESULT_NERRNO_MIN ... EVENT_RESULT_NERRNO_MAX: {
417 const char *str;
418
419 (void) device_add_propertyf(dev, "UDEV_WORKER_ERRNO", "%i", -result);
420
421 str = errno_to_name(result);
422 if (str)
423 (void) device_add_property(dev, "UDEV_WORKER_ERRNO_NAME", str);
424 break;
425 }
426 case EVENT_RESULT_EXIT_STATUS_BASE ... EVENT_RESULT_EXIT_STATUS_MAX:
427 (void) device_add_propertyf(dev, "UDEV_WORKER_EXIT_STATUS", "%i", result - EVENT_RESULT_EXIT_STATUS_BASE);
428 break;
429
430 case EVENT_RESULT_TRY_AGAIN:
431 assert_not_reached();
432 break;
433
434 case EVENT_RESULT_SIGNAL_BASE ... EVENT_RESULT_SIGNAL_MAX: {
435 const char *str;
436
437 (void) device_add_propertyf(dev, "UDEV_WORKER_SIGNAL", "%i", result - EVENT_RESULT_SIGNAL_BASE);
438
439 str = signal_to_string(result - EVENT_RESULT_SIGNAL_BASE);
440 if (str)
441 (void) device_add_property(dev, "UDEV_WORKER_SIGNAL_NAME", str);
442 break;
443 }
444 default:
445 log_device_warning(dev, "Unknown event result \"%i\", ignoring.", result);
446 }
447 }
448
449 r = device_monitor_send_device(monitor, NULL, dev);
450 if (r < 0)
451 log_device_warning_errno(dev, r,
452 "Failed to broadcast event to libudev listeners, ignoring: %m");
453 }
454
/* Report the outcome of an event to the main daemon over the shared
 * socketpair; the main process watches the read end. */
static int worker_send_result(Manager *manager, EventResult result) {
        assert(manager);
        assert(manager->worker_watch[WRITE_END] >= 0);

        return loop_write(manager->worker_watch[WRITE_END], &result, sizeof(result), false);
}
461
/* Resolve the whole-disk device backing 'dev': for a partition that is its
 * parent, otherwise the device itself. Returns 1 with outputs set on success,
 * 0 (outputs NULL) when the device is irrelevant for disk-level locking, or a
 * negative errno. '*ret_devname' borrows memory owned by the device object. */
static int device_get_whole_disk(sd_device *dev, sd_device **ret_device, const char **ret_devname) {
        const char *val;
        int r;

        assert(dev);

        /* Remove events have no node to lock. */
        if (device_for_action(dev, SD_DEVICE_REMOVE))
                goto irrelevant;

        r = sd_device_get_subsystem(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");

        if (!streq(val, "block"))
                goto irrelevant;

        r = sd_device_get_sysname(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get sysname: %m");

        /* Exclude the following devices:
         * For "dm-", see the comment added by e918a1b5a94f270186dca59156354acd2a596494.
         * For "md", see the commit message of 2e5b17d01347d3c3118be2b8ad63d20415dbb1f0,
         * but not sure the assumption is still valid even when partitions are created on the md
         * devices, surprisingly which seems to be possible, see PR #22973.
         * For "drbd", see the commit message of fee854ee8ccde0cd28e0f925dea18cce35f3993d. */
        if (STARTSWITH_SET(val, "dm-", "md", "drbd"))
                goto irrelevant;

        /* For a partition, walk up to the whole-disk parent. */
        r = sd_device_get_devtype(dev, &val);
        if (r < 0 && r != -ENOENT)
                return log_device_debug_errno(dev, r, "Failed to get devtype: %m");
        if (r >= 0 && streq(val, "partition")) {
                r = sd_device_get_parent(dev, &dev);
                if (r == -ENOENT) /* The device may be already removed. */
                        goto irrelevant;
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to get parent device: %m");
        }

        r = sd_device_get_devname(dev, &val);
        if (r == -ENOENT)
                goto irrelevant;
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devname: %m");

        if (ret_device)
                *ret_device = dev;
        if (ret_devname)
                *ret_devname = val;
        return 1;

irrelevant:
        if (ret_device)
                *ret_device = NULL;
        if (ret_devname)
                *ret_devname = NULL;
        return 0;
}
521
/* Try to take a shared BSD lock on the whole-disk device backing 'dev'.
 * Returns 1 with *ret_fd holding the locked fd, 0 (*ret_fd = -1) when there is
 * nothing to lock, or a negative errno — notably -EAGAIN when another process
 * holds an exclusive lock, which the caller turns into a requeue. */
static int worker_lock_whole_disk(sd_device *dev, int *ret_fd) {
        _cleanup_close_ int fd = -1;
        sd_device *dev_whole_disk;
        const char *val;
        int r;

        assert(dev);
        assert(ret_fd);

        /* Take a shared lock on the device node; this establishes a concept of device "ownership" to
         * serialize device access. External processes holding an exclusive lock will cause udev to skip the
         * event handling; in the case udev acquired the lock, the external process can block until udev has
         * finished its event handling. */

        r = device_get_whole_disk(dev, &dev_whole_disk, &val);
        if (r < 0)
                return r;
        if (r == 0)
                goto nolock;

        fd = sd_device_open(dev_whole_disk, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
        if (fd < 0) {
                /* A vanished device node is not an error; proceed without a lock. */
                bool ignore = ERRNO_IS_DEVICE_ABSENT(fd);

                log_device_debug_errno(dev, fd, "Failed to open '%s'%s: %m", val, ignore ? ", ignoring" : "");
                if (!ignore)
                        return fd;

                goto nolock;
        }

        /* Non-blocking: fails with EWOULDBLOCK (== EAGAIN) when locked exclusively elsewhere. */
        if (flock(fd, LOCK_SH|LOCK_NB) < 0)
                return log_device_debug_errno(dev, errno, "Failed to flock(%s): %m", val);

        *ret_fd = TAKE_FD(fd);
        return 1;

nolock:
        *ret_fd = -1;
        return 0;
}
563
/* If --blockdev-read-only is in effect, mark newly added physical block
 * devices read-only via the BLKROSET ioctl. Returns 0 on success or when the
 * device is skipped; negative errno on failure (the caller ignores it). */
static int worker_mark_block_device_read_only(sd_device *dev) {
        _cleanup_close_ int fd = -1;
        const char *val;
        int state = 1, r; /* state == 1: set the read-only flag */

        assert(dev);

        if (!arg_blockdev_read_only)
                return 0;

        /* Do this only once, when the block device is new. If the device is later retriggered let's not
         * toggle the bit again, so that people can boot up with full read-only mode and then unset the bit
         * for specific devices only. */
        if (!device_for_action(dev, SD_DEVICE_ADD))
                return 0;

        r = sd_device_get_subsystem(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");

        if (!streq(val, "block"))
                return 0;

        r = sd_device_get_sysname(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get sysname: %m");

        /* Exclude synthetic devices for now, this is supposed to be a safety feature to avoid modification
         * of physical devices, and what sits on top of those doesn't really matter if we don't allow the
         * underlying block devices to receive changes. */
        if (STARTSWITH_SET(val, "dm-", "md", "drbd", "loop", "nbd", "zram"))
                return 0;

        fd = sd_device_open(dev, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
        if (fd < 0)
                return log_device_debug_errno(dev, fd, "Failed to open '%s', ignoring: %m", val);

        if (ioctl(fd, BLKROSET, &state) < 0)
                return log_device_warning_errno(dev, errno, "Failed to mark block device '%s' read-only: %m", val);

        log_device_info(dev, "Successfully marked block device '%s' read-only.", val);
        return 0;
}
607
/* Run the full event pipeline for one device inside a worker: lock the backing
 * disk, apply rules, run RUN= programs and update inotify watches. Returns 0
 * on success, the positive EVENT_RESULT_TRY_AGAIN when the block device is
 * locked by another process (the event will be requeued), or a negative errno. */
static int worker_process_device(Manager *manager, sd_device *dev) {
        _cleanup_(udev_event_freep) UdevEvent *udev_event = NULL;
        _cleanup_close_ int fd_lock = -1;
        int r;

        assert(manager);
        assert(dev);

        log_device_uevent(dev, "Processing device");

        udev_event = udev_event_new(dev, arg_exec_delay_usec, manager->rtnl, manager->log_level);
        if (!udev_event)
                return -ENOMEM;

        /* If this is a block device and the device is locked currently via the BSD advisory locks,
         * someone else is using it exclusively. We don't run our udev rules now to not interfere.
         * Instead of processing the event, we requeue the event and will try again after a delay.
         *
         * The user-facing side of this: https://systemd.io/BLOCK_DEVICE_LOCKING */
        r = worker_lock_whole_disk(dev, &fd_lock);
        if (r == -EAGAIN)
                return EVENT_RESULT_TRY_AGAIN;
        if (r < 0)
                return r;

        /* Best-effort; failure must not abort event processing. */
        (void) worker_mark_block_device_read_only(dev);

        /* apply rules, create node, symlinks */
        r = udev_event_execute_rules(
                        udev_event,
                        manager->inotify_fd,
                        arg_event_timeout_usec,
                        arg_timeout_signal,
                        manager->properties,
                        manager->rules);
        if (r < 0)
                return r;

        udev_event_execute_run(udev_event, arg_event_timeout_usec, arg_timeout_signal);

        if (!manager->rtnl)
                /* in case rtnl was initialized */
                manager->rtnl = sd_netlink_ref(udev_event->rtnl);

        r = udev_event_process_inotify_watch(udev_event, manager->inotify_fd);
        if (r < 0)
                return r;

        log_device_uevent(dev, "Device processed");
        return 0;
}
659
/* Worker-side monitor callback: invoked for each device the main daemon sends
 * us. Processes the device, broadcasts the result to libudev listeners (except
 * for TRY_AGAIN, which is requeued by the main process), and reports the
 * EventResult back over the socketpair. */
static int worker_device_monitor_handler(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(dev);
        assert(manager);

        r = worker_process_device(manager, dev);
        if (r == EVENT_RESULT_TRY_AGAIN)
                /* if we couldn't acquire the flock(), then requeue the event */
                log_device_debug(dev, "Block device is currently locked, requeueing the event.");
        else {
                if (r < 0)
                        log_device_warning_errno(dev, r, "Failed to process device, ignoring: %m");

                /* send processed event back to libudev listeners */
                device_broadcast(monitor, dev, r);
        }

        /* send udevd the result of the event execution */
        r = worker_send_result(manager, r);
        if (r < 0)
                log_device_warning_errno(dev, r, "Failed to send signal to main daemon, ignoring: %m");

        /* Reset the log level, as it might be changed by "OPTIONS=log_level=". */
        log_set_max_level(manager->log_level);

        return 1;
}
689
/* Entry point of a forked worker process. Takes ownership of the inherited
 * Manager copy, the per-worker monitor, and the first device to process, then
 * services further devices from its event loop until terminated. */
static int worker_main(Manager *_manager, sd_device_monitor *monitor, sd_device *first_device) {
        _cleanup_(sd_device_unrefp) sd_device *dev = first_device;
        _cleanup_(manager_freep) Manager *manager = _manager;
        int r;

        assert(manager);
        assert(monitor);
        assert(dev);

        /* Only the main daemon talks to the service manager. */
        assert_se(unsetenv("NOTIFY_SOCKET") == 0);

        /* SIGTERM must stay blocked so it can be consumed via sd_event_add_signal() below. */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, -1) >= 0);

        /* Reset OOM score, we only protect the main daemon. */
        r = set_oom_score_adjust(0);
        if (r < 0)
                log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

        /* Clear unnecessary data in Manager object. */
        manager_clear_for_worker(manager);

        r = sd_event_new(&manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, NULL, NULL);
        if (r < 0)
                return log_error_errno(r, "Failed to set SIGTERM event: %m");

        r = sd_device_monitor_attach_event(monitor, manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to attach event loop to device monitor: %m");

        r = sd_device_monitor_start(monitor, worker_device_monitor_handler, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to start device monitor: %m");

        (void) sd_event_source_set_description(sd_device_monitor_get_event_source(monitor), "worker-device-monitor");

        /* Process first device */
        (void) worker_device_monitor_handler(monitor, dev, manager);

        r = sd_event_loop(manager->event);
        if (r < 0)
                return log_error_errno(r, "Event loop failed: %m");

        return 0;
}
738
/* Hard per-event timeout (Event.timeout_event): forcibly terminate the worker
 * that is stuck processing this event. */
static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, arg_timeout_signal);
        event->worker->state = WORKER_KILLED;

        log_device_error(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" killed", event->worker->pid, event->seqnum);

        return 1;
}
752
/* Soft per-event timeout (Event.timeout_warning_event): only log a warning;
 * the hard timeout in on_event_timeout() does the killing. */
static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        Event *event = userdata;

        assert(event);
        assert(event->worker);

        log_device_warning(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" is taking a long time", event->worker->pid, event->seqnum);

        return 1;
}
763
764 static void worker_attach_event(Worker *worker, Event *event) {
765 sd_event *e;
766
767 assert(worker);
768 assert(worker->manager);
769 assert(event);
770 assert(!event->worker);
771 assert(!worker->event);
772
773 worker->state = WORKER_RUNNING;
774 worker->event = event;
775 event->state = EVENT_RUNNING;
776 event->worker = worker;
777
778 e = worker->manager->event;
779
780 (void) sd_event_add_time_relative(e, &event->timeout_warning_event, CLOCK_MONOTONIC,
781 udev_warn_timeout(arg_event_timeout_usec), USEC_PER_SEC,
782 on_event_timeout_warning, event);
783
784 (void) sd_event_add_time_relative(e, &event->timeout_event, CLOCK_MONOTONIC,
785 arg_event_timeout_usec, USEC_PER_SEC,
786 on_event_timeout, event);
787 }
788
/* Fork a new worker to handle 'event'. The child runs worker_main() and never
 * returns here; the parent registers a Worker object and attaches the event.
 * Returns 0 on success, negative errno on failure (on fork failure the event
 * is put back into EVENT_QUEUED state). */
static int worker_spawn(Manager *manager, Event *event) {
        _cleanup_(sd_device_monitor_unrefp) sd_device_monitor *worker_monitor = NULL;
        Worker *worker;
        pid_t pid;
        int r;

        /* listen for new events */
        r = device_monitor_new_full(&worker_monitor, MONITOR_GROUP_NONE, -1);
        if (r < 0)
                return r;

        (void) sd_device_monitor_set_description(worker_monitor, "worker");

        /* allow the main daemon netlink address to send devices to the worker */
        r = device_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        if (r < 0)
                return log_error_errno(r, "Worker: Failed to set unicast sender: %m");

        r = device_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                return log_error_errno(r, "Worker: Failed to enable receiving of device: %m");

        r = safe_fork(NULL, FORK_DEATHSIG, &pid);
        if (r < 0) {
                event->state = EVENT_QUEUED;
                return log_error_errno(r, "Failed to fork() worker: %m");
        }
        if (r == 0) {
                DEVICE_TRACE_POINT(worker_spawned, event->dev, getpid());

                /* Worker process */
                r = worker_main(manager, worker_monitor, sd_device_ref(event->dev));
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }

        /* Parent: track the child and hand it the event. */
        r = worker_new(&worker, manager, worker_monitor, pid);
        if (r < 0)
                return log_error_errno(r, "Failed to create worker object: %m");

        worker_attach_event(worker, event);

        log_device_debug(event->dev, "Worker ["PID_FMT"] is forked for processing SEQNUM=%"PRIu64".", pid, event->seqnum);
        return 0;
}
834
/* Dispatch one queued event: hand it to an idle worker if possible, otherwise
 * fork a new worker (up to arg_children_max). Returns 1 when the event is now
 * being processed, 0 when no worker is available, negative errno on failure. */
static int event_run(Event *event) {
        /* Throttles the "maximum number of children reached" debug message:
         * logged once per batch, re-armed below when a worker slot was found. */
        static bool log_children_max_reached = true;
        Manager *manager;
        Worker *worker;
        int r;

        assert(event);
        assert(event->manager);

        log_device_uevent(event->dev, "Device ready for processing");

        manager = event->manager;
        HASHMAP_FOREACH(worker, manager->workers) {
                if (worker->state != WORKER_IDLE)
                        continue;

                r = device_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (r < 0) {
                        /* An unresponsive worker is replaced rather than retried. */
                        log_device_error_errno(event->dev, r, "Worker ["PID_FMT"] did not accept message, killing the worker: %m",
                                               worker->pid);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return 1; /* event is now processing. */
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                /* Avoid spamming the debug logs if the limit is already reached and
                 * many events still need to be processed */
                if (log_children_max_reached && arg_children_max > 1) {
                        log_debug("Maximum number (%u) of children reached.", hashmap_size(manager->workers));
                        log_children_max_reached = false;
                }
                return 0; /* no free worker */
        }

        /* Re-enable the debug message for the next batch of events */
        log_children_max_reached = true;

        /* start new worker and pass initial device */
        r = worker_spawn(manager, event);
        if (r < 0)
                return r;

        return 1; /* event is now processing. */
}
883
884 static int event_is_blocked(Event *event) {
885 Event *loop_event = NULL;
886 int r;
887
888 /* lookup event for identical, parent, child device */
889
890 assert(event);
891 assert(event->manager);
892 assert(event->blocker_seqnum <= event->seqnum);
893
894 if (event->retry_again_next_usec > 0) {
895 usec_t now_usec;
896
897 r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
898 if (r < 0)
899 return r;
900
901 if (event->retry_again_next_usec <= now_usec)
902 return true;
903 }
904
905 if (event->blocker_seqnum == event->seqnum)
906 /* we have checked previously and no blocker found */
907 return false;
908
909 LIST_FOREACH(event, e, event->manager->events) {
910 loop_event = e;
911
912 /* we already found a later event, earlier cannot block us, no need to check again */
913 if (loop_event->seqnum < event->blocker_seqnum)
914 continue;
915
916 /* event we checked earlier still exists, no need to check again */
917 if (loop_event->seqnum == event->blocker_seqnum)
918 return true;
919
920 /* found ourself, no later event can block us */
921 if (loop_event->seqnum >= event->seqnum)
922 goto no_blocker;
923
924 /* found event we have not checked */
925 break;
926 }
927
928 assert(loop_event);
929 assert(loop_event->seqnum > event->blocker_seqnum &&
930 loop_event->seqnum < event->seqnum);
931
932 /* check if queue contains events we depend on */
933 LIST_FOREACH(event, e, loop_event) {
934 loop_event = e;
935
936 /* found ourself, no later event can block us */
937 if (loop_event->seqnum >= event->seqnum)
938 goto no_blocker;
939
940 if (streq_ptr(loop_event->id, event->id))
941 break;
942
943 if (devpath_conflict(event->devpath, loop_event->devpath) ||
944 devpath_conflict(event->devpath, loop_event->devpath_old) ||
945 devpath_conflict(event->devpath_old, loop_event->devpath))
946 break;
947
948 if (event->devnode && streq_ptr(event->devnode, loop_event->devnode))
949 break;
950 }
951
952 assert(loop_event);
953
954 log_device_debug(event->dev, "SEQNUM=%" PRIu64 " blocked by SEQNUM=%" PRIu64,
955 event->seqnum, loop_event->seqnum);
956
957 event->blocker_seqnum = loop_event->seqnum;
958 return true;
959
960 no_blocker:
961 event->blocker_seqnum = event->seqnum;
962 return false;
963 }
964
/* Walk the queue and dispatch every runnable (queued and unblocked) event.
 * Stops early when no worker slot is free. Returns 0 or a negative errno from
 * event_run(). */
static int event_queue_start(Manager *manager) {
        int r;

        assert(manager);

        if (!manager->events || manager->exit || manager->stop_exec_queue)
                return 0;

        /* We are busy again; suspend the idle-worker cleanup timer. */
        r = event_source_disable(manager->kill_workers_event);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        /* Rate-limited internally to at most once every 3 seconds. */
        manager_reload(manager, /* force = */ false);

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running or queued */
                r = event_is_blocked(event);
                if (r > 0)
                        continue;
                if (r < 0)
                        log_device_warning_errno(event->dev, r,
                                                 "Failed to check dependencies for event (SEQNUM=%"PRIu64", ACTION=%s), "
                                                 "assuming there is no blocking event, ignoring: %m",
                                                 event->seqnum,
                                                 strna(device_action_to_string(event->action)));

                r = event_run(event);
                if (r <= 0) /* 0 means there are no idle workers. Let's escape from the loop. */
                        return r;
        }

        return 0;
}
1001
/* Put an event back into the queue after its block device turned out to be
 * locked: detach it from its worker, disarm its timers, and set a retry delay
 * plus an overall retry deadline. Returns 0 on success; a negative errno means
 * the event should be given up on (the caller frees it). */
static int event_requeue(Event *event) {
        usec_t now_usec;
        int r;

        assert(event);
        assert(event->manager);
        assert(event->manager->event);

        event->timeout_warning_event = sd_event_source_disable_unref(event->timeout_warning_event);
        event->timeout_event = sd_event_source_disable_unref(event->timeout_event);

        /* add a short delay to suppress busy loop */
        r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
        if (r < 0)
                return log_device_warning_errno(event->dev, r,
                                                "Failed to get current time, "
                                                "skipping event (SEQNUM=%"PRIu64", ACTION=%s): %m",
                                                event->seqnum, strna(device_action_to_string(event->action)));

        /* Overall deadline exceeded: stop retrying this event. */
        if (event->retry_again_timeout_usec > 0 && event->retry_again_timeout_usec <= now_usec)
                return log_device_warning_errno(event->dev, SYNTHETIC_ERRNO(ETIMEDOUT),
                                                "The underlying block device is locked by a process more than %s, "
                                                "skipping event (SEQNUM=%"PRIu64", ACTION=%s).",
                                                FORMAT_TIMESPAN(EVENT_RETRY_TIMEOUT_USEC, USEC_PER_MINUTE),
                                                event->seqnum, strna(device_action_to_string(event->action)));

        event->retry_again_next_usec = usec_add(now_usec, EVENT_RETRY_INTERVAL_USEC);
        if (event->retry_again_timeout_usec == 0)
                /* First requeue: establish the overall deadline. */
                event->retry_again_timeout_usec = usec_add(now_usec, EVENT_RETRY_TIMEOUT_USEC);

        if (event->worker && event->worker->event == event)
                event->worker->event = NULL;
        event->worker = NULL;

        event->state = EVENT_QUEUED;
        return 0;
}
1039
1040 static int event_queue_assume_block_device_unlocked(Manager *manager, sd_device *dev) {
1041 const char *devname;
1042 int r;
1043
1044 /* When a new event for a block device is queued or we get an inotify event, assume that the
1045 * device is not locked anymore. The assumption may not be true, but that should not cause any
1046 * issues, as in that case events will be requeued soon. */
1047
1048 r = device_get_whole_disk(dev, NULL, &devname);
1049 if (r <= 0)
1050 return r;
1051
1052 LIST_FOREACH(event, event, manager->events) {
1053 const char *event_devname;
1054
1055 if (event->state != EVENT_QUEUED)
1056 continue;
1057
1058 if (event->retry_again_next_usec == 0)
1059 continue;
1060
1061 if (device_get_whole_disk(event->dev, NULL, &event_devname) <= 0)
1062 continue;
1063
1064 if (!streq(devname, event_devname))
1065 continue;
1066
1067 event->retry_again_next_usec = 0;
1068 }
1069
1070 return 0;
1071 }
1072
/* Create an Event from a received uevent and append it to the manager's queue.
 * Only ever called from the main daemon process. Returns 0 on success, < 0 on error. */
static int event_queue_insert(Manager *manager, sd_device *dev) {
        const char *devpath, *devpath_old = NULL, *id = NULL, *devnode = NULL;
        sd_device_action_t action;
        uint64_t seqnum;
        Event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        assert(manager->pid == getpid_cached());

        /* We only accepts devices received by device monitor. */
        r = sd_device_get_seqnum(dev, &seqnum);
        if (r < 0)
                return r;

        r = sd_device_get_action(dev, &action);
        if (r < 0)
                return r;

        r = sd_device_get_devpath(dev, &devpath);
        if (r < 0)
                return r;

        /* The following three attributes are optional, hence -ENOENT is tolerated. */
        r = sd_device_get_property_value(dev, "DEVPATH_OLD", &devpath_old);
        if (r < 0 && r != -ENOENT)
                return r;

        r = device_get_device_id(dev, &id);
        if (r < 0 && r != -ENOENT)
                return r;

        r = sd_device_get_devname(dev, &devnode);
        if (r < 0 && r != -ENOENT)
                return r;

        event = new(Event, 1);
        if (!event)
                return -ENOMEM;

        /* The string fields point into 'dev'; taking a reference keeps them valid for the
         * lifetime of the event. */
        *event = (Event) {
                .manager = manager,
                .dev = sd_device_ref(dev),
                .seqnum = seqnum,
                .action = action,
                .id = id,
                .devpath = devpath,
                .devpath_old = devpath_old,
                .devnode = devnode,
                .state = EVENT_QUEUED,
        };

        /* /run/udev/queue signals "udev is busy" to other tools; create it when the queue
         * transitions from empty to non-empty. It is removed again in on_post(). */
        if (!manager->events) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "Failed to touch /run/udev/queue, ignoring: %m");
        }

        LIST_APPEND(event, manager->events, event);

        log_device_uevent(dev, "Device is queued");

        return 0;
}
1139
1140 static int on_uevent(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
1141 Manager *manager = userdata;
1142 int r;
1143
1144 assert(manager);
1145
1146 DEVICE_TRACE_POINT(kernel_uevent_received, dev);
1147
1148 device_ensure_usec_initialized(dev, NULL);
1149
1150 r = event_queue_insert(manager, dev);
1151 if (r < 0) {
1152 log_device_error_errno(dev, r, "Failed to insert device into event queue: %m");
1153 return 1;
1154 }
1155
1156 (void) event_queue_assume_block_device_unlocked(manager, dev);
1157
1158 /* we have fresh events, try to schedule them */
1159 event_queue_start(manager);
1160
1161 return 1;
1162 }
1163
/* IO callback on the worker socketpair: drain all EventResult messages sent by
 * workers, update worker/event state accordingly, and try to schedule more events. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                EventResult result;
                struct iovec iovec = IOVEC_MAKE(&result, sizeof(result));
                CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred))) control;
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                ssize_t size;
                struct ucred *ucred;
                Worker *worker;

                size = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT);
                if (size == -EINTR)
                        continue;
                if (size == -EAGAIN)
                        /* nothing more to read */
                        break;
                if (size < 0)
                        return log_error_errno(size, "Failed to receive message: %m");

                /* Close any file descriptors passed along with the message; none are expected. */
                cmsg_close_all(&msghdr);

                if (size != sizeof(result)) {
                        log_warning("Ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                /* SO_PASSCRED is enabled on this socket (see main_loop()), so the kernel
                 * attaches the sender's credentials, which identify the worker. */
                ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred);
                if (!ucred || ucred->pid <= 0) {
                        log_warning("Ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("Worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                /* A worker marked for killing gets SIGTERM once it has finished its current event. */
                if (worker->state == WORKER_KILLING) {
                        worker->state = WORKER_KILLED;
                        (void) kill(worker->pid, SIGTERM);
                } else if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                if (result == EVENT_RESULT_TRY_AGAIN &&
                    event_requeue(worker->event) < 0)
                        device_broadcast(manager->monitor, worker->event->dev, -ETIMEDOUT);

                /* When event_requeue() succeeds, worker->event is NULL, and event_free() handles NULL gracefully. */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
1232
/* Handle a control message received from udevadm on the control socket. Returning 1
 * keeps the control connection processing going; callers do not interpret errors. */
static int on_ctrl_msg(UdevCtrl *uctrl, UdevCtrlMessageType type, const UdevCtrlMessageValue *value, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(value);
        assert(manager);

        switch (type) {
        case UDEV_CTRL_SET_LOG_LEVEL:
                log_debug("Received udev control message (SET_LOG_LEVEL), setting log_level=%i", value->intval);
                log_set_max_level(value->intval);
                manager->log_level = value->intval;
                /* Workers inherit the log level on fork; restart them so the change takes effect. */
                manager_kill_workers(manager, false);
                break;
        case UDEV_CTRL_STOP_EXEC_QUEUE:
                log_debug("Received udev control message (STOP_EXEC_QUEUE)");
                manager->stop_exec_queue = true;
                break;
        case UDEV_CTRL_START_EXEC_QUEUE:
                log_debug("Received udev control message (START_EXEC_QUEUE)");
                manager->stop_exec_queue = false;
                /* It is not necessary to call event_queue_start() here, as it will be called in on_post() if necessary. */
                break;
        case UDEV_CTRL_RELOAD:
                log_debug("Received udev control message (RELOAD)");
                manager_reload(manager, /* force = */ true);
                break;
        case UDEV_CTRL_SET_ENV: {
                /* value->buf carries "KEY=VALUE" (set) or "KEY=" / "KEY" (unset). */
                _unused_ _cleanup_free_ char *old_val = NULL;
                _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL;
                const char *eq;

                eq = strchr(value->buf, '=');
                if (!eq) {
                        log_error("Invalid key format '%s'", value->buf);
                        return 1;
                }

                key = strndup(value->buf, eq - value->buf);
                if (!key) {
                        log_oom();
                        return 1;
                }

                /* Drop any previous entry for this key; both the old key and value strings are
                 * reclaimed via the cleanup attributes above. */
                old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);

                r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
                if (r < 0) {
                        log_oom();
                        return 1;
                }

                eq++;
                if (isempty(eq)) {
                        log_debug("Received udev control message (ENV), unsetting '%s'", key);

                        /* A NULL value marks the property as explicitly unset. */
                        r = hashmap_put(manager->properties, key, NULL);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                } else {
                        val = strdup(eq);
                        if (!val) {
                                log_oom();
                                return 1;
                        }

                        log_debug("Received udev control message (ENV), setting '%s=%s'", key, val);

                        r = hashmap_put(manager->properties, key, val);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                }

                /* Ownership of key/val has passed to the hashmap; prevent the cleanup
                 * attributes from freeing them. */
                key = val = NULL;
                /* Workers snapshot the properties on fork; restart them to pick up the change. */
                manager_kill_workers(manager, false);
                break;
        }
        case UDEV_CTRL_SET_CHILDREN_MAX:
                if (value->intval <= 0) {
                        log_debug("Received invalid udev control message (SET_MAX_CHILDREN, %i), ignoring.", value->intval);
                        return 0;
                }

                log_debug("Received udev control message (SET_MAX_CHILDREN), setting children_max=%i", value->intval);
                arg_children_max = value->intval;

                notify_ready();
                break;
        case UDEV_CTRL_PING:
                log_debug("Received udev control message (PING)");
                break;
        case UDEV_CTRL_EXIT:
                log_debug("Received udev control message (EXIT)");
                manager_exit(manager);
                break;
        default:
                log_debug("Received unknown udev control message, ignoring");
        }

        return 1;
}
1339
1340 static int synthesize_change_one(sd_device *dev, sd_device *target) {
1341 int r;
1342
1343 if (DEBUG_LOGGING) {
1344 const char *syspath = NULL;
1345 (void) sd_device_get_syspath(target, &syspath);
1346 log_device_debug(dev, "device is closed, synthesising 'change' on %s", strna(syspath));
1347 }
1348
1349 r = sd_device_trigger(target, SD_DEVICE_CHANGE);
1350 if (r < 0)
1351 return log_device_debug_errno(target, r, "Failed to trigger 'change' uevent: %m");
1352
1353 DEVICE_TRACE_POINT(synthetic_change_event, dev);
1354
1355 return 0;
1356 }
1357
1358 static int synthesize_change(sd_device *dev) {
1359 const char *subsystem, *sysname, *devtype;
1360 int r;
1361
1362 r = sd_device_get_subsystem(dev, &subsystem);
1363 if (r < 0)
1364 return r;
1365
1366 r = sd_device_get_devtype(dev, &devtype);
1367 if (r < 0)
1368 return r;
1369
1370 r = sd_device_get_sysname(dev, &sysname);
1371 if (r < 0)
1372 return r;
1373
1374 if (streq_ptr(subsystem, "block") &&
1375 streq_ptr(devtype, "disk") &&
1376 !startswith(sysname, "dm-")) {
1377 _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
1378 bool part_table_read = false, has_partitions = false;
1379 sd_device *d;
1380 int fd;
1381
1382 /* Try to re-read the partition table. This only succeeds if none of the devices is
1383 * busy. The kernel returns 0 if no partition table is found, and we will not get an
1384 * event for the disk. */
1385 fd = sd_device_open(dev, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
1386 if (fd >= 0) {
1387 r = flock(fd, LOCK_EX|LOCK_NB);
1388 if (r >= 0)
1389 r = ioctl(fd, BLKRRPART, 0);
1390
1391 close(fd);
1392 if (r >= 0)
1393 part_table_read = true;
1394 }
1395
1396 /* search for partitions */
1397 r = sd_device_enumerator_new(&e);
1398 if (r < 0)
1399 return r;
1400
1401 r = sd_device_enumerator_allow_uninitialized(e);
1402 if (r < 0)
1403 return r;
1404
1405 r = sd_device_enumerator_add_match_parent(e, dev);
1406 if (r < 0)
1407 return r;
1408
1409 r = sd_device_enumerator_add_match_subsystem(e, "block", true);
1410 if (r < 0)
1411 return r;
1412
1413 FOREACH_DEVICE(e, d) {
1414 const char *t;
1415
1416 if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
1417 continue;
1418
1419 has_partitions = true;
1420 break;
1421 }
1422
1423 /* We have partitions and re-read the table, the kernel already sent out a "change"
1424 * event for the disk, and "remove/add" for all partitions. */
1425 if (part_table_read && has_partitions)
1426 return 0;
1427
1428 /* We have partitions but re-reading the partition table did not work, synthesize
1429 * "change" for the disk and all partitions. */
1430 (void) synthesize_change_one(dev, dev);
1431
1432 FOREACH_DEVICE(e, d) {
1433 const char *t;
1434
1435 if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
1436 continue;
1437
1438 (void) synthesize_change_one(dev, d);
1439 }
1440
1441 } else
1442 (void) synthesize_change_one(dev, dev);
1443
1444 return 0;
1445 }
1446
/* IO callback for the inotify fd watching device nodes (OPTIONS+="watch"): a
 * close-after-write on a watched node triggers a synthetic "change" uevent. */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        ssize_t l;
        int r;

        assert(manager);

        /* Activity suggests more events may follow; postpone killing idle workers. */
        r = event_source_disable(manager->kill_workers_event);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (ERRNO_IS_TRANSIENT(errno))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                /* Map the inotify watch descriptor back to the watched device. */
                r = device_new_from_watch_handle(&dev, e->wd);
                if (r < 0) {
                        log_debug_errno(r, "Failed to create sd_device object from watch handle, ignoring: %m");
                        continue;
                }

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE) {
                        /* The node was closed after being opened for writing, so its content may
                         * have changed; assume any lock is released and re-announce the device. */
                        (void) event_queue_assume_block_device_unlocked(manager, dev);
                        (void) synthesize_change(dev);
                }

                /* Do not handle IN_IGNORED here. It should be handled by worker in 'remove' uevent;
                 * udev_event_execute_rules() -> event_execute_rules_on_remove() -> udev_watch_end(). */
        }

        return 1;
}
1492
1493 static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1494 Manager *manager = userdata;
1495
1496 assert(manager);
1497
1498 manager_exit(manager);
1499
1500 return 1;
1501 }
1502
1503 static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1504 Manager *manager = userdata;
1505
1506 assert(manager);
1507
1508 manager_reload(manager, /* force = */ true);
1509
1510 return 1;
1511 }
1512
/* Per-worker SIGCHLD handler: translate the child's exit status into an EventResult,
 * notify libudev listeners about a failed event, reap the worker, and reschedule. */
static int on_sigchld(sd_event_source *s, const siginfo_t *si, void *userdata) {
        Worker *worker = ASSERT_PTR(userdata);
        Manager *manager = ASSERT_PTR(worker->manager);
        /* May be NULL if the worker died while idle, i.e. without an assigned event. */
        sd_device *dev = worker->event ? ASSERT_PTR(worker->event->dev) : NULL;
        EventResult result;
        int r;

        assert(si);

        switch (si->si_code) {
        case CLD_EXITED:
                if (si->si_status == 0)
                        log_device_debug(dev, "Worker ["PID_FMT"] exited.", si->si_pid);
                else
                        log_device_warning(dev, "Worker ["PID_FMT"] exited with return code %i.",
                                           si->si_pid, si->si_status);
                result = EVENT_RESULT_EXIT_STATUS_BASE + si->si_status;
                break;

        case CLD_KILLED:
        case CLD_DUMPED:
                /* For killed/dumped children si_status carries the terminating signal number. */
                log_device_warning(dev, "Worker ["PID_FMT"] terminated by signal %i (%s).",
                                   si->si_pid, si->si_status, signal_to_string(si->si_status));
                result = EVENT_RESULT_SIGNAL_BASE + si->si_status;
                break;

        default:
                /* The event source is registered with WEXITED only, so other codes cannot occur. */
                assert_not_reached();
        }

        if (result != EVENT_RESULT_SUCCESS && dev) {
                /* delete state from disk */
                device_delete_db(dev);
                device_tag_index(dev, NULL, false);

                /* Forward kernel event to libudev listeners */
                device_broadcast(manager->monitor, dev, result);
        }

        /* Also frees the worker's unfinished event, if any. */
        worker_free(worker);

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers)) {
                r = event_source_disable(manager->kill_workers_event);
                if (r < 0)
                        log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
        }

        return 1;
}
1566
/* Post event source: runs after every event-loop iteration to perform housekeeping
 * (retry queued events, remove /run/udev/queue, schedule idle-worker cleanup, exit). */
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        if (manager->events) {
                /* Try to process pending events if idle workers exist. Why is this necessary?
                 * When a worker finished an event and became idle, even if there was a pending event,
                 * the corresponding device might have been locked and the processing of the event
                 * delayed for a while, preventing the worker from processing the event immediately.
                 * Now, the device may be unlocked. Let's try again! */
                event_queue_start(manager);
                return 1;
        }

        /* There are no queued events. Let's remove /run/udev/queue and clean up the idle processes. */

        if (unlink("/run/udev/queue") < 0) {
                if (errno != ENOENT)
                        log_warning_errno(errno, "Failed to unlink /run/udev/queue, ignoring: %m");
        } else
                log_debug("No events are queued, removing /run/udev/queue.");

        if (!hashmap_isempty(manager->workers)) {
                /* There are idle workers; arrange for them to be killed after 3s of inactivity. */
                (void) event_reset_time_relative(manager->event, &manager->kill_workers_event,
                                                 CLOCK_MONOTONIC, 3 * USEC_PER_SEC, USEC_PER_SEC,
                                                 on_kill_workers_event, manager,
                                                 0, "kill-workers-event", false);
                return 1;
        }

        /* There are no idle workers. */

        if (manager->exit)
                return sd_event_exit(manager->event, 0);

        if (manager->cgroup)
                /* cleanup possible left-over processes in our cgroup */
                (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);

        return 1;
}
1610
1611 static int listen_fds(int *ret_ctrl, int *ret_netlink) {
1612 int ctrl_fd = -1, netlink_fd = -1;
1613 int fd, n;
1614
1615 assert(ret_ctrl);
1616 assert(ret_netlink);
1617
1618 n = sd_listen_fds(true);
1619 if (n < 0)
1620 return n;
1621
1622 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1623 if (sd_is_socket(fd, AF_UNIX, SOCK_SEQPACKET, -1) > 0) {
1624 if (ctrl_fd >= 0)
1625 return -EINVAL;
1626 ctrl_fd = fd;
1627 continue;
1628 }
1629
1630 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
1631 if (netlink_fd >= 0)
1632 return -EINVAL;
1633 netlink_fd = fd;
1634 continue;
1635 }
1636
1637 return -EINVAL;
1638 }
1639
1640 *ret_ctrl = ctrl_fd;
1641 *ret_netlink = netlink_fd;
1642
1643 return 0;
1644 }
1645
/*
 * Parse udev-related options on the kernel command line:
 * udev.log_level=<level> syslog priority (udev.log_priority is the legacy alias)
 * udev.children_max=<number of workers> events are fully serialized if set to 1
 * udev.exec_delay=<number of seconds> delay execution of every executed program
 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
 * udev.timeout_signal=<signal> signal used to terminate a timed-out event
 * udev.blockdev_read_only[=bool] mark all block devices read-only when they appear
 * Parse failures are logged and ignored; this function always returns 0.
 */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r;

        assert(key);

        if (proc_cmdline_key_streq(key, "udev.log_level") ||
            proc_cmdline_key_streq(key, "udev.log_priority")) { /* kept for backward compatibility */

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = log_level_from_string(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_event_timeout_usec);

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_exec_delay_usec);

        } else if (proc_cmdline_key_streq(key, "udev.timeout_signal")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = signal_from_string(value);
                if (r > 0)
                        arg_timeout_signal = r;

        } else if (proc_cmdline_key_streq(key, "udev.blockdev_read_only")) {

                /* A bare "udev.blockdev_read_only" (no value) means true. */
                if (!value)
                        arg_blockdev_read_only = true;
                else {
                        r = parse_boolean(value);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse udev.blockdev-read-only argument, ignoring: %s", value);
                        else
                                arg_blockdev_read_only = r;
                }

                if (arg_blockdev_read_only)
                        log_notice("All physical block devices will be marked read-only.");

                return 0;

        } else {
                if (startswith(key, "udev."))
                        log_warning("Unknown udev kernel command line option \"%s\", ignoring.", key);

                return 0;
        }

        /* Common error reporting for the branches above that fall through with r set. */
        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}
1728
1729 static int help(void) {
1730 _cleanup_free_ char *link = NULL;
1731 int r;
1732
1733 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1734 if (r < 0)
1735 return log_oom();
1736
1737 printf("%s [OPTIONS...]\n\n"
1738 "Rule-based manager for device events and files.\n\n"
1739 " -h --help Print this message\n"
1740 " -V --version Print version of the program\n"
1741 " -d --daemon Detach and run in the background\n"
1742 " -D --debug Enable debug output\n"
1743 " -c --children-max=INT Set maximum number of workers\n"
1744 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1745 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1746 " -N --resolve-names=early|late|never\n"
1747 " When to resolve users and groups\n"
1748 "\nSee the %s for details.\n",
1749 program_invocation_short_name,
1750 link);
1751
1752 return 0;
1753 }
1754
/* Parse command-line arguments into the arg_* globals. Returns > 0 to continue
 * execution, 0 to exit successfully (--help/--version), < 0 on invalid usage.
 * Individually unparsable option values are logged and ignored. */
static int parse_argv(int argc, char *argv[]) {
        enum {
                ARG_TIMEOUT_SIGNAL,
        };

        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd'                 },
                { "debug",              no_argument,            NULL, 'D'                 },
                { "children-max",       required_argument,      NULL, 'c'                 },
                { "exec-delay",         required_argument,      NULL, 'e'                 },
                { "event-timeout",      required_argument,      NULL, 't'                 },
                { "resolve-names",      required_argument,      NULL, 'N'                 },
                { "help",               no_argument,            NULL, 'h'                 },
                { "version",            no_argument,            NULL, 'V'                 },
                { "timeout-signal",     required_argument,      NULL,  ARG_TIMEOUT_SIGNAL },
                {}
        };

        int c, r;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
                        break;
                case 'e':
                        r = parse_sec(optarg, &arg_exec_delay_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
                        break;
                case ARG_TIMEOUT_SIGNAL:
                        /* signal_from_string() returns a positive signal number on success. */
                        r = signal_from_string(optarg);
                        if (r <= 0)
                                log_warning_errno(r, "Failed to parse --timeout-signal= value '%s', ignoring: %m", optarg);
                        else
                                arg_timeout_signal = r;

                        break;
                case 't':
                        r = parse_sec(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg);
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N': {
                        ResolveNameTiming t;

                        t = resolve_name_timing_from_string(optarg);
                        if (t < 0)
                                log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
                        else
                                arg_resolve_name_timing = t;
                        break;
                }
                case 'h':
                        return help();
                case 'V':
                        printf("%s\n", GIT_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached();

                }
        }

        return 1;
}
1835
/* Move ourselves into a fresh "udev" subcgroup of our delegated cgroup, so that
 * left-over worker processes can later be killed wholesale. Only applicable when
 * spawned directly by PID1 on a systemd system with a delegated cgroup; otherwise
 * returns a (debug-logged) error and the caller proceeds without a cgroup. */
static int create_subcgroup(char **ret) {
        _cleanup_free_ char *cgroup = NULL, *subcgroup = NULL;
        int r;

        if (getppid() != 1)
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "Not invoked by PID1.");

        r = sd_booted();
        if (r < 0)
                return log_debug_errno(r, "Failed to check if systemd is running: %m");
        if (r == 0)
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "systemd is not running.");

        /* Get our own cgroup, we regularly kill everything udev has left behind.
         * We only do this on systemd systems, and only if we are directly spawned
         * by PID1. Otherwise we are not guaranteed to have a dedicated cgroup. */

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
        if (r < 0) {
                if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                        return log_debug_errno(r, "Dedicated cgroup not found: %m");
                return log_debug_errno(r, "Failed to get cgroup: %m");
        }

        /* PID1 marks delegated cgroups with the trusted.delegate xattr. */
        r = cg_get_xattr_bool(SYSTEMD_CGROUP_CONTROLLER, cgroup, "trusted.delegate");
        if (IN_SET(r, 0, -ENODATA))
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "The cgroup %s is not delegated to us.", cgroup);
        if (r < 0)
                return log_debug_errno(r, "Failed to read trusted.delegate attribute: %m");

        /* We are invoked with our own delegated cgroup tree, let's move us one level down, so that we
         * don't collide with the "no processes in inner nodes" rule of cgroups, when the service
         * manager invokes the ExecReload= job in the .control/ subcgroup. */

        subcgroup = path_join(cgroup, "/udev");
        if (!subcgroup)
                return log_oom_debug();

        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, subcgroup, 0);
        if (r < 0)
                return log_debug_errno(r, "Failed to create %s subcgroup: %m", subcgroup);

        log_debug("Created %s subcgroup.", subcgroup);
        if (ret)
                *ret = TAKE_PTR(subcgroup);
        return 0;
}
1883
/* Allocate and initialize the Manager: control socket, kernel uevent monitor, and
 * (best-effort) a private subcgroup. fd_ctrl/fd_uevent are socket-activation fds,
 * or negative to create the sockets ourselves. */
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(ret);

        /* Best effort; udevd works without a dedicated cgroup, see create_subcgroup(). */
        (void) create_subcgroup(&cgroup);

        manager = new(Manager, 1);
        if (!manager)
                return log_oom();

        *manager = (Manager) {
                .inotify_fd = -1,
                .worker_watch = { -1, -1 },
                .cgroup = TAKE_PTR(cgroup),
        };

        r = udev_ctrl_new_from_fd(&manager->ctrl, fd_ctrl);
        if (r < 0)
                return log_error_errno(r, "Failed to initialize udev control socket: %m");

        r = udev_ctrl_enable_receiving(manager->ctrl);
        if (r < 0)
                return log_error_errno(r, "Failed to bind udev control socket: %m");

        r = device_monitor_new_full(&manager->monitor, MONITOR_GROUP_KERNEL, fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to initialize device monitor: %m");

        /* Bump receiver buffer, but only if we are not called via socket activation, as in that
         * case systemd sets the receive buffer size for us, and the value in the .socket unit
         * should take full effect. */
        if (fd_uevent < 0) {
                r = sd_device_monitor_set_receive_buffer_size(manager->monitor, 128 * 1024 * 1024);
                if (r < 0)
                        log_warning_errno(r, "Failed to set receive buffer size for device monitor, ignoring: %m");
        }

        (void) sd_device_monitor_set_description(manager->monitor, "manager");

        r = device_monitor_enable_receiving(manager->monitor);
        if (r < 0)
                return log_error_errno(r, "Failed to bind netlink socket: %m");

        /* Remember the level active at startup so it can be restored after reloads. */
        manager->log_level = log_get_max_level();

        *ret = TAKE_PTR(manager);

        return 0;
}
1936
1937 static int main_loop(Manager *manager) {
1938 int fd_worker, r;
1939
1940 manager->pid = getpid_cached();
1941
1942 /* unnamed socket from workers to the main daemon */
1943 r = socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1944 if (r < 0)
1945 return log_error_errno(errno, "Failed to create socketpair for communicating with workers: %m");
1946
1947 fd_worker = manager->worker_watch[READ_END];
1948
1949 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
1950 if (r < 0)
1951 return log_error_errno(r, "Failed to enable SO_PASSCRED: %m");
1952
1953 manager->inotify_fd = inotify_init1(IN_CLOEXEC);
1954 if (manager->inotify_fd < 0)
1955 return log_error_errno(errno, "Failed to create inotify descriptor: %m");
1956
1957 udev_watch_restore(manager->inotify_fd);
1958
1959 /* block and listen to all signals on signalfd */
1960 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
1961
1962 r = sd_event_default(&manager->event);
1963 if (r < 0)
1964 return log_error_errno(r, "Failed to allocate event loop: %m");
1965
1966 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1967 if (r < 0)
1968 return log_error_errno(r, "Failed to create SIGINT event source: %m");
1969
1970 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1971 if (r < 0)
1972 return log_error_errno(r, "Failed to create SIGTERM event source: %m");
1973
1974 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1975 if (r < 0)
1976 return log_error_errno(r, "Failed to create SIGHUP event source: %m");
1977
1978 r = sd_event_set_watchdog(manager->event, true);
1979 if (r < 0)
1980 return log_error_errno(r, "Failed to create watchdog event source: %m");
1981
1982 r = udev_ctrl_attach_event(manager->ctrl, manager->event);
1983 if (r < 0)
1984 return log_error_errno(r, "Failed to attach event to udev control: %m");
1985
1986 r = udev_ctrl_start(manager->ctrl, on_ctrl_msg, manager);
1987 if (r < 0)
1988 return log_error_errno(r, "Failed to start device monitor: %m");
1989
1990 /* This needs to be after the inotify and uevent handling, to make sure
1991 * that the ping is send back after fully processing the pending uevents
1992 * (including the synthetic ones we may create due to inotify events).
1993 */
1994 r = sd_event_source_set_priority(udev_ctrl_get_event_source(manager->ctrl), SD_EVENT_PRIORITY_IDLE);
1995 if (r < 0)
1996 return log_error_errno(r, "Failed to set IDLE event priority for udev control event source: %m");
1997
1998 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->inotify_fd, EPOLLIN, on_inotify, manager);
1999 if (r < 0)
2000 return log_error_errno(r, "Failed to create inotify event source: %m");
2001
2002 r = sd_device_monitor_attach_event(manager->monitor, manager->event);
2003 if (r < 0)
2004 return log_error_errno(r, "Failed to attach event to device monitor: %m");
2005
2006 r = sd_device_monitor_start(manager->monitor, on_uevent, manager);
2007 if (r < 0)
2008 return log_error_errno(r, "Failed to start device monitor: %m");
2009
2010 (void) sd_event_source_set_description(sd_device_monitor_get_event_source(manager->monitor), "device-monitor");
2011
2012 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
2013 if (r < 0)
2014 return log_error_errno(r, "Failed to create worker event source: %m");
2015
2016 r = sd_event_add_post(manager->event, NULL, on_post, manager);
2017 if (r < 0)
2018 return log_error_errno(r, "Failed to create post event source: %m");
2019
2020 manager->last_usec = now(CLOCK_MONOTONIC);
2021
2022 udev_builtin_init();
2023
2024 r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
2025 if (r < 0)
2026 return log_error_errno(r, "Failed to read udev rules: %m");
2027
2028 r = udev_rules_apply_static_dev_perms(manager->rules);
2029 if (r < 0)
2030 log_warning_errno(r, "Failed to apply permissions on static device nodes, ignoring: %m");
2031
2032 notify_ready();
2033
2034 r = sd_event_loop(manager->event);
2035 if (r < 0)
2036 log_error_errno(r, "Event loop failed: %m");
2037
2038 sd_notify(false,
2039 "STOPPING=1\n"
2040 "STATUS=Shutting down...");
2041 return r;
2042 }
2043
/* Entry point of the udevd daemon: parse configuration (udev.conf, environment,
 * command line, kernel command line — in increasing priority), compute worker limits,
 * create the manager and, optionally after daemonizing, run the main loop. */
int run_udevd(int argc, char *argv[]) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        log_open();
        udev_parse_config_full(&arg_children_max, &arg_exec_delay_usec, &arg_event_timeout_usec, &arg_resolve_name_timing, &arg_timeout_signal);
        log_parse_environment();
        log_open(); /* Done again to update after reading configuration. */

        r = parse_argv(argc, argv);
        if (r <= 0)
                return r;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                return r;

        /* If not configured explicitly, derive the worker limit from CPU count and RAM. */
        if (arg_children_max == 0) {
                unsigned long cpu_limit, mem_limit, cpu_count = 1;

                r = cpus_in_affinity_mask();
                if (r < 0)
                        log_warning_errno(r, "Failed to determine number of local CPUs, ignoring: %m");
                else
                        cpu_count = r;

                cpu_limit = cpu_count * 2 + 16;
                /* One worker per 128 MiB of RAM, but at least 10. */
                mem_limit = MAX(physical_memory() / (128UL*1024*1024), 10U);

                arg_children_max = MIN(cpu_limit, mem_limit);
                arg_children_max = MIN(WORKER_NUM_MAX, arg_children_max);

                log_debug("Set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        umask(022);

        r = mac_selinux_init();
        if (r < 0)
                return r;

        r = RET_NERRNO(mkdir("/run/udev", 0755));
        if (r < 0 && r != -EEXIST)
                return log_error_errno(r, "Failed to create /run/udev: %m");

        /* Pick up the control and uevent sockets when socket-activated. */
        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to listen on fds: %m");

        r = manager_new(&manager, fd_ctrl, fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to create manager: %m");

        if (arg_daemonize) {
                pid_t pid;

                log_info("Starting systemd-udevd version " GIT_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                if (pid < 0)
                        return log_error_errno(errno, "Failed to fork daemon: %m");
                if (pid > 0)
                        /* parent */
                        return 0;

                /* child */
                (void) setsid();
        }

        return main_loop(manager);
}
2133 }