]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/udev/udevd.c
Merge pull request #23875 from yuwata/resolve-mdns-fix-use-after-free
[thirdparty/systemd.git] / src / udev / udevd.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
6 */
7
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <getopt.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <sys/epoll.h>
16 #include <sys/file.h>
17 #include <sys/inotify.h>
18 #include <sys/ioctl.h>
19 #include <sys/mount.h>
20 #include <sys/prctl.h>
21 #include <sys/signalfd.h>
22 #include <sys/stat.h>
23 #include <sys/time.h>
24 #include <sys/wait.h>
25 #include <unistd.h>
26
27 #include "sd-daemon.h"
28 #include "sd-event.h"
29
30 #include "alloc-util.h"
31 #include "cgroup-setup.h"
32 #include "cgroup-util.h"
33 #include "cpu-set-util.h"
34 #include "dev-setup.h"
35 #include "device-monitor-private.h"
36 #include "device-private.h"
37 #include "device-util.h"
38 #include "errno-list.h"
39 #include "event-util.h"
40 #include "fd-util.h"
41 #include "fileio.h"
42 #include "format-util.h"
43 #include "fs-util.h"
44 #include "hashmap.h"
45 #include "inotify-util.h"
46 #include "io-util.h"
47 #include "limits-util.h"
48 #include "list.h"
49 #include "main-func.h"
50 #include "mkdir.h"
51 #include "netlink-util.h"
52 #include "parse-util.h"
53 #include "path-util.h"
54 #include "pretty-print.h"
55 #include "proc-cmdline.h"
56 #include "process-util.h"
57 #include "selinux-util.h"
58 #include "signal-util.h"
59 #include "socket-util.h"
60 #include "string-util.h"
61 #include "strv.h"
62 #include "strxcpyx.h"
63 #include "syslog-util.h"
64 #include "udevd.h"
65 #include "udev-builtin.h"
66 #include "udev-ctrl.h"
67 #include "udev-event.h"
68 #include "udev-util.h"
69 #include "udev-watch.h"
70 #include "user-util.h"
71 #include "version.h"
72
73 #define WORKER_NUM_MAX 2048U
74 #define EVENT_RETRY_INTERVAL_USEC (200 * USEC_PER_MSEC)
75 #define EVENT_RETRY_TIMEOUT_USEC (3 * USEC_PER_MINUTE)
76
77 static bool arg_debug = false;
78 static int arg_daemonize = false;
79 static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
80 static unsigned arg_children_max = 0;
81 static usec_t arg_exec_delay_usec = 0;
82 static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
83 static int arg_timeout_signal = SIGKILL;
84 static bool arg_blockdev_read_only = false;
85
86 typedef struct Event Event;
87 typedef struct Worker Worker;
88
/* Global daemon state: the event loop, worker bookkeeping, the queue of
 * pending device events and the communication channels of the main process. */
typedef struct Manager {
        sd_event *event;
        Hashmap *workers;             /* PID_TO_PTR(pid) -> Worker */
        LIST_HEAD(Event, events);     /* queued/running device events */
        char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */
        int log_level;

        UdevRules *rules;
        Hashmap *properties;          /* global properties passed to rule execution */

        sd_netlink *rtnl;

        sd_device_monitor *monitor;   /* kernel uevent monitor */
        UdevCtrl *ctrl;               /* "udevadm control" channel */
        int worker_watch[2];          /* socketpair; workers write EventResult to WRITE_END */

        /* used by udev-watch */
        int inotify_fd;
        sd_event_source *inotify_event;

        sd_event_source *kill_workers_event;  /* timer that cleans up idle workers */

        usec_t last_usec;             /* timestamp of the last config-reload check */

        bool stop_exec_queue;         /* event dispatching paused */
        bool exit;                    /* shutdown requested */
} Manager;
117
/* Lifecycle of an Event in the queue. */
typedef enum EventState {
        EVENT_UNDEF,    /* also used as a wildcard by event_queue_cleanup() */
        EVENT_QUEUED,   /* waiting for a free worker and for blockers to finish */
        EVENT_RUNNING,  /* dispatched to a worker */
} EventState;
123
/* One queued uevent, linked into Manager.events. */
typedef struct Event {
        Manager *manager;
        Worker *worker;        /* worker currently processing this event, if any */
        EventState state;

        sd_device *dev;

        sd_device_action_t action;
        uint64_t seqnum;               /* kernel uevent sequence number */
        uint64_t blocker_seqnum;       /* seqnum of the last known event blocking this one */
        const char *id;                /* borrowed from 'dev' (kept alive by the ref above) */
        const char *devpath;           /* borrowed from 'dev' */
        const char *devpath_old;       /* borrowed from 'dev' */
        const char *devnode;           /* borrowed from 'dev' */
        usec_t retry_again_next_usec;     /* earliest time to retry a locked block-device event */
        usec_t retry_again_timeout_usec;  /* absolute deadline after which retrying is given up */

        sd_event_source *timeout_warning_event;  /* "taking a long time" warning timer */
        sd_event_source *timeout_event;          /* kill-the-worker timer */

        LIST_FIELDS(Event, event);
} Event;
146
/* Lifecycle of a worker process as tracked by the main daemon. */
typedef enum WorkerState {
        WORKER_UNDEF,
        WORKER_RUNNING,  /* currently processing an event */
        WORKER_IDLE,     /* waiting for the next event */
        WORKER_KILLED,   /* SIGTERM already sent */
        WORKER_KILLING,  /* asked to exit after finishing its current event */
} WorkerState;
154
/* Bookkeeping for one forked worker process; owned by Manager.workers. */
typedef struct Worker {
        Manager *manager;
        pid_t pid;
        sd_event_source *child_event_source;  /* child (SIGCHLD) watch for 'pid' */
        sd_device_monitor *monitor;           /* unicast channel for sending events to this worker */
        WorkerState state;
        Event *event;                         /* event currently assigned to this worker, if any */
} Worker;
163
/* passed from worker to main process */
/* Encodes the outcome of one event execution in a single integer: negative
 * errno, exit status (0..255), a "try again" marker, or 257+signal. */
typedef enum EventResult {
        EVENT_RESULT_NERRNO_MIN = -ERRNO_MAX,
        EVENT_RESULT_NERRNO_MAX = -1,
        EVENT_RESULT_SUCCESS = 0,
        EVENT_RESULT_EXIT_STATUS_BASE = 0,
        EVENT_RESULT_EXIT_STATUS_MAX = 255,
        EVENT_RESULT_TRY_AGAIN = 256, /* when the block device is locked by another process. */
        EVENT_RESULT_SIGNAL_BASE = 257,
        EVENT_RESULT_SIGNAL_MAX = EVENT_RESULT_SIGNAL_BASE + _NSIG,
        _EVENT_RESULT_MAX,
        _EVENT_RESULT_INVALID = -EINVAL,
} EventResult;
177
178 static Event *event_free(Event *event) {
179 if (!event)
180 return NULL;
181
182 assert(event->manager);
183
184 LIST_REMOVE(event, event->manager->events, event);
185 sd_device_unref(event->dev);
186
187 /* Do not use sd_event_source_disable_unref() here, as this is called by both workers and the
188 * main process. */
189 sd_event_source_unref(event->timeout_warning_event);
190 sd_event_source_unref(event->timeout_event);
191
192 if (event->worker)
193 event->worker->event = NULL;
194
195 return mfree(event);
196 }
197
/* Free every queued event whose state matches 'match_state'; EVENT_UNDEF acts
 * as a wildcard and drops the whole queue.
 * NOTE(review): event_free() unlinks the current entry while we iterate —
 * this assumes LIST_FOREACH caches the next pointer and is removal-safe;
 * confirm against the macro definition in list.h. */
static void event_queue_cleanup(Manager *manager, EventState match_state) {
        LIST_FOREACH(event, event, manager->events) {
                if (match_state != EVENT_UNDEF && match_state != event->state)
                        continue;

                event_free(event);
        }
}
206
/* Release a Worker: remove it from the manager's pid map, drop its child
 * watch and monitor, and free any event still attached to it. Accepts NULL;
 * always returns NULL. */
static Worker *worker_free(Worker *worker) {
        if (!worker)
                return NULL;

        /* 'manager' is only set once the worker was inserted into the map,
         * see worker_new(). */
        if (worker->manager)
                hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));

        sd_event_source_unref(worker->child_event_source);
        sd_device_monitor_unref(worker->monitor);
        event_free(worker->event);

        return mfree(worker);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Worker*, worker_free);
/* Hash map keyed by PID_TO_PTR(pid); destroying the map frees the workers. */
DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(worker_hash_op, void, trivial_hash_func, trivial_compare_func, Worker, worker_free);
223
/* Drop the Manager state a worker process must not keep after fork():
 * event sources, the event loop, the worker map, the event queue and the
 * main-process communication channels. Also used by manager_free(). */
static void manager_clear_for_worker(Manager *manager) {
        assert(manager);

        /* Do not use sd_event_source_disable_unref() here, as this is called by both workers and the
         * main process. */
        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);

        manager->event = sd_event_unref(manager->event);

        manager->workers = hashmap_free(manager->workers);
        event_queue_cleanup(manager, EVENT_UNDEF);

        manager->monitor = sd_device_monitor_unref(manager->monitor);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        /* Only the read end is closed here; the write end is kept so a worker
         * can still report results (closed later in manager_free()). */
        manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
}
242
/* Full teardown of the Manager object. Accepts NULL; always returns NULL. */
static Manager* manager_free(Manager *manager) {
        if (!manager)
                return NULL;

        udev_builtin_exit();

        /* Releases the event loop, workers, queue and channels. */
        manager_clear_for_worker(manager);

        sd_netlink_unref(manager->rtnl);

        hashmap_free_free_free(manager->properties);
        udev_rules_free(manager->rules);

        safe_close(manager->inotify_fd);
        safe_close_pair(manager->worker_watch);

        free(manager->cgroup);
        return mfree(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
264
265 static int on_sigchld(sd_event_source *s, const siginfo_t *si, void *userdata);
266
/* Register a freshly fork()ed worker process with the manager.
 *
 * The worker monitor is disconnected in the parent (the child keeps its own
 * copy) but its netlink address is retained so the main process can unicast
 * events to it. On success, *ret points to the new Worker, which is owned by
 * manager->workers. Returns 0 on success, negative errno on failure. */
static int worker_new(Worker **ret, Manager *manager, sd_device_monitor *worker_monitor, pid_t pid) {
        _cleanup_(worker_freep) Worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        /* close monitor, but keep address around */
        device_monitor_disconnect(worker_monitor);

        worker = new(Worker, 1);
        if (!worker)
                return -ENOMEM;

        *worker = (Worker) {
                .monitor = sd_device_monitor_ref(worker_monitor),
                .pid = pid,
        };

        r = sd_event_add_child(manager->event, &worker->child_event_source, pid, WEXITED, on_sigchld, worker);
        if (r < 0)
                return r;

        r = hashmap_ensure_put(&manager->workers, &worker_hash_op, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        /* Set the back-pointer only after the map insertion succeeded, so that
         * worker_free() on the error paths above does not try to remove the
         * worker from a map it was never added to. */
        worker->manager = manager;

        *ret = TAKE_PTR(worker);
        return 0;
}
301
302 static void manager_kill_workers(Manager *manager, bool force) {
303 Worker *worker;
304
305 assert(manager);
306
307 HASHMAP_FOREACH(worker, manager->workers) {
308 if (worker->state == WORKER_KILLED)
309 continue;
310
311 if (worker->state == WORKER_RUNNING && !force) {
312 worker->state = WORKER_KILLING;
313 continue;
314 }
315
316 worker->state = WORKER_KILLED;
317 (void) kill(worker->pid, SIGTERM);
318 }
319 }
320
/* Initiate daemon shutdown: notify the service manager, stop accepting new
 * events, flush the queue and terminate all workers. */
static void manager_exit(Manager *manager) {
        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_disable_unref(manager->inotify_event);
        manager->inotify_fd = safe_close(manager->inotify_fd);

        manager->monitor = sd_device_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager, true);
}
342
343 static void notify_ready(void) {
344 int r;
345
346 r = sd_notifyf(false,
347 "READY=1\n"
348 "STATUS=Processing with %u children at max", arg_children_max);
349 if (r < 0)
350 log_warning_errno(r, "Failed to send readiness notification, ignoring: %m");
351 }
352
/* reload requested, HUP signal received, rules changed, builtin changed */
/* Re-read rules and builtin state if anything changed. With force=false the
 * check is rate-limited to once every 3 seconds. */
static void manager_reload(Manager *manager, bool force) {
        _cleanup_(udev_rules_freep) UdevRules *rules = NULL;
        usec_t now_usec;
        int r;

        assert(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &now_usec) >= 0);
        if (!force && now_usec < usec_add(manager->last_usec, 3 * USEC_PER_SEC))
                /* check for changed config, every 3 seconds at most */
                return;
        manager->last_usec = now_usec;

        /* Reload SELinux label database, to make the child inherit the up-to-date database. */
        mac_selinux_maybe_reload();

        /* Nothing changed. It is not necessary to reload. */
        if (!udev_rules_should_reload(manager->rules) && !udev_builtin_validate())
                return;

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        /* Idle workers are terminated so their replacements pick up the new
         * rules; busy workers are only marked for later termination. */
        manager_kill_workers(manager, false);

        udev_builtin_exit();
        udev_builtin_init();

        r = udev_rules_load(&rules, arg_resolve_name_timing);
        if (r < 0)
                log_warning_errno(r, "Failed to read udev rules, using the previously loaded rules, ignoring: %m");
        else
                udev_rules_free_and_replace(manager->rules, rules);

        notify_ready();
}
391
392 static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
393 Manager *manager = userdata;
394
395 assert(manager);
396
397 log_debug("Cleanup idle workers");
398 manager_kill_workers(manager, false);
399
400 return 1;
401 }
402
/* Broadcast a processed device event to libudev listeners. When the worker
 * did not succeed, the device is first annotated with UDEV_WORKER_* properties
 * decoded from the EventResult ranges (errno / exit status / signal). */
static void device_broadcast(sd_device_monitor *monitor, sd_device *dev, EventResult result) {
        int r;

        assert(dev);

        /* On exit, manager->monitor is already NULL. */
        if (!monitor)
                return;

        if (result != EVENT_RESULT_SUCCESS) {
                (void) device_add_property(dev, "UDEV_WORKER_FAILED", "1");

                switch (result) {
                case EVENT_RESULT_NERRNO_MIN ... EVENT_RESULT_NERRNO_MAX: {
                        const char *str;

                        (void) device_add_propertyf(dev, "UDEV_WORKER_ERRNO", "%i", -result);

                        str = errno_to_name(result);
                        if (str)
                                (void) device_add_property(dev, "UDEV_WORKER_ERRNO_NAME", str);
                        break;
                }
                case EVENT_RESULT_EXIT_STATUS_BASE ... EVENT_RESULT_EXIT_STATUS_MAX:
                        (void) device_add_propertyf(dev, "UDEV_WORKER_EXIT_STATUS", "%i", result - EVENT_RESULT_EXIT_STATUS_BASE);
                        break;

                case EVENT_RESULT_TRY_AGAIN:
                        /* TRY_AGAIN is handled by requeueing in the caller and is never broadcast. */
                        assert_not_reached();
                        break;

                case EVENT_RESULT_SIGNAL_BASE ... EVENT_RESULT_SIGNAL_MAX: {
                        const char *str;

                        (void) device_add_propertyf(dev, "UDEV_WORKER_SIGNAL", "%i", result - EVENT_RESULT_SIGNAL_BASE);

                        str = signal_to_string(result - EVENT_RESULT_SIGNAL_BASE);
                        if (str)
                                (void) device_add_property(dev, "UDEV_WORKER_SIGNAL_NAME", str);
                        break;
                }
                default:
                        log_device_warning(dev, "Unknown event result \"%i\", ignoring.", result);
                }
        }

        r = device_monitor_send_device(monitor, NULL, dev);
        if (r < 0)
                log_device_warning_errno(dev, r,
                                         "Failed to broadcast event to libudev listeners, ignoring: %m");
}
454
/* Called in a worker: report the result of one event execution to the main
 * daemon over the shared socketpair. */
static int worker_send_result(Manager *manager, EventResult result) {
        assert(manager);
        assert(manager->worker_watch[WRITE_END] >= 0);

        return loop_write(manager->worker_watch[WRITE_END], &result, sizeof(result), false);
}
461
/* Resolve the whole-disk block device for 'dev': a partition resolves to its
 * parent, a whole disk to itself. Returns 1 and fills the (optional) outputs
 * when a lockable disk was found; returns 0 with NULL outputs when locking is
 * irrelevant for this device (remove event, non-block, dm/md/drbd, or the
 * device vanished); negative errno on error. The returned pointers are
 * borrowed. */
static int device_get_whole_disk(sd_device *dev, sd_device **ret_device, const char **ret_devname) {
        const char *val;
        int r;

        assert(dev);

        if (device_for_action(dev, SD_DEVICE_REMOVE))
                goto irrelevant;

        r = sd_device_get_subsystem(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");

        if (!streq(val, "block"))
                goto irrelevant;

        r = sd_device_get_sysname(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get sysname: %m");

        /* Exclude the following devices:
         * For "dm-", see the comment added by e918a1b5a94f270186dca59156354acd2a596494.
         * For "md", see the commit message of 2e5b17d01347d3c3118be2b8ad63d20415dbb1f0,
         * but not sure the assumption is still valid even when partitions are created on the md
         * devices, surprisingly which seems to be possible, see PR #22973.
         * For "drbd", see the commit message of fee854ee8ccde0cd28e0f925dea18cce35f3993d. */
        if (STARTSWITH_SET(val, "dm-", "md", "drbd"))
                goto irrelevant;

        r = sd_device_get_devtype(dev, &val);
        if (r < 0 && r != -ENOENT)
                return log_device_debug_errno(dev, r, "Failed to get devtype: %m");
        if (r >= 0 && streq(val, "partition")) {
                r = sd_device_get_parent(dev, &dev);
                if (r == -ENOENT) /* The device may be already removed. */
                        goto irrelevant;
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to get parent device: %m");
        }

        r = sd_device_get_devname(dev, &val);
        if (r == -ENOENT)
                goto irrelevant;
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devname: %m");

        if (ret_device)
                *ret_device = dev;
        if (ret_devname)
                *ret_devname = val;
        return 1;

irrelevant:
        if (ret_device)
                *ret_device = NULL;
        if (ret_devname)
                *ret_devname = NULL;
        return 0;
}
521
/* Try to take a shared BSD flock() on the whole-disk device backing 'dev'.
 * Returns 1 with *ret_fd holding the locked fd, 0 with *ret_fd == -1 when no
 * lock is needed (or the device disappeared), -EAGAIN when another process
 * holds an exclusive lock, other negative errno on error. */
static int worker_lock_whole_disk(sd_device *dev, int *ret_fd) {
        _cleanup_close_ int fd = -1;
        sd_device *dev_whole_disk;
        const char *val;
        int r;

        assert(dev);
        assert(ret_fd);

        /* Take a shared lock on the device node; this establishes a concept of device "ownership" to
         * serialize device access. External processes holding an exclusive lock will cause udev to skip the
         * event handling; in the case udev acquired the lock, the external process can block until udev has
         * finished its event handling. */

        r = device_get_whole_disk(dev, &dev_whole_disk, &val);
        if (r < 0)
                return r;
        if (r == 0)
                goto nolock;

        fd = sd_device_open(dev_whole_disk, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
        if (fd < 0) {
                /* A vanished device is expected during hotplug and not an error. */
                bool ignore = ERRNO_IS_DEVICE_ABSENT(fd);

                log_device_debug_errno(dev, fd, "Failed to open '%s'%s: %m", val, ignore ? ", ignoring" : "");
                if (!ignore)
                        return fd;

                goto nolock;
        }

        /* LOCK_NB: never block here; -EAGAIN propagates to the caller, which requeues the event. */
        if (flock(fd, LOCK_SH|LOCK_NB) < 0)
                return log_device_debug_errno(dev, errno, "Failed to flock(%s): %m", val);

        *ret_fd = TAKE_FD(fd);
        return 1;

nolock:
        *ret_fd = -1;
        return 0;
}
563
/* If udev.blockdev_read_only is enabled, set the kernel read-only flag
 * (BLKROSET) on newly added physical block devices. Returns 0 also when the
 * device is skipped; negative errno on hard failure. */
static int worker_mark_block_device_read_only(sd_device *dev) {
        _cleanup_close_ int fd = -1;
        const char *val;
        int state = 1, r;  /* state == 1 -> read-only for the BLKROSET ioctl */

        assert(dev);

        if (!arg_blockdev_read_only)
                return 0;

        /* Do this only once, when the block device is new. If the device is later retriggered let's not
         * toggle the bit again, so that people can boot up with full read-only mode and then unset the bit
         * for specific devices only. */
        if (!device_for_action(dev, SD_DEVICE_ADD))
                return 0;

        r = sd_device_get_subsystem(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");

        if (!streq(val, "block"))
                return 0;

        r = sd_device_get_sysname(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get sysname: %m");

        /* Exclude synthetic devices for now, this is supposed to be a safety feature to avoid modification
         * of physical devices, and what sits on top of those doesn't really matter if we don't allow the
         * underlying block devices to receive changes. */
        if (STARTSWITH_SET(val, "dm-", "md", "drbd", "loop", "nbd", "zram"))
                return 0;

        fd = sd_device_open(dev, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
        if (fd < 0)
                return log_device_debug_errno(dev, fd, "Failed to open '%s', ignoring: %m", val);

        if (ioctl(fd, BLKROSET, &state) < 0)
                return log_device_warning_errno(dev, errno, "Failed to mark block device '%s' read-only: %m", val);

        log_device_info(dev, "Successfully marked block device '%s' read-only.", val);
        return 0;
}
607
/* Run one device event inside a worker: lock the disk, apply rules, execute
 * RUN programs and update inotify watches. Returns 0 on success, the positive
 * EVENT_RESULT_TRY_AGAIN when the block device is locked elsewhere, or
 * negative errno on failure. */
static int worker_process_device(Manager *manager, sd_device *dev) {
        _cleanup_(udev_event_freep) UdevEvent *udev_event = NULL;
        _cleanup_close_ int fd_lock = -1;
        int r;

        assert(manager);
        assert(dev);

        log_device_uevent(dev, "Processing device");

        udev_event = udev_event_new(dev, arg_exec_delay_usec, manager->rtnl, manager->log_level);
        if (!udev_event)
                return -ENOMEM;

        /* If this is a block device and the device is locked currently via the BSD advisory locks,
         * someone else is using it exclusively. We don't run our udev rules now to not interfere.
         * Instead of processing the event, we requeue the event and will try again after a delay.
         *
         * The user-facing side of this: https://systemd.io/BLOCK_DEVICE_LOCKING */
        r = worker_lock_whole_disk(dev, &fd_lock);
        if (r == -EAGAIN)
                return EVENT_RESULT_TRY_AGAIN;
        if (r < 0)
                return r;

        (void) worker_mark_block_device_read_only(dev);

        /* apply rules, create node, symlinks */
        r = udev_event_execute_rules(
                        udev_event,
                        manager->inotify_fd,
                        arg_event_timeout_usec,
                        arg_timeout_signal,
                        manager->properties,
                        manager->rules);
        if (r < 0)
                return r;

        udev_event_execute_run(udev_event, arg_event_timeout_usec, arg_timeout_signal);

        if (!manager->rtnl)
                /* in case rtnl was initialized */
                manager->rtnl = sd_netlink_ref(udev_event->rtnl);

        r = udev_event_process_inotify_watch(udev_event, manager->inotify_fd);
        if (r < 0)
                return r;

        log_device_uevent(dev, "Device processed");
        return 0;
}
659
/* Worker-side monitor callback: process one device received from the main
 * daemon, broadcast the outcome to libudev listeners (unless the event must
 * be requeued) and report the EventResult back to the main process. */
static int worker_device_monitor_handler(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(dev);
        assert(manager);

        r = worker_process_device(manager, dev);
        if (r == EVENT_RESULT_TRY_AGAIN)
                /* if we couldn't acquire the flock(), then requeue the event */
                log_device_debug(dev, "Block device is currently locked, requeueing the event.");
        else {
                if (r < 0)
                        log_device_warning_errno(dev, r, "Failed to process device, ignoring: %m");

                /* send processed event back to libudev listeners */
                device_broadcast(monitor, dev, r);
        }

        /* send udevd the result of the event execution */
        r = worker_send_result(manager, r);
        if (r < 0)
                log_device_warning_errno(dev, r, "Failed to send signal to main daemon, ignoring: %m");

        /* Reset the log level, as it might be changed by "OPTIONS=log_level=". */
        log_set_max_level(manager->log_level);

        return 1;
}
689
/* Entry point of a worker process after fork(): takes ownership of the
 * Manager copy, the monitor and the first device, sets up its own event loop
 * and processes devices sent by the main daemon until told to exit (SIGTERM).
 * Returns 0 on clean exit, negative errno on failure. */
static int worker_main(Manager *_manager, sd_device_monitor *monitor, sd_device *first_device) {
        _cleanup_(sd_device_unrefp) sd_device *dev = first_device;
        _cleanup_(manager_freep) Manager *manager = _manager;
        int r;

        assert(manager);
        assert(monitor);
        assert(dev);

        /* Only the main daemon talks to the service manager. */
        assert_se(unsetenv("NOTIFY_SOCKET") == 0);

        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, -1) >= 0);

        /* Reset OOM score, we only protect the main daemon. */
        r = set_oom_score_adjust(0);
        if (r < 0)
                log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

        /* Clear unnecessary data in Manager object. */
        manager_clear_for_worker(manager);

        r = sd_event_new(&manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, NULL, NULL);
        if (r < 0)
                return log_error_errno(r, "Failed to set SIGTERM event: %m");

        r = sd_device_monitor_attach_event(monitor, manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to attach event loop to device monitor: %m");

        r = sd_device_monitor_start(monitor, worker_device_monitor_handler, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to start device monitor: %m");

        (void) sd_event_source_set_description(sd_device_monitor_get_event_source(monitor), "worker-device-monitor");

        /* Process first device */
        (void) worker_device_monitor_handler(monitor, dev, manager);

        r = sd_event_loop(manager->event);
        if (r < 0)
                return log_error_errno(r, "Event loop failed: %m");

        return 0;
}
738
739 static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
740 Event *event = userdata;
741
742 assert(event);
743 assert(event->worker);
744
745 kill_and_sigcont(event->worker->pid, arg_timeout_signal);
746 event->worker->state = WORKER_KILLED;
747
748 log_device_error(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" killed", event->worker->pid, event->seqnum);
749
750 return 1;
751 }
752
753 static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
754 Event *event = userdata;
755
756 assert(event);
757 assert(event->worker);
758
759 log_device_warning(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" is taking a long time", event->worker->pid, event->seqnum);
760
761 return 1;
762 }
763
/* Assign 'event' to 'worker' for processing and arm the per-event warning and
 * kill timers on the manager's event loop. Both objects must be unassigned. */
static void worker_attach_event(Worker *worker, Event *event) {
        sd_event *e;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        /* Timer failures are ignored: the event still runs, just without timeout protection. */
        (void) sd_event_add_time_relative(e, &event->timeout_warning_event, CLOCK_MONOTONIC,
                                          udev_warn_timeout(arg_event_timeout_usec), USEC_PER_SEC,
                                          on_event_timeout_warning, event);

        (void) sd_event_add_time_relative(e, &event->timeout_event, CLOCK_MONOTONIC,
                                          arg_event_timeout_usec, USEC_PER_SEC,
                                          on_event_timeout, event);
}
788
/* Fork a new worker process for 'event'. The child runs worker_main() and
 * never returns here; the parent registers the child and attaches the event.
 * Returns 0 on success, negative errno on failure (the event is put back in
 * EVENT_QUEUED state if the fork itself failed). */
static int worker_spawn(Manager *manager, Event *event) {
        _cleanup_(sd_device_monitor_unrefp) sd_device_monitor *worker_monitor = NULL;
        Worker *worker;
        pid_t pid;
        int r;

        /* listen for new events */
        r = device_monitor_new_full(&worker_monitor, MONITOR_GROUP_NONE, -1);
        if (r < 0)
                return r;

        /* allow the main daemon netlink address to send devices to the worker */
        r = device_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        if (r < 0)
                return log_error_errno(r, "Worker: Failed to set unicast sender: %m");

        r = device_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                return log_error_errno(r, "Worker: Failed to enable receiving of device: %m");

        r = safe_fork(NULL, FORK_DEATHSIG, &pid);
        if (r < 0) {
                event->state = EVENT_QUEUED;
                return log_error_errno(r, "Failed to fork() worker: %m");
        }
        if (r == 0) {
                DEVICE_TRACE_POINT(worker_spawned, event->dev, getpid());

                /* Worker process */
                r = worker_main(manager, worker_monitor, sd_device_ref(event->dev));
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }

        /* Parent: track the child and hand it the event. */
        r = worker_new(&worker, manager, worker_monitor, pid);
        if (r < 0)
                return log_error_errno(r, "Failed to create worker object: %m");

        worker_attach_event(worker, event);

        log_device_debug(event->dev, "Worker ["PID_FMT"] is forked for processing SEQNUM=%"PRIu64".", pid, event->seqnum);
        return 0;
}
832
/* Dispatch one event: hand it to an idle worker if there is one, otherwise
 * fork a new worker unless the children limit is reached. Returns 1 when the
 * event is now being processed, 0 when no worker is available, negative errno
 * on failure. */
static int event_run(Event *event) {
        /* Latched so the "maximum children" message is logged once per batch. */
        static bool log_children_max_reached = true;
        Manager *manager;
        Worker *worker;
        int r;

        assert(event);
        assert(event->manager);

        log_device_uevent(event->dev, "Device ready for processing");

        manager = event->manager;
        HASHMAP_FOREACH(worker, manager->workers) {
                if (worker->state != WORKER_IDLE)
                        continue;

                r = device_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (r < 0) {
                        log_device_error_errno(event->dev, r, "Worker ["PID_FMT"] did not accept message, killing the worker: %m",
                                               worker->pid);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return 1; /* event is now processing. */
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                /* Avoid spamming the debug logs if the limit is already reached and
                 * many events still need to be processed */
                if (log_children_max_reached && arg_children_max > 1) {
                        log_debug("Maximum number (%u) of children reached.", hashmap_size(manager->workers));
                        log_children_max_reached = false;
                }
                return 0; /* no free worker */
        }

        /* Re-enable the debug message for the next batch of events */
        log_children_max_reached = true;

        /* start new worker and pass initial device */
        r = worker_spawn(manager, event);
        if (r < 0)
                return r;

        return 1; /* event is now processing. */
}
881
882 static int event_is_blocked(Event *event) {
883 Event *loop_event = NULL;
884 int r;
885
886 /* lookup event for identical, parent, child device */
887
888 assert(event);
889 assert(event->manager);
890 assert(event->blocker_seqnum <= event->seqnum);
891
892 if (event->retry_again_next_usec > 0) {
893 usec_t now_usec;
894
895 r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
896 if (r < 0)
897 return r;
898
899 if (event->retry_again_next_usec <= now_usec)
900 return true;
901 }
902
903 if (event->blocker_seqnum == event->seqnum)
904 /* we have checked previously and no blocker found */
905 return false;
906
907 LIST_FOREACH(event, e, event->manager->events) {
908 loop_event = e;
909
910 /* we already found a later event, earlier cannot block us, no need to check again */
911 if (loop_event->seqnum < event->blocker_seqnum)
912 continue;
913
914 /* event we checked earlier still exists, no need to check again */
915 if (loop_event->seqnum == event->blocker_seqnum)
916 return true;
917
918 /* found ourself, no later event can block us */
919 if (loop_event->seqnum >= event->seqnum)
920 goto no_blocker;
921
922 /* found event we have not checked */
923 break;
924 }
925
926 assert(loop_event);
927 assert(loop_event->seqnum > event->blocker_seqnum &&
928 loop_event->seqnum < event->seqnum);
929
930 /* check if queue contains events we depend on */
931 LIST_FOREACH(event, e, loop_event) {
932 loop_event = e;
933
934 /* found ourself, no later event can block us */
935 if (loop_event->seqnum >= event->seqnum)
936 goto no_blocker;
937
938 if (streq_ptr(loop_event->id, event->id))
939 break;
940
941 if (devpath_conflict(event->devpath, loop_event->devpath) ||
942 devpath_conflict(event->devpath, loop_event->devpath_old) ||
943 devpath_conflict(event->devpath_old, loop_event->devpath))
944 break;
945
946 if (event->devnode && streq_ptr(event->devnode, loop_event->devnode))
947 break;
948 }
949
950 assert(loop_event);
951
952 log_device_debug(event->dev, "SEQNUM=%" PRIu64 " blocked by SEQNUM=%" PRIu64,
953 event->seqnum, loop_event->seqnum);
954
955 event->blocker_seqnum = loop_event->seqnum;
956 return true;
957
958 no_blocker:
959 event->blocker_seqnum = event->seqnum;
960 return false;
961 }
962
/* Walk the queue and dispatch every runnable EVENT_QUEUED event until no idle
 * worker remains. Also piggybacks the rate-limited config-reload check and
 * disables the idle-worker cleanup timer while events are pending. Returns 0
 * when the queue was drained or no worker is free, negative errno on error. */
static int event_queue_start(Manager *manager) {
        int r;

        assert(manager);

        if (!manager->events || manager->exit || manager->stop_exec_queue)
                return 0;

        r = event_source_disable(manager->kill_workers_event);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        manager_reload(manager, /* force = */ false);

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running or queued */
                r = event_is_blocked(event);
                if (r > 0)
                        continue;
                if (r < 0)
                        log_device_warning_errno(event->dev, r,
                                                 "Failed to check dependencies for event (SEQNUM=%"PRIu64", ACTION=%s), "
                                                 "assuming there is no blocking event, ignoring: %m",
                                                 event->seqnum,
                                                 strna(device_action_to_string(event->action)));

                r = event_run(event);
                if (r <= 0) /* 0 means there are no idle workers. Let's escape from the loop. */
                        return r;
        }

        return 0;
}
999
/* Put an event whose block device was locked back into the queue with a retry
 * back-off, detaching it from its worker and disarming its timers. Gives up
 * (returns negative errno) once the event has been retried for longer than
 * EVENT_RETRY_TIMEOUT_USEC. */
static int event_requeue(Event *event) {
        usec_t now_usec;
        int r;

        assert(event);
        assert(event->manager);
        assert(event->manager->event);

        event->timeout_warning_event = sd_event_source_disable_unref(event->timeout_warning_event);
        event->timeout_event = sd_event_source_disable_unref(event->timeout_event);

        /* add a short delay to suppress busy loop */
        r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
        if (r < 0)
                return log_device_warning_errno(event->dev, r,
                                                "Failed to get current time, "
                                                "skipping event (SEQNUM=%"PRIu64", ACTION=%s): %m",
                                                event->seqnum, strna(device_action_to_string(event->action)));

        if (event->retry_again_timeout_usec > 0 && event->retry_again_timeout_usec <= now_usec)
                return log_device_warning_errno(event->dev, SYNTHETIC_ERRNO(ETIMEDOUT),
                                                "The underlying block device is locked by a process more than %s, "
                                                "skipping event (SEQNUM=%"PRIu64", ACTION=%s).",
                                                FORMAT_TIMESPAN(EVENT_RETRY_TIMEOUT_USEC, USEC_PER_MINUTE),
                                                event->seqnum, strna(device_action_to_string(event->action)));

        event->retry_again_next_usec = usec_add(now_usec, EVENT_RETRY_INTERVAL_USEC);
        /* The overall retry deadline is fixed on the first requeue only. */
        if (event->retry_again_timeout_usec == 0)
                event->retry_again_timeout_usec = usec_add(now_usec, EVENT_RETRY_TIMEOUT_USEC);

        if (event->worker && event->worker->event == event)
                event->worker->event = NULL;
        event->worker = NULL;

        event->state = EVENT_QUEUED;
        return 0;
}
1037
/* Clear the retry back-off of all queued events that share 'dev's whole-disk
 * device, so they become runnable again immediately. Returns 0 (also when the
 * device is not a lockable disk), negative errno on error. */
static int event_queue_assume_block_device_unlocked(Manager *manager, sd_device *dev) {
        const char *devname;
        int r;

        /* When a new event for a block device is queued or we get an inotify event, assume that the
         * device is not locked anymore. The assumption may not be true, but that should not cause any
         * issues, as in that case events will be requeued soon. */

        r = device_get_whole_disk(dev, NULL, &devname);
        if (r <= 0)
                return r;

        LIST_FOREACH(event, event, manager->events) {
                const char *event_devname;

                if (event->state != EVENT_QUEUED)
                        continue;

                /* Only events that are currently in retry back-off are affected. */
                if (event->retry_again_next_usec == 0)
                        continue;

                if (device_get_whole_disk(event->dev, NULL, &event_devname) <= 0)
                        continue;

                if (!streq(devname, event_devname))
                        continue;

                event->retry_again_next_usec = 0;
        }

        return 0;
}
1070
/* Append a device received from the kernel to the manager's event queue.
 * Must only be called from the main daemon process. Returns 0 on success,
 * a negative errno on failure (in particular -ENOMEM). */
static int event_queue_insert(Manager *manager, sd_device *dev) {
        const char *devpath, *devpath_old = NULL, *id = NULL, *devnode = NULL;
        sd_device_action_t action;
        uint64_t seqnum;
        Event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        assert(manager->pid == getpid_cached());

        /* We only accept devices received by device monitor: they must carry a
         * sequence number, an action, and a devpath. */
        r = sd_device_get_seqnum(dev, &seqnum);
        if (r < 0)
                return r;

        r = sd_device_get_action(dev, &action);
        if (r < 0)
                return r;

        r = sd_device_get_devpath(dev, &devpath);
        if (r < 0)
                return r;

        /* The following three attributes are optional; only a hard error other
         * than -ENOENT aborts the insertion. */
        r = sd_device_get_property_value(dev, "DEVPATH_OLD", &devpath_old);
        if (r < 0 && r != -ENOENT)
                return r;

        r = device_get_device_id(dev, &id);
        if (r < 0 && r != -ENOENT)
                return r;

        r = sd_device_get_devname(dev, &devnode);
        if (r < 0 && r != -ENOENT)
                return r;

        event = new(Event, 1);
        if (!event)
                return -ENOMEM;

        *event = (Event) {
                .manager = manager,
                .dev = sd_device_ref(dev), /* the event owns a reference to the device */
                .seqnum = seqnum,
                .action = action,
                .id = id,
                .devpath = devpath,
                .devpath_old = devpath_old,
                .devnode = devnode,
                .state = EVENT_QUEUED,
        };

        /* /run/udev/queue indicates to other tools that events are pending; (re-)create it
         * when the queue transitions from empty to non-empty. Failure is non-fatal. */
        if (!manager->events) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "Failed to touch /run/udev/queue, ignoring: %m");
        }

        LIST_APPEND(event, manager->events, event);

        log_device_uevent(dev, "Device is queued");

        return 0;
}
1137
1138 static int on_uevent(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
1139 Manager *manager = userdata;
1140 int r;
1141
1142 assert(manager);
1143
1144 DEVICE_TRACE_POINT(kernel_uevent_received, dev);
1145
1146 device_ensure_usec_initialized(dev, NULL);
1147
1148 r = event_queue_insert(manager, dev);
1149 if (r < 0) {
1150 log_device_error_errno(dev, r, "Failed to insert device into event queue: %m");
1151 return 1;
1152 }
1153
1154 (void) event_queue_assume_block_device_unlocked(manager, dev);
1155
1156 /* we have fresh events, try to schedule them */
1157 event_queue_start(manager);
1158
1159 return 1;
1160 }
1161
/* Dispatch result messages sent by worker processes over the worker socketpair. Each message
 * carries an EventResult, authenticated via SCM_CREDENTIALS so it can be mapped back to the
 * sending worker. Drains the socket, then tries to schedule further queued events. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                EventResult result;
                struct iovec iovec = IOVEC_MAKE(&result, sizeof(result));
                CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred))) control;
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                ssize_t size;
                struct ucred *ucred;
                Worker *worker;

                size = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT);
                if (size == -EINTR)
                        continue;
                if (size == -EAGAIN)
                        /* nothing more to read */
                        break;
                if (size < 0)
                        return log_error_errno(size, "Failed to receive message: %m");

                /* We do not expect any fds in the control data; close any that arrived. */
                cmsg_close_all(&msghdr);

                if (size != sizeof(result)) {
                        log_warning("Ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred);
                if (!ucred || ucred->pid <= 0) {
                        log_warning("Ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("Worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                /* A worker slated for killing receives SIGTERM now that its event is done;
                 * otherwise it becomes idle again (unless already killed). */
                if (worker->state == WORKER_KILLING) {
                        worker->state = WORKER_KILLED;
                        (void) kill(worker->pid, SIGTERM);
                } else if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                if (result == EVENT_RESULT_TRY_AGAIN &&
                    event_requeue(worker->event) < 0)
                        device_broadcast(manager->monitor, worker->event->dev, -ETIMEDOUT);

                /* When event_requeue() succeeds, worker->event is NULL, and event_free() handles NULL gracefully. */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
1230
/* Receive the udevd control message from userspace (sent by "udevadm control") and apply it.
 * Returns 1 to keep the event source active, 0 only for an invalid SET_CHILDREN_MAX value. */
static int on_ctrl_msg(UdevCtrl *uctrl, UdevCtrlMessageType type, const UdevCtrlMessageValue *value, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(value);
        assert(manager);

        switch (type) {
        case UDEV_CTRL_SET_LOG_LEVEL:
                log_debug("Received udev control message (SET_LOG_LEVEL), setting log_level=%i", value->intval);
                log_set_max_level(value->intval);
                manager->log_level = value->intval;
                /* Retire idle workers so that freshly forked ones pick up the new log level. */
                manager_kill_workers(manager, false);
                break;
        case UDEV_CTRL_STOP_EXEC_QUEUE:
                log_debug("Received udev control message (STOP_EXEC_QUEUE)");
                manager->stop_exec_queue = true;
                break;
        case UDEV_CTRL_START_EXEC_QUEUE:
                log_debug("Received udev control message (START_EXEC_QUEUE)");
                manager->stop_exec_queue = false;
                /* It is not necessary to call event_queue_start() here, as it will be called in on_post() if necessary. */
                break;
        case UDEV_CTRL_RELOAD:
                log_debug("Received udev control message (RELOAD)");
                manager_reload(manager, /* force = */ true);
                break;
        case UDEV_CTRL_SET_ENV: {
                _unused_ _cleanup_free_ char *old_val = NULL;
                _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL;
                const char *eq;

                /* Payload is "KEY=VALUE"; an empty VALUE unsets the property. */
                eq = strchr(value->buf, '=');
                if (!eq) {
                        log_error("Invalid key format '%s'", value->buf);
                        return 1;
                }

                key = strndup(value->buf, eq - value->buf);
                if (!key) {
                        log_oom();
                        return 1;
                }

                /* Drop any previous entry for this key; both old key and old value are
                 * freed automatically when this scope exits. */
                old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);

                r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
                if (r < 0) {
                        log_oom();
                        return 1;
                }

                eq++;
                if (isempty(eq)) {
                        log_debug("Received udev control message (ENV), unsetting '%s'", key);

                        r = hashmap_put(manager->properties, key, NULL);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                } else {
                        val = strdup(eq);
                        if (!val) {
                                log_oom();
                                return 1;
                        }

                        log_debug("Received udev control message (ENV), setting '%s=%s'", key, val);

                        r = hashmap_put(manager->properties, key, val);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                }

                /* Ownership of key/val moved into the hashmap; stop the cleanup handlers
                 * from freeing them. */
                key = val = NULL;
                /* Retire idle workers so new ones inherit the updated property set. */
                manager_kill_workers(manager, false);
                break;
        }
        case UDEV_CTRL_SET_CHILDREN_MAX:
                if (value->intval <= 0) {
                        log_debug("Received invalid udev control message (SET_MAX_CHILDREN, %i), ignoring.", value->intval);
                        return 0;
                }

                log_debug("Received udev control message (SET_MAX_CHILDREN), setting children_max=%i", value->intval);
                arg_children_max = value->intval;

                notify_ready();
                break;
        case UDEV_CTRL_PING:
                log_debug("Received udev control message (PING)");
                break;
        case UDEV_CTRL_EXIT:
                log_debug("Received udev control message (EXIT)");
                manager_exit(manager);
                break;
        default:
                log_debug("Received unknown udev control message, ignoring");
        }

        return 1;
}
1337
1338 static int synthesize_change_one(sd_device *dev, sd_device *target) {
1339 int r;
1340
1341 if (DEBUG_LOGGING) {
1342 const char *syspath = NULL;
1343 (void) sd_device_get_syspath(target, &syspath);
1344 log_device_debug(dev, "device is closed, synthesising 'change' on %s", strna(syspath));
1345 }
1346
1347 r = sd_device_trigger(target, SD_DEVICE_CHANGE);
1348 if (r < 0)
1349 return log_device_debug_errno(target, r, "Failed to trigger 'change' uevent: %m");
1350
1351 DEVICE_TRACE_POINT(synthetic_change_event, dev);
1352
1353 return 0;
1354 }
1355
/* Synthesize "change" uevents for a device whose node was closed after writing. For whole
 * disks (except device-mapper "dm-*" nodes) first try to re-read the partition table; if that
 * succeeds the kernel emits the events itself, otherwise synthesize "change" for the disk and
 * each of its partitions. */
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devtype;
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        if (streq_ptr(subsystem, "block") &&
            streq_ptr(devtype, "disk") &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /* Try to re-read the partition table. This only succeeds if none of the devices is
                 * busy. The kernel returns 0 if no partition table is found, and we will not get an
                 * event for the disk. */
                fd = sd_device_open(dev, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
                if (fd >= 0) {
                        /* Take an exclusive (non-blocking) lock first so we do not race udev workers. */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
                                continue;

                        has_partitions = true;
                        break;
                }

                /* We have partitions and re-read the table, the kernel already sent out a "change"
                 * event for the disk, and "remove/add" for all partitions. */
                if (part_table_read && has_partitions)
                        return 0;

                /* We have partitions but re-reading the partition table did not work, synthesize
                 * "change" for the disk and all partitions. */
                (void) synthesize_change_one(dev, dev);

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
                                continue;

                        (void) synthesize_change_one(dev, d);
                }

        } else
                (void) synthesize_change_one(dev, dev);

        return 0;
}
1444
/* Handle inotify events on watched device nodes. An IN_CLOSE_WRITE means some process closed
 * the node after writing: requeue events that were waiting for the device to be unlocked and
 * synthesize a "change" uevent for it. */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        ssize_t l;
        int r;

        assert(manager);

        /* We are clearly not idle; cancel any pending idle-worker cleanup timer. Failure is non-fatal. */
        r = event_source_disable(manager->kill_workers_event);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                /* e.g. EINTR/EAGAIN: just try again on the next wakeup */
                if (ERRNO_IS_TRANSIENT(errno))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                /* Map the inotify watch descriptor back to the watched device. */
                r = device_new_from_watch_handle(&dev, e->wd);
                if (r < 0) {
                        log_debug_errno(r, "Failed to create sd_device object from watch handle, ignoring: %m");
                        continue;
                }

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE) {
                        (void) event_queue_assume_block_device_unlocked(manager, dev);
                        (void) synthesize_change(dev);
                }

                /* Do not handle IN_IGNORED here. It should be handled by worker in 'remove' uevent;
                 * udev_event_execute_rules() -> event_execute_rules_on_remove() -> udev_watch_end(). */
        }

        return 1;
}
1490
1491 static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1492 Manager *manager = userdata;
1493
1494 assert(manager);
1495
1496 manager_exit(manager);
1497
1498 return 1;
1499 }
1500
1501 static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1502 Manager *manager = userdata;
1503
1504 assert(manager);
1505
1506 manager_reload(manager, /* force = */ true);
1507
1508 return 1;
1509 }
1510
/* SIGCHLD handler for a single worker: log how the worker exited, and on failure clean up the
 * device's on-disk state and broadcast a synthetic result to libudev listeners. Frees the
 * worker and tries to schedule further queued events. */
static int on_sigchld(sd_event_source *s, const siginfo_t *si, void *userdata) {
        Worker *worker = ASSERT_PTR(userdata);
        Manager *manager = ASSERT_PTR(worker->manager);
        sd_device *dev = worker->event ? ASSERT_PTR(worker->event->dev) : NULL;
        EventResult result;
        int r;

        assert(si);

        switch (si->si_code) {
        case CLD_EXITED:
                if (si->si_status == 0)
                        log_device_debug(dev, "Worker ["PID_FMT"] exited.", si->si_pid);
                else
                        log_device_warning(dev, "Worker ["PID_FMT"] exited with return code %i.",
                                           si->si_pid, si->si_status);
                /* Encode the exit status into an EventResult. */
                result = EVENT_RESULT_EXIT_STATUS_BASE + si->si_status;
                break;

        case CLD_KILLED:
        case CLD_DUMPED:
                log_device_warning(dev, "Worker ["PID_FMT"] terminated by signal %i (%s).",
                                   si->si_pid, si->si_status, signal_to_string(si->si_status));
                /* Encode the terminating signal into an EventResult. */
                result = EVENT_RESULT_SIGNAL_BASE + si->si_status;
                break;

        default:
                assert_not_reached();
        }

        if (result != EVENT_RESULT_SUCCESS && dev) {
                /* delete state from disk */
                device_delete_db(dev);
                device_tag_index(dev, NULL, false);

                /* Forward kernel event to libudev listeners */
                device_broadcast(manager->monitor, dev, result);
        }

        worker_free(worker);

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers)) {
                r = event_source_disable(manager->kill_workers_event);
                if (r < 0)
                        log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
        }

        return 1;
}
1564
/* Post event source, run after each event-loop iteration: dispatch pending events, and once
 * the queue drains, remove /run/udev/queue, schedule idle-worker cleanup, reap cgroup
 * stragglers and — if an exit was requested — terminate the event loop. */
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        if (manager->events) {
                /* Try to process pending events if idle workers exist. Why is this necessary?
                 * When a worker finished an event and became idle, even if there was a pending event,
                 * the corresponding device might have been locked and the processing of the event
                 * delayed for a while, preventing the worker from processing the event immediately.
                 * Now, the device may be unlocked. Let's try again! */
                event_queue_start(manager);
                return 1;
        }

        /* There are no queued events. Let's remove /run/udev/queue and clean up the idle processes. */

        if (unlink("/run/udev/queue") < 0) {
                if (errno != ENOENT)
                        log_warning_errno(errno, "Failed to unlink /run/udev/queue, ignoring: %m");
        } else
                log_debug("No events are queued, removing /run/udev/queue.");

        if (!hashmap_isempty(manager->workers)) {
                /* There are idle workers: kill them after a 3s grace period (1s accuracy). */
                (void) event_reset_time_relative(manager->event, &manager->kill_workers_event,
                                                 CLOCK_MONOTONIC, 3 * USEC_PER_SEC, USEC_PER_SEC,
                                                 on_kill_workers_event, manager,
                                                 0, "kill-workers-event", false);
                return 1;
        }

        /* There are no idle workers. */

        if (manager->exit)
                return sd_event_exit(manager->event, 0);

        if (manager->cgroup)
                /* cleanup possible left-over processes in our cgroup */
                (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);

        return 1;
}
1608
1609 static int listen_fds(int *ret_ctrl, int *ret_netlink) {
1610 int ctrl_fd = -1, netlink_fd = -1;
1611 int fd, n;
1612
1613 assert(ret_ctrl);
1614 assert(ret_netlink);
1615
1616 n = sd_listen_fds(true);
1617 if (n < 0)
1618 return n;
1619
1620 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1621 if (sd_is_socket(fd, AF_UNIX, SOCK_SEQPACKET, -1) > 0) {
1622 if (ctrl_fd >= 0)
1623 return -EINVAL;
1624 ctrl_fd = fd;
1625 continue;
1626 }
1627
1628 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
1629 if (netlink_fd >= 0)
1630 return -EINVAL;
1631 netlink_fd = fd;
1632 continue;
1633 }
1634
1635 return -EINVAL;
1636 }
1637
1638 *ret_ctrl = ctrl_fd;
1639 *ret_netlink = netlink_fd;
1640
1641 return 0;
1642 }
1643
/*
 * Read the kernel command line, in case we need to get into debug mode:
 * udev.log_level=<level> syslog priority
 * udev.children_max=<number of workers> events are fully serialized if set to 1
 * udev.exec_delay=<number of seconds> delay execution of every executed program
 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
 * udev.blockdev_read_only[=<bool>] mark all block devices read-only when they appear
 */
/* proc_cmdline_parse() callback: apply one udev.* kernel command line option.
 * Unknown udev.* keys produce a warning; parse failures are logged and ignored. */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r;

        assert(key);

        if (proc_cmdline_key_streq(key, "udev.log_level") ||
            proc_cmdline_key_streq(key, "udev.log_priority")) { /* kept for backward compatibility */

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = log_level_from_string(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_event_timeout_usec);

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_exec_delay_usec);

        } else if (proc_cmdline_key_streq(key, "udev.timeout_signal")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = signal_from_string(value);
                if (r > 0)
                        arg_timeout_signal = r;

        } else if (proc_cmdline_key_streq(key, "udev.blockdev_read_only")) {

                /* This option is a flag: a bare key (no value) means "true". */
                if (!value)
                        arg_blockdev_read_only = true;
                else {
                        r = parse_boolean(value);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse udev.blockdev-read-only argument, ignoring: %s", value);
                        else
                                arg_blockdev_read_only = r;
                }

                if (arg_blockdev_read_only)
                        log_notice("All physical block devices will be marked read-only.");

                return 0;

        } else {
                if (startswith(key, "udev."))
                        log_warning("Unknown udev kernel command line option \"%s\", ignoring.", key);

                return 0;
        }

        /* Shared warning path for the branches above that leave a parse result in 'r'. */
        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}
1726
1727 static int help(void) {
1728 _cleanup_free_ char *link = NULL;
1729 int r;
1730
1731 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1732 if (r < 0)
1733 return log_oom();
1734
1735 printf("%s [OPTIONS...]\n\n"
1736 "Rule-based manager for device events and files.\n\n"
1737 " -h --help Print this message\n"
1738 " -V --version Print version of the program\n"
1739 " -d --daemon Detach and run in the background\n"
1740 " -D --debug Enable debug output\n"
1741 " -c --children-max=INT Set maximum number of workers\n"
1742 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1743 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1744 " -N --resolve-names=early|late|never\n"
1745 " When to resolve users and groups\n"
1746 "\nSee the %s for details.\n",
1747 program_invocation_short_name,
1748 link);
1749
1750 return 0;
1751 }
1752
/* Parse command line arguments into the arg_* globals. Invalid option values are warned
 * about and ignored. Returns 1 to continue startup, 0 to exit successfully (--help,
 * --version), or a negative errno on unknown options. */
static int parse_argv(int argc, char *argv[]) {
        enum {
                ARG_TIMEOUT_SIGNAL,
        };

        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd'                 },
                { "debug",              no_argument,            NULL, 'D'                 },
                { "children-max",       required_argument,      NULL, 'c'                 },
                { "exec-delay",         required_argument,      NULL, 'e'                 },
                { "event-timeout",      required_argument,      NULL, 't'                 },
                { "resolve-names",      required_argument,      NULL, 'N'                 },
                { "help",               no_argument,            NULL, 'h'                 },
                { "version",            no_argument,            NULL, 'V'                 },
                { "timeout-signal",     required_argument,      NULL, ARG_TIMEOUT_SIGNAL  },
                {}
        };

        int c, r;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
                        break;
                case 'e':
                        r = parse_sec(optarg, &arg_exec_delay_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
                        break;
                case ARG_TIMEOUT_SIGNAL:
                        r = signal_from_string(optarg);
                        if (r <= 0)
                                log_warning_errno(r, "Failed to parse --timeout-signal= value '%s', ignoring: %m", optarg);
                        else
                                arg_timeout_signal = r;

                        break;
                case 't':
                        r = parse_sec(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg);
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N': {
                        ResolveNameTiming t;

                        t = resolve_name_timing_from_string(optarg);
                        if (t < 0)
                                log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
                        else
                                arg_resolve_name_timing = t;
                        break;
                }
                case 'h':
                        return help();
                case 'V':
                        printf("%s\n", GIT_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached();

                }
        }

        return 1;
}
1833
/* When invoked directly by PID1 on a systemd system with a delegated cgroup, create and move
 * into a "/udev" subcgroup so that the "no processes in inner nodes" rule is not violated when
 * the service manager runs ExecReload= in the .control/ subcgroup. On success stores the
 * subcgroup path in *ret; all failures are reported as debug-level and non-fatal to callers. */
static int create_subcgroup(char **ret) {
        _cleanup_free_ char *cgroup = NULL, *subcgroup = NULL;
        int r;

        if (getppid() != 1)
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "Not invoked by PID1.");

        r = sd_booted();
        if (r < 0)
                return log_debug_errno(r, "Failed to check if systemd is running: %m");
        if (r == 0)
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "systemd is not running.");

        /* Get our own cgroup, we regularly kill everything udev has left behind.
         * We only do this on systemd systems, and only if we are directly spawned
         * by PID1. Otherwise we are not guaranteed to have a dedicated cgroup. */

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
        if (r < 0) {
                if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                        return log_debug_errno(r, "Dedicated cgroup not found: %m");
                return log_debug_errno(r, "Failed to get cgroup: %m");
        }

        /* Only proceed if PID1 marked the cgroup as delegated to us. */
        r = cg_get_xattr_bool(SYSTEMD_CGROUP_CONTROLLER, cgroup, "trusted.delegate");
        if (IN_SET(r, 0, -ENODATA))
                return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "The cgroup %s is not delegated to us.", cgroup);
        if (r < 0)
                return log_debug_errno(r, "Failed to read trusted.delegate attribute: %m");

        /* We are invoked with our own delegated cgroup tree, let's move us one level down, so that we
         * don't collide with the "no processes in inner nodes" rule of cgroups, when the service
         * manager invokes the ExecReload= job in the .control/ subcgroup. */

        subcgroup = path_join(cgroup, "/udev");
        if (!subcgroup)
                return log_oom_debug();

        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, subcgroup, 0);
        if (r < 0)
                return log_debug_errno(r, "Failed to create %s subcgroup: %m", subcgroup);

        log_debug("Created %s subcgroup.", subcgroup);
        if (ret)
                *ret = TAKE_PTR(subcgroup);
        return 0;
}
1881
/* Allocate and initialize the Manager: move into a subcgroup when possible, set up the udev
 * control socket and the kernel uevent monitor (reusing socket-activation fds when given,
 * fd_ctrl/fd_uevent may be -1 otherwise). Returns 0 on success, negative errno on failure. */
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(ret);

        /* Best-effort: failures leave cgroup NULL and are logged at debug level. */
        (void) create_subcgroup(&cgroup);

        manager = new(Manager, 1);
        if (!manager)
                return log_oom();

        *manager = (Manager) {
                .inotify_fd = -1,
                .worker_watch = { -1, -1 },
                .cgroup = TAKE_PTR(cgroup),
        };

        r = udev_ctrl_new_from_fd(&manager->ctrl, fd_ctrl);
        if (r < 0)
                return log_error_errno(r, "Failed to initialize udev control socket: %m");

        r = udev_ctrl_enable_receiving(manager->ctrl);
        if (r < 0)
                return log_error_errno(r, "Failed to bind udev control socket: %m");

        r = device_monitor_new_full(&manager->monitor, MONITOR_GROUP_KERNEL, fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to initialize device monitor: %m");

        /* Bump receiver buffer, but only if we are not called via socket activation, as in that
         * case systemd sets the receive buffer size for us, and the value in the .socket unit
         * should take full effect. */
        if (fd_uevent < 0) {
                r = sd_device_monitor_set_receive_buffer_size(manager->monitor, 128 * 1024 * 1024);
                if (r < 0)
                        log_warning_errno(r, "Failed to set receive buffer size for device monitor, ignoring: %m");
        }

        r = device_monitor_enable_receiving(manager->monitor);
        if (r < 0)
                return log_error_errno(r, "Failed to bind netlink socket: %m");

        /* Remember the current level so workers can be told about later changes. */
        manager->log_level = log_get_max_level();

        *ret = TAKE_PTR(manager);

        return 0;
}
1932
1933 static int main_loop(Manager *manager) {
1934 int fd_worker, r;
1935
1936 manager->pid = getpid_cached();
1937
1938 /* unnamed socket from workers to the main daemon */
1939 r = socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1940 if (r < 0)
1941 return log_error_errno(errno, "Failed to create socketpair for communicating with workers: %m");
1942
1943 fd_worker = manager->worker_watch[READ_END];
1944
1945 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
1946 if (r < 0)
1947 return log_error_errno(r, "Failed to enable SO_PASSCRED: %m");
1948
1949 manager->inotify_fd = inotify_init1(IN_CLOEXEC);
1950 if (manager->inotify_fd < 0)
1951 return log_error_errno(errno, "Failed to create inotify descriptor: %m");
1952
1953 udev_watch_restore(manager->inotify_fd);
1954
1955 /* block and listen to all signals on signalfd */
1956 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
1957
1958 r = sd_event_default(&manager->event);
1959 if (r < 0)
1960 return log_error_errno(r, "Failed to allocate event loop: %m");
1961
1962 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1963 if (r < 0)
1964 return log_error_errno(r, "Failed to create SIGINT event source: %m");
1965
1966 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1967 if (r < 0)
1968 return log_error_errno(r, "Failed to create SIGTERM event source: %m");
1969
1970 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1971 if (r < 0)
1972 return log_error_errno(r, "Failed to create SIGHUP event source: %m");
1973
1974 r = sd_event_set_watchdog(manager->event, true);
1975 if (r < 0)
1976 return log_error_errno(r, "Failed to create watchdog event source: %m");
1977
1978 r = udev_ctrl_attach_event(manager->ctrl, manager->event);
1979 if (r < 0)
1980 return log_error_errno(r, "Failed to attach event to udev control: %m");
1981
1982 r = udev_ctrl_start(manager->ctrl, on_ctrl_msg, manager);
1983 if (r < 0)
1984 return log_error_errno(r, "Failed to start device monitor: %m");
1985
1986 /* This needs to be after the inotify and uevent handling, to make sure
1987 * that the ping is send back after fully processing the pending uevents
1988 * (including the synthetic ones we may create due to inotify events).
1989 */
1990 r = sd_event_source_set_priority(udev_ctrl_get_event_source(manager->ctrl), SD_EVENT_PRIORITY_IDLE);
1991 if (r < 0)
1992 return log_error_errno(r, "Failed to set IDLE event priority for udev control event source: %m");
1993
1994 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->inotify_fd, EPOLLIN, on_inotify, manager);
1995 if (r < 0)
1996 return log_error_errno(r, "Failed to create inotify event source: %m");
1997
1998 r = sd_device_monitor_attach_event(manager->monitor, manager->event);
1999 if (r < 0)
2000 return log_error_errno(r, "Failed to attach event to device monitor: %m");
2001
2002 r = sd_device_monitor_start(manager->monitor, on_uevent, manager);
2003 if (r < 0)
2004 return log_error_errno(r, "Failed to start device monitor: %m");
2005
2006 (void) sd_event_source_set_description(sd_device_monitor_get_event_source(manager->monitor), "device-monitor");
2007
2008 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
2009 if (r < 0)
2010 return log_error_errno(r, "Failed to create worker event source: %m");
2011
2012 r = sd_event_add_post(manager->event, NULL, on_post, manager);
2013 if (r < 0)
2014 return log_error_errno(r, "Failed to create post event source: %m");
2015
2016 manager->last_usec = now(CLOCK_MONOTONIC);
2017
2018 udev_builtin_init();
2019
2020 r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
2021 if (r < 0)
2022 return log_error_errno(r, "Failed to read udev rules: %m");
2023
2024 r = udev_rules_apply_static_dev_perms(manager->rules);
2025 if (r < 0)
2026 log_warning_errno(r, "Failed to apply permissions on static device nodes, ignoring: %m");
2027
2028 notify_ready();
2029
2030 r = sd_event_loop(manager->event);
2031 if (r < 0)
2032 log_error_errno(r, "Event loop failed: %m");
2033
2034 sd_notify(false,
2035 "STOPPING=1\n"
2036 "STATUS=Shutting down...");
2037 return r;
2038 }
2039
/* Entry point of the daemon: parse configuration, command line and kernel command line,
 * compute the worker limit, create /run/udev and the Manager, optionally daemonize, and run
 * the main loop. Returns 0 on clean exit, negative errno on failure. */
int run_udevd(int argc, char *argv[]) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        log_open();
        udev_parse_config_full(&arg_children_max, &arg_exec_delay_usec, &arg_event_timeout_usec, &arg_resolve_name_timing, &arg_timeout_signal);
        log_parse_environment();
        log_open(); /* Done again to update after reading configuration. */

        r = parse_argv(argc, argv);
        if (r <= 0)
                return r;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                return r;

        /* Derive a worker limit from CPU and memory resources when not configured explicitly. */
        if (arg_children_max == 0) {
                unsigned long cpu_limit, mem_limit, cpu_count = 1;

                r = cpus_in_affinity_mask();
                if (r < 0)
                        log_warning_errno(r, "Failed to determine number of local CPUs, ignoring: %m");
                else
                        cpu_count = r;

                cpu_limit = cpu_count * 2 + 16;
                /* one worker per 128MiB of RAM, but at least 10 */
                mem_limit = MAX(physical_memory() / (128UL*1024*1024), 10U);

                arg_children_max = MIN(cpu_limit, mem_limit);
                arg_children_max = MIN(WORKER_NUM_MAX, arg_children_max);

                log_debug("Set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        umask(022);

        r = mac_selinux_init();
        if (r < 0)
                return r;

        r = RET_NERRNO(mkdir("/run/udev", 0755));
        if (r < 0 && r != -EEXIST)
                return log_error_errno(r, "Failed to create /run/udev: %m");

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to listen on fds: %m");

        r = manager_new(&manager, fd_ctrl, fd_uevent);
        if (r < 0)
                return log_error_errno(r, "Failed to create manager: %m");

        if (arg_daemonize) {
                pid_t pid;

                log_info("Starting systemd-udevd version " GIT_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                if (pid < 0)
                        return log_error_errno(errno, "Failed to fork daemon: %m");
                if (pid > 0)
                        /* parent */
                        return 0;

                /* child: detach from the controlling terminal and session */
                (void) setsid();
        }

        return main_loop(manager);
}