src/udev/udevd.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
6 */
7
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <getopt.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <sys/epoll.h>
16 #include <sys/file.h>
17 #include <sys/inotify.h>
18 #include <sys/ioctl.h>
19 #include <sys/mount.h>
20 #include <sys/prctl.h>
21 #include <sys/signalfd.h>
22 #include <sys/stat.h>
23 #include <sys/time.h>
24 #include <sys/wait.h>
25 #include <unistd.h>
26
27 #include "sd-daemon.h"
28 #include "sd-event.h"
29
30 #include "alloc-util.h"
31 #include "cgroup-setup.h"
32 #include "cgroup-util.h"
33 #include "cpu-set-util.h"
34 #include "dev-setup.h"
35 #include "device-monitor-private.h"
36 #include "device-private.h"
37 #include "device-util.h"
38 #include "errno-list.h"
39 #include "event-util.h"
40 #include "fd-util.h"
41 #include "fileio.h"
42 #include "format-util.h"
43 #include "fs-util.h"
44 #include "hashmap.h"
45 #include "inotify-util.h"
46 #include "io-util.h"
47 #include "limits-util.h"
48 #include "list.h"
49 #include "main-func.h"
50 #include "mkdir.h"
51 #include "netlink-util.h"
52 #include "parse-util.h"
53 #include "path-util.h"
54 #include "pretty-print.h"
55 #include "proc-cmdline.h"
56 #include "process-util.h"
57 #include "selinux-util.h"
58 #include "signal-util.h"
59 #include "socket-util.h"
60 #include "string-util.h"
61 #include "strv.h"
62 #include "strxcpyx.h"
63 #include "syslog-util.h"
64 #include "udevd.h"
65 #include "udev-builtin.h"
66 #include "udev-ctrl.h"
67 #include "udev-event.h"
68 #include "udev-util.h"
69 #include "udev-watch.h"
70 #include "user-util.h"
71 #include "version.h"
72
73 #define WORKER_NUM_MAX 2048U
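/* When a block device is locked by another process, the event is requeued and retried every
 * EVENT_RETRY_INTERVAL_USEC; after EVENT_RETRY_TIMEOUT_USEC the event is given up on (see event_requeue()). */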
74 #define EVENT_RETRY_INTERVAL_USEC (200 * USEC_PER_MSEC)
75 #define EVENT_RETRY_TIMEOUT_USEC (3 * USEC_PER_MINUTE)
76
77 static bool arg_debug = false;
78 static bool arg_daemonize = false;
79 static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
80 static unsigned arg_children_max = 0;
81 static usec_t arg_exec_delay_usec = 0;
82 static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
83 static int arg_timeout_signal = SIGKILL;
84 static bool arg_blockdev_read_only = false;
85
86 typedef struct Event Event;
87 typedef struct Worker Worker;
88
89 typedef struct Manager {
90 sd_event *event;
91 Hashmap *workers;
92 LIST_HEAD(Event, events);
93 char *cgroup;
94 pid_t pid; /* the process that originally allocated the manager object */
95 int log_level;
96
97 UdevRules *rules;
98 Hashmap *properties;
99
100 sd_netlink *rtnl;
101
102 sd_device_monitor *monitor;
103 UdevCtrl *ctrl;
104 int worker_watch[2];
105
106 /* used by udev-watch */
107 int inotify_fd;
108 sd_event_source *inotify_event;
109
110 sd_event_source *kill_workers_event;
111
112 usec_t last_usec;
113
114 bool stop_exec_queue;
115 bool exit;
116 } Manager;
117
118 typedef enum EventState {
119 EVENT_UNDEF,
120 EVENT_QUEUED,
121 EVENT_RUNNING,
122 } EventState;
123
124 typedef struct Event {
125 Manager *manager;
126 Worker *worker;
127 EventState state;
128
129 sd_device *dev;
130
131 sd_device_action_t action;
132 uint64_t seqnum;
133 uint64_t blocker_seqnum;
134 const char *id;
135 const char *devpath;
136 const char *devpath_old;
137 const char *devnode;
138
139 /* Used when the device is locked by another program. */
140 usec_t retry_again_next_usec;
141 usec_t retry_again_timeout_usec;
142 sd_event_source *retry_event_source;
143
144 sd_event_source *timeout_warning_event;
145 sd_event_source *timeout_event;
146
147 LIST_FIELDS(Event, event);
148 } Event;
149
150 typedef enum WorkerState {
151 WORKER_UNDEF,
152 WORKER_RUNNING,
153 WORKER_IDLE,
154 WORKER_KILLED,
155 WORKER_KILLING,
156 } WorkerState;
157
158 typedef struct Worker {
159 Manager *manager;
160 pid_t pid;
161 sd_event_source *child_event_source;
162 sd_device_monitor *monitor;
163 WorkerState state;
164 Event *event;
165 } Worker;
166
167 /* passed from worker to main process */
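/* The result is transported as a single int: negative values are negated errno codes, 0..255 are
 * exit statuses, EVENT_RESULT_TRY_AGAIN means the block device was locked and the event should be
 * requeued, and values from EVENT_RESULT_SIGNAL_BASE upwards encode the terminating signal. */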
168 typedef enum EventResult {
169 EVENT_RESULT_NERRNO_MIN = -ERRNO_MAX,
170 EVENT_RESULT_NERRNO_MAX = -1,
171 EVENT_RESULT_SUCCESS = 0,
172 EVENT_RESULT_EXIT_STATUS_BASE = 0,
173 EVENT_RESULT_EXIT_STATUS_MAX = 255,
174 EVENT_RESULT_TRY_AGAIN = 256, /* when the block device is locked by another process. */
175 EVENT_RESULT_SIGNAL_BASE = 257,
176 EVENT_RESULT_SIGNAL_MAX = EVENT_RESULT_SIGNAL_BASE + _NSIG,
177 _EVENT_RESULT_MAX,
178 _EVENT_RESULT_INVALID = -EINVAL,
179 } EventResult;
180
181 static Event *event_free(Event *event) {
182 if (!event)
183 return NULL;
184
185 assert(event->manager);
186
187 LIST_REMOVE(event, event->manager->events, event);
188 sd_device_unref(event->dev);
189
190 /* Do not use sd_event_source_disable_unref() here, as this is called by both workers and the
191 * main process. */
192 sd_event_source_unref(event->retry_event_source);
193 sd_event_source_unref(event->timeout_warning_event);
194 sd_event_source_unref(event->timeout_event);
195
196 if (event->worker)
197 event->worker->event = NULL;
198
199 return mfree(event);
200 }
201
202 static void event_queue_cleanup(Manager *manager, EventState match_state) {
203 LIST_FOREACH(event, event, manager->events) {
204 if (match_state != EVENT_UNDEF && match_state != event->state)
205 continue;
206
207 event_free(event);
208 }
209 }
210
211 static Worker *worker_free(Worker *worker) {
212 if (!worker)
213 return NULL;
214
215 if (worker->manager)
216 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
217
218 sd_event_source_unref(worker->child_event_source);
219 sd_device_monitor_unref(worker->monitor);
220 event_free(worker->event);
221
222 return mfree(worker);
223 }
224
225 DEFINE_TRIVIAL_CLEANUP_FUNC(Worker*, worker_free);
226 DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(worker_hash_op, void, trivial_hash_func, trivial_compare_func, Worker, worker_free);
227
228 static void manager_clear_for_worker(Manager *manager) {
229 assert(manager);
230
231 /* Do not use sd_event_source_disable_unref() here, as this is called by both workers and the
232 * main process. */
233 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
234 manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
235
236 manager->event = sd_event_unref(manager->event);
237
238 manager->workers = hashmap_free(manager->workers);
239 event_queue_cleanup(manager, EVENT_UNDEF);
240
241 manager->monitor = sd_device_monitor_unref(manager->monitor);
242 manager->ctrl = udev_ctrl_unref(manager->ctrl);
243
244 manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
245 }
246
247 static Manager* manager_free(Manager *manager) {
248 if (!manager)
249 return NULL;
250
251 udev_builtin_exit();
252
253 manager_clear_for_worker(manager);
254
255 sd_netlink_unref(manager->rtnl);
256
257 hashmap_free_free_free(manager->properties);
258 udev_rules_free(manager->rules);
259
260 safe_close(manager->inotify_fd);
261 safe_close_pair(manager->worker_watch);
262
263 free(manager->cgroup);
264 return mfree(manager);
265 }
266
267 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
268
269 static int on_sigchld(sd_event_source *s, const siginfo_t *si, void *userdata);
270
271 static int worker_new(Worker **ret, Manager *manager, sd_device_monitor *worker_monitor, pid_t pid) {
272 _cleanup_(worker_freep) Worker *worker = NULL;
273 int r;
274
275 assert(ret);
276 assert(manager);
277 assert(worker_monitor);
278 assert(pid > 1);
279
280 /* close monitor, but keep address around */
281 device_monitor_disconnect(worker_monitor);
282
283 worker = new(Worker, 1);
284 if (!worker)
285 return -ENOMEM;
286
287 *worker = (Worker) {
288 .monitor = sd_device_monitor_ref(worker_monitor),
289 .pid = pid,
290 };
291
292 r = sd_event_add_child(manager->event, &worker->child_event_source, pid, WEXITED, on_sigchld, worker);
293 if (r < 0)
294 return r;
295
296 r = hashmap_ensure_put(&manager->workers, &worker_hash_op, PID_TO_PTR(pid), worker);
297 if (r < 0)
298 return r;
299
300 worker->manager = manager;
301
302 *ret = TAKE_PTR(worker);
303 return 0;
304 }
305
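/* Send SIGTERM to all workers. Unless 'force' is set, workers currently processing an event are only
 * marked WORKER_KILLING and will receive SIGTERM once they report their result (see on_worker()). */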
306 static void manager_kill_workers(Manager *manager, bool force) {
307 Worker *worker;
308
309 assert(manager);
310
311 HASHMAP_FOREACH(worker, manager->workers) {
312 if (worker->state == WORKER_KILLED)
313 continue;
314
315 if (worker->state == WORKER_RUNNING && !force) {
316 worker->state = WORKER_KILLING;
317 continue;
318 }
319
320 worker->state = WORKER_KILLED;
321 (void) kill(worker->pid, SIGTERM);
322 }
323 }
324
325 static void manager_exit(Manager *manager) {
326 assert(manager);
327
328 manager->exit = true;
329
330 sd_notify(false,
331 "STOPPING=1\n"
332 "STATUS=Starting shutdown...");
333
334 /* close sources of new events and discard buffered events */
335 manager->ctrl = udev_ctrl_unref(manager->ctrl);
336
337 manager->inotify_event = sd_event_source_disable_unref(manager->inotify_event);
338 manager->inotify_fd = safe_close(manager->inotify_fd);
339
340 manager->monitor = sd_device_monitor_unref(manager->monitor);
341
342 /* discard queued events and kill workers */
343 event_queue_cleanup(manager, EVENT_QUEUED);
344 manager_kill_workers(manager, true);
345 }
346
347 static void notify_ready(void) {
348 int r;
349
350 r = sd_notifyf(false,
351 "READY=1\n"
352 "STATUS=Processing with %u children at max", arg_children_max);
353 if (r < 0)
354 log_warning_errno(r, "Failed to send readiness notification, ignoring: %m");
355 }
356
357 /* reload requested, HUP signal received, rules changed, builtin changed */
358 static void manager_reload(Manager *manager, bool force) {
359 _cleanup_(udev_rules_freep) UdevRules *rules = NULL;
360 usec_t now_usec;
361 int r;
362
363 assert(manager);
364
365 assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &now_usec) >= 0);
366 if (!force && now_usec < usec_add(manager->last_usec, 3 * USEC_PER_SEC))
367 /* check for changed config, every 3 seconds at most */
368 return;
369 manager->last_usec = now_usec;
370
371 /* Reload SELinux label database, to make the child inherit the up-to-date database. */
372 mac_selinux_maybe_reload();
373
374 /* Nothing changed. It is not necessary to reload. */
375 if (!udev_rules_should_reload(manager->rules) && !udev_builtin_should_reload())
376 return;
377
378 sd_notify(false,
379 "RELOADING=1\n"
380 "STATUS=Flushing configuration...");
381
382 manager_kill_workers(manager, false);
383
384 udev_builtin_exit();
385 udev_builtin_init();
386
387 r = udev_rules_load(&rules, arg_resolve_name_timing);
388 if (r < 0)
389 log_warning_errno(r, "Failed to read udev rules, using the previously loaded rules, ignoring: %m");
390 else
391 udev_rules_free_and_replace(manager->rules, rules);
392
393 notify_ready();
394 }
395
396 static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
397 Manager *manager = userdata;
398
399 assert(manager);
400
401 log_debug("Cleanup idle workers");
402 manager_kill_workers(manager, false);
403
404 return 1;
405 }
406
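/* Forward a processed device to the libudev listeners. On failure, the device is annotated with
 * UDEV_WORKER_FAILED plus the errno, exit status or signal that caused the failure. */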
407 static void device_broadcast(sd_device_monitor *monitor, sd_device *dev, EventResult result) {
408 int r;
409
410 assert(dev);
411
412 /* On exit, manager->monitor is already NULL. */
413 if (!monitor)
414 return;
415
416 if (result != EVENT_RESULT_SUCCESS) {
417 (void) device_add_property(dev, "UDEV_WORKER_FAILED", "1");
418
419 switch (result) {
420 case EVENT_RESULT_NERRNO_MIN ... EVENT_RESULT_NERRNO_MAX: {
421 const char *str;
422
423 (void) device_add_propertyf(dev, "UDEV_WORKER_ERRNO", "%i", -result);
424
425 str = errno_to_name(result);
426 if (str)
427 (void) device_add_property(dev, "UDEV_WORKER_ERRNO_NAME", str);
428 break;
429 }
430 case EVENT_RESULT_EXIT_STATUS_BASE ... EVENT_RESULT_EXIT_STATUS_MAX:
431 (void) device_add_propertyf(dev, "UDEV_WORKER_EXIT_STATUS", "%i", result - EVENT_RESULT_EXIT_STATUS_BASE);
432 break;
433
434 case EVENT_RESULT_TRY_AGAIN:
435 assert_not_reached();
436 break;
437
438 case EVENT_RESULT_SIGNAL_BASE ... EVENT_RESULT_SIGNAL_MAX: {
439 const char *str;
440
441 (void) device_add_propertyf(dev, "UDEV_WORKER_SIGNAL", "%i", result - EVENT_RESULT_SIGNAL_BASE);
442
443 str = signal_to_string(result - EVENT_RESULT_SIGNAL_BASE);
444 if (str)
445 (void) device_add_property(dev, "UDEV_WORKER_SIGNAL_NAME", str);
446 break;
447 }
448 default:
449 log_device_warning(dev, "Unknown event result \"%i\", ignoring.", result);
450 }
451 }
452
453 r = device_monitor_send_device(monitor, NULL, dev);
454 if (r < 0)
455 log_device_warning_errno(dev, r,
456 "Failed to broadcast event to libudev listeners, ignoring: %m");
457 }
458
459 static int worker_send_result(Manager *manager, EventResult result) {
460 assert(manager);
461 assert(manager->worker_watch[WRITE_END] >= 0);
462
463 return loop_write(manager->worker_watch[WRITE_END], &result, sizeof(result), false);
464 }
465
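/* Resolve the whole-disk device for 'dev': for a partition this is its parent block device. Returns 0
 * for devices that are irrelevant for locking (remove events, non-block devices, dm-/md/drbd). */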
466 static int device_get_whole_disk(sd_device *dev, sd_device **ret_device, const char **ret_devname) {
467 const char *val;
468 int r;
469
470 assert(dev);
471
472 if (device_for_action(dev, SD_DEVICE_REMOVE))
473 goto irrelevant;
474
475 r = sd_device_get_subsystem(dev, &val);
476 if (r < 0)
477 return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");
478
479 if (!streq(val, "block"))
480 goto irrelevant;
481
482 r = sd_device_get_sysname(dev, &val);
483 if (r < 0)
484 return log_device_debug_errno(dev, r, "Failed to get sysname: %m");
485
486 /* Exclude the following devices:
487 * For "dm-", see the comment added by e918a1b5a94f270186dca59156354acd2a596494.
488 * For "md", see the commit message of 2e5b17d01347d3c3118be2b8ad63d20415dbb1f0,
489 * though it is not clear whether that assumption still holds now that partitions can,
490 * surprisingly, be created on md devices, see PR #22973.
491 * For "drbd", see the commit message of fee854ee8ccde0cd28e0f925dea18cce35f3993d. */
492 if (STARTSWITH_SET(val, "dm-", "md", "drbd"))
493 goto irrelevant;
494
495 r = sd_device_get_devtype(dev, &val);
496 if (r < 0 && r != -ENOENT)
497 return log_device_debug_errno(dev, r, "Failed to get devtype: %m");
498 if (r >= 0 && streq(val, "partition")) {
499 r = sd_device_get_parent(dev, &dev);
500 if (r == -ENOENT) /* The device may be already removed. */
501 goto irrelevant;
502 if (r < 0)
503 return log_device_debug_errno(dev, r, "Failed to get parent device: %m");
504 }
505
506 r = sd_device_get_devname(dev, &val);
507 if (r == -ENOENT)
508 goto irrelevant;
509 if (r < 0)
510 return log_device_debug_errno(dev, r, "Failed to get devname: %m");
511
512 if (ret_device)
513 *ret_device = dev;
514 if (ret_devname)
515 *ret_devname = val;
516 return 1;
517
518 irrelevant:
519 if (ret_device)
520 *ret_device = NULL;
521 if (ret_devname)
522 *ret_devname = NULL;
523 return 0;
524 }
525
526 static int worker_lock_whole_disk(sd_device *dev, int *ret_fd) {
527 _cleanup_close_ int fd = -1;
528 sd_device *dev_whole_disk;
529 const char *val;
530 int r;
531
532 assert(dev);
533 assert(ret_fd);
534
535 /* Take a shared lock on the device node; this establishes a concept of device "ownership" to
536 * serialize device access. External processes holding an exclusive lock will cause udev to skip the
537 * event handling; if udev has acquired the lock, the external process can block until udev has
538 * finished its event handling. */
539
540 r = device_get_whole_disk(dev, &dev_whole_disk, &val);
541 if (r < 0)
542 return r;
543 if (r == 0)
544 goto nolock;
545
546 fd = sd_device_open(dev_whole_disk, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
547 if (fd < 0) {
548 bool ignore = ERRNO_IS_DEVICE_ABSENT(fd);
549
550 log_device_debug_errno(dev, fd, "Failed to open '%s'%s: %m", val, ignore ? ", ignoring" : "");
551 if (!ignore)
552 return fd;
553
554 goto nolock;
555 }
556
557 if (flock(fd, LOCK_SH|LOCK_NB) < 0)
558 return log_device_debug_errno(dev, errno, "Failed to flock(%s): %m", val);
559
560 *ret_fd = TAKE_FD(fd);
561 return 1;
562
563 nolock:
564 *ret_fd = -1;
565 return 0;
566 }
567
568 static int worker_mark_block_device_read_only(sd_device *dev) {
569 _cleanup_close_ int fd = -1;
570 const char *val;
571 int state = 1, r;
572
573 assert(dev);
574
575 if (!arg_blockdev_read_only)
576 return 0;
577
578 /* Do this only once, when the block device is new. If the device is later retriggered let's not
579 * toggle the bit again, so that people can boot up with full read-only mode and then unset the bit
580 * for specific devices only. */
581 if (!device_for_action(dev, SD_DEVICE_ADD))
582 return 0;
583
584 r = sd_device_get_subsystem(dev, &val);
585 if (r < 0)
586 return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");
587
588 if (!streq(val, "block"))
589 return 0;
590
591 r = sd_device_get_sysname(dev, &val);
592 if (r < 0)
593 return log_device_debug_errno(dev, r, "Failed to get sysname: %m");
594
595 /* Exclude synthetic devices for now; this is supposed to be a safety feature to avoid modification
596 * of physical devices, and what sits on top of those doesn't really matter if we don't allow the
597 * underlying block devices to receive changes. */
598 if (STARTSWITH_SET(val, "dm-", "md", "drbd", "loop", "nbd", "zram"))
599 return 0;
600
601 fd = sd_device_open(dev, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
602 if (fd < 0)
603 return log_device_debug_errno(dev, fd, "Failed to open '%s', ignoring: %m", val);
604
605 if (ioctl(fd, BLKROSET, &state) < 0)
606 return log_device_warning_errno(dev, errno, "Failed to mark block device '%s' read-only: %m", val);
607
608 log_device_info(dev, "Successfully marked block device '%s' read-only.", val);
609 return 0;
610 }
611
612 static int worker_process_device(Manager *manager, sd_device *dev) {
613 _cleanup_(udev_event_freep) UdevEvent *udev_event = NULL;
614 _cleanup_close_ int fd_lock = -1;
615 int r;
616
617 assert(manager);
618 assert(dev);
619
620 log_device_uevent(dev, "Processing device");
621
622 udev_event = udev_event_new(dev, arg_exec_delay_usec, manager->rtnl, manager->log_level);
623 if (!udev_event)
624 return -ENOMEM;
625
626 /* If this is a block device and it is currently locked via BSD advisory locks,
627 * someone else is using it exclusively. We don't run our udev rules now, so as not to interfere.
628 * Instead of processing the event, we requeue it and will try again after a delay.
629 *
630 * The user-facing side of this: https://systemd.io/BLOCK_DEVICE_LOCKING */
631 r = worker_lock_whole_disk(dev, &fd_lock);
632 if (r == -EAGAIN)
633 return EVENT_RESULT_TRY_AGAIN;
634 if (r < 0)
635 return r;
636
637 (void) worker_mark_block_device_read_only(dev);
638
639 /* apply rules, create node, symlinks */
640 r = udev_event_execute_rules(
641 udev_event,
642 manager->inotify_fd,
643 arg_event_timeout_usec,
644 arg_timeout_signal,
645 manager->properties,
646 manager->rules);
647 if (r < 0)
648 return r;
649
650 udev_event_execute_run(udev_event, arg_event_timeout_usec, arg_timeout_signal);
651
652 if (!manager->rtnl)
653 /* take a reference in case rtnl was initialized while processing the event */
654 manager->rtnl = sd_netlink_ref(udev_event->rtnl);
655
656 r = udev_event_process_inotify_watch(udev_event, manager->inotify_fd);
657 if (r < 0)
658 return r;
659
660 log_device_uevent(dev, "Device processed");
661 return 0;
662 }
663
664 static int worker_device_monitor_handler(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
665 Manager *manager = userdata;
666 int r;
667
668 assert(dev);
669 assert(manager);
670
671 r = worker_process_device(manager, dev);
672 if (r == EVENT_RESULT_TRY_AGAIN)
673 /* if we couldn't acquire the flock(), then requeue the event */
674 log_device_debug(dev, "Block device is currently locked, requeueing the event.");
675 else {
676 if (r < 0)
677 log_device_warning_errno(dev, r, "Failed to process device, ignoring: %m");
678
679 /* send processed event back to libudev listeners */
680 device_broadcast(monitor, dev, r);
681 }
682
683 /* send udevd the result of the event execution */
684 r = worker_send_result(manager, r);
685 if (r < 0)
686 log_device_warning_errno(dev, r, "Failed to send signal to main daemon, ignoring: %m");
687
688 /* Reset the log level, as it might be changed by "OPTIONS=log_level=". */
689 log_set_max_level(manager->log_level);
690
691 return 1;
692 }
693
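/* Entry point of a forked worker process: drop state inherited from the main daemon, set up a private
 * event loop, process the first device immediately, then wait for further devices from the manager. */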
694 static int worker_main(Manager *_manager, sd_device_monitor *monitor, sd_device *first_device) {
695 _cleanup_(sd_device_unrefp) sd_device *dev = first_device;
696 _cleanup_(manager_freep) Manager *manager = _manager;
697 int r;
698
699 assert(manager);
700 assert(monitor);
701 assert(dev);
702
703 assert_se(unsetenv("NOTIFY_SOCKET") == 0);
704
705 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, -1) >= 0);
706
707 /* Reset OOM score, we only protect the main daemon. */
708 r = set_oom_score_adjust(0);
709 if (r < 0)
710 log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");
711
712 /* Clear unnecessary data in Manager object. */
713 manager_clear_for_worker(manager);
714
715 r = sd_event_new(&manager->event);
716 if (r < 0)
717 return log_error_errno(r, "Failed to allocate event loop: %m");
718
719 r = sd_event_add_signal(manager->event, NULL, SIGTERM, NULL, NULL);
720 if (r < 0)
721 return log_error_errno(r, "Failed to set SIGTERM event: %m");
722
723 r = sd_device_monitor_attach_event(monitor, manager->event);
724 if (r < 0)
725 return log_error_errno(r, "Failed to attach event loop to device monitor: %m");
726
727 r = sd_device_monitor_start(monitor, worker_device_monitor_handler, manager);
728 if (r < 0)
729 return log_error_errno(r, "Failed to start device monitor: %m");
730
731 (void) sd_event_source_set_description(sd_device_monitor_get_event_source(monitor), "worker-device-monitor");
732
733 /* Process first device */
734 (void) worker_device_monitor_handler(monitor, dev, manager);
735
736 r = sd_event_loop(manager->event);
737 if (r < 0)
738 return log_error_errno(r, "Event loop failed: %m");
739
740 return 0;
741 }
742
743 static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
744 Event *event = userdata;
745
746 assert(event);
747 assert(event->worker);
748
749 kill_and_sigcont(event->worker->pid, arg_timeout_signal);
750 event->worker->state = WORKER_KILLED;
751
752 log_device_error(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" killed", event->worker->pid, event->seqnum);
753
754 return 1;
755 }
756
757 static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
758 Event *event = userdata;
759
760 assert(event);
761 assert(event->worker);
762
763 log_device_warning(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" is taking a long time", event->worker->pid, event->seqnum);
764
765 return 1;
766 }
767
768 static void worker_attach_event(Worker *worker, Event *event) {
769 sd_event *e;
770
771 assert(worker);
772 assert(worker->manager);
773 assert(event);
774 assert(!event->worker);
775 assert(!worker->event);
776
777 worker->state = WORKER_RUNNING;
778 worker->event = event;
779 event->state = EVENT_RUNNING;
780 event->worker = worker;
781
782 e = worker->manager->event;
783
784 (void) sd_event_add_time_relative(e, &event->timeout_warning_event, CLOCK_MONOTONIC,
785 udev_warn_timeout(arg_event_timeout_usec), USEC_PER_SEC,
786 on_event_timeout_warning, event);
787
788 (void) sd_event_add_time_relative(e, &event->timeout_event, CLOCK_MONOTONIC,
789 arg_event_timeout_usec, USEC_PER_SEC,
790 on_event_timeout, event);
791 }
792
793 static int worker_spawn(Manager *manager, Event *event) {
794 _cleanup_(sd_device_monitor_unrefp) sd_device_monitor *worker_monitor = NULL;
795 Worker *worker;
796 pid_t pid;
797 int r;
798
799 /* listen for new events */
800 r = device_monitor_new_full(&worker_monitor, MONITOR_GROUP_NONE, -1);
801 if (r < 0)
802 return r;
803
804 (void) sd_device_monitor_set_description(worker_monitor, "worker");
805
806 /* allow the main daemon netlink address to send devices to the worker */
807 r = device_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
808 if (r < 0)
809 return log_error_errno(r, "Worker: Failed to set unicast sender: %m");
810
811 r = device_monitor_enable_receiving(worker_monitor);
812 if (r < 0)
813 return log_error_errno(r, "Worker: Failed to enable receiving of device: %m");
814
815 r = safe_fork(NULL, FORK_DEATHSIG, &pid);
816 if (r < 0) {
817 event->state = EVENT_QUEUED;
818 return log_error_errno(r, "Failed to fork() worker: %m");
819 }
820 if (r == 0) {
821 DEVICE_TRACE_POINT(worker_spawned, event->dev, getpid());
822
823 /* Worker process */
824 r = worker_main(manager, worker_monitor, sd_device_ref(event->dev));
825 log_close();
826 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
827 }
828
829 r = worker_new(&worker, manager, worker_monitor, pid);
830 if (r < 0)
831 return log_error_errno(r, "Failed to create worker object: %m");
832
833 worker_attach_event(worker, event);
834
835 log_device_debug(event->dev, "Worker ["PID_FMT"] is forked for processing SEQNUM=%"PRIu64".", pid, event->seqnum);
836 return 0;
837 }
838
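/* Try to dispatch an event: hand it to an idle worker if available, otherwise spawn a new worker unless
 * arg_children_max has been reached. Returns 1 if the event is now being processed, 0 if no worker is free. */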
839 static int event_run(Event *event) {
840 static bool log_children_max_reached = true;
841 Manager *manager;
842 Worker *worker;
843 int r;
844
845 assert(event);
846 assert(event->manager);
847
848 log_device_uevent(event->dev, "Device ready for processing");
849
850 (void) event_source_disable(event->retry_event_source);
851
852 manager = event->manager;
853 HASHMAP_FOREACH(worker, manager->workers) {
854 if (worker->state != WORKER_IDLE)
855 continue;
856
857 r = device_monitor_send_device(manager->monitor, worker->monitor, event->dev);
858 if (r < 0) {
859 log_device_error_errno(event->dev, r, "Worker ["PID_FMT"] did not accept message, killing the worker: %m",
860 worker->pid);
861 (void) kill(worker->pid, SIGKILL);
862 worker->state = WORKER_KILLED;
863 continue;
864 }
865 worker_attach_event(worker, event);
866 return 1; /* event is now processing. */
867 }
868
869 if (hashmap_size(manager->workers) >= arg_children_max) {
870 /* Avoid spamming the debug logs if the limit is already reached and
871 * many events still need to be processed */
872 if (log_children_max_reached && arg_children_max > 1) {
873 log_debug("Maximum number (%u) of children reached.", hashmap_size(manager->workers));
874 log_children_max_reached = false;
875 }
876 return 0; /* no free worker */
877 }
878
879 /* Re-enable the debug message for the next batch of events */
880 log_children_max_reached = true;
881
882 /* start new worker and pass initial device */
883 r = worker_spawn(manager, event);
884 if (r < 0)
885 return r;
886
887 return 1; /* event is now processing. */
888 }
889
890 static int event_is_blocked(Event *event) {
891 Event *loop_event = NULL;
892 int r;
893
894 /* lookup event for identical, parent, child device */
895
896 assert(event);
897 assert(event->manager);
898 assert(event->blocker_seqnum <= event->seqnum);
899
900 if (event->retry_again_next_usec > 0) {
901 usec_t now_usec;
902
903 r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
904 if (r < 0)
905 return r;
906
907 if (event->retry_again_next_usec > now_usec)
908 return true;
909 }
910
911 if (event->blocker_seqnum == event->seqnum)
912 /* we have checked previously and no blocker found */
913 return false;
914
915 LIST_FOREACH(event, e, event->manager->events) {
916 loop_event = e;
917
918 /* we already found a later event, earlier cannot block us, no need to check again */
919 if (loop_event->seqnum < event->blocker_seqnum)
920 continue;
921
922 /* event we checked earlier still exists, no need to check again */
923 if (loop_event->seqnum == event->blocker_seqnum)
924 return true;
925
926 /* found ourselves, no later event can block us */
927 if (loop_event->seqnum >= event->seqnum)
928 goto no_blocker;
929
930 /* found event we have not checked */
931 break;
932 }
933
934 assert(loop_event);
935 assert(loop_event->seqnum > event->blocker_seqnum &&
936 loop_event->seqnum < event->seqnum);
937
938 /* check if queue contains events we depend on */
939 LIST_FOREACH(event, e, loop_event) {
940 loop_event = e;
941
942 /* found ourselves, no later event can block us */
943 if (loop_event->seqnum >= event->seqnum)
944 goto no_blocker;
945
946 if (streq_ptr(loop_event->id, event->id))
947 break;
948
949 if (devpath_conflict(event->devpath, loop_event->devpath) ||
950 devpath_conflict(event->devpath, loop_event->devpath_old) ||
951 devpath_conflict(event->devpath_old, loop_event->devpath))
952 break;
953
954 if (event->devnode && streq_ptr(event->devnode, loop_event->devnode))
955 break;
956 }
957
958 assert(loop_event);
959
960 log_device_debug(event->dev, "SEQNUM=%" PRIu64 " blocked by SEQNUM=%" PRIu64,
961 event->seqnum, loop_event->seqnum);
962
963 event->blocker_seqnum = loop_event->seqnum;
964 return true;
965
966 no_blocker:
967 event->blocker_seqnum = event->seqnum;
968 return false;
969 }
970
971 static int event_queue_start(Manager *manager) {
972 int r;
973
974 assert(manager);
975
976 if (!manager->events || manager->exit || manager->stop_exec_queue)
977 return 0;
978
979 r = event_source_disable(manager->kill_workers_event);
980 if (r < 0)
981 log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
982
983 manager_reload(manager, /* force = */ false);
984
985 LIST_FOREACH(event, event, manager->events) {
986 if (event->state != EVENT_QUEUED)
987 continue;
988
989 /* do not start event if parent or child event is still running or queued */
990 r = event_is_blocked(event);
991 if (r > 0)
992 continue;
993 if (r < 0)
994 log_device_warning_errno(event->dev, r,
995 "Failed to check dependencies for event (SEQNUM=%"PRIu64", ACTION=%s), "
996 "assuming there is no blocking event, ignoring: %m",
997 event->seqnum,
998 strna(device_action_to_string(event->action)));
999
1000 r = event_run(event);
1001 if (r <= 0) /* 0 means there are no idle workers. Let's escape from the loop. */
1002 return r;
1003 }
1004
1005 return 0;
1006 }
1007
1008 static int on_event_retry(sd_event_source *s, uint64_t usec, void *userdata) {
1009 /* This does nothing. The on_post() callback will start the event if there exists an idle worker. */
1010 return 1;
1011 }
1012
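/* Requeue an event whose block device turned out to be locked by another process. Retries are delayed
 * by EVENT_RETRY_INTERVAL_USEC and abandoned after EVENT_RETRY_TIMEOUT_USEC. */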
1013 static int event_requeue(Event *event) {
1014 usec_t now_usec;
1015 int r;
1016
1017 assert(event);
1018 assert(event->manager);
1019 assert(event->manager->event);
1020
1021 event->timeout_warning_event = sd_event_source_disable_unref(event->timeout_warning_event);
1022 event->timeout_event = sd_event_source_disable_unref(event->timeout_event);
1023
1024 /* add a short delay to suppress busy loop */
1025 r = sd_event_now(event->manager->event, CLOCK_BOOTTIME, &now_usec);
1026 if (r < 0)
1027 return log_device_warning_errno(event->dev, r,
1028 "Failed to get current time, "
1029 "skipping event (SEQNUM=%"PRIu64", ACTION=%s): %m",
1030 event->seqnum, strna(device_action_to_string(event->action)));
1031
1032 if (event->retry_again_timeout_usec > 0 && event->retry_again_timeout_usec <= now_usec)
1033 return log_device_warning_errno(event->dev, SYNTHETIC_ERRNO(ETIMEDOUT),
1034 "The underlying block device is locked by a process more than %s, "
1035 "skipping event (SEQNUM=%"PRIu64", ACTION=%s).",
1036 FORMAT_TIMESPAN(EVENT_RETRY_TIMEOUT_USEC, USEC_PER_MINUTE),
1037 event->seqnum, strna(device_action_to_string(event->action)));
1038
1039 event->retry_again_next_usec = usec_add(now_usec, EVENT_RETRY_INTERVAL_USEC);
1040 if (event->retry_again_timeout_usec == 0)
1041 event->retry_again_timeout_usec = usec_add(now_usec, EVENT_RETRY_TIMEOUT_USEC);
1042
1043 r = event_reset_time_relative(event->manager->event, &event->retry_event_source,
1044 CLOCK_MONOTONIC, EVENT_RETRY_INTERVAL_USEC, 0,
1045 on_event_retry, NULL,
1046 0, "retry-event", true);
1047 if (r < 0)
1048 return log_device_warning_errno(event->dev, r, "Failed to reset timer event source for retrying event, "
1049 "skipping event (SEQNUM=%"PRIu64", ACTION=%s): %m",
1050 event->seqnum, strna(device_action_to_string(event->action)));
1051
1052 if (event->worker && event->worker->event == event)
1053 event->worker->event = NULL;
1054 event->worker = NULL;
1055
1056 event->state = EVENT_QUEUED;
1057 return 0;
1058 }
1059
1060 static int event_queue_assume_block_device_unlocked(Manager *manager, sd_device *dev) {
1061 const char *devname;
1062 int r;
1063
1064 /* When a new event for a block device is queued or we get an inotify event, assume that the
1065 * device is not locked anymore. The assumption may not be true, but that should not cause any
1066 * issues, as in that case events will be requeued soon. */
1067
1068 r = device_get_whole_disk(dev, NULL, &devname);
1069 if (r <= 0)
1070 return r;
1071
1072 LIST_FOREACH(event, event, manager->events) {
1073 const char *event_devname;
1074
1075 if (event->state != EVENT_QUEUED)
1076 continue;
1077
1078 if (event->retry_again_next_usec == 0)
1079 continue;
1080
1081 if (device_get_whole_disk(event->dev, NULL, &event_devname) <= 0)
1082 continue;
1083
1084 if (!streq(devname, event_devname))
1085 continue;
1086
1087 event->retry_again_next_usec = 0;
1088 }
1089
1090 return 0;
1091 }
1092
1093 static int event_queue_insert(Manager *manager, sd_device *dev) {
1094 const char *devpath, *devpath_old = NULL, *id = NULL, *devnode = NULL;
1095 sd_device_action_t action;
1096 uint64_t seqnum;
1097 Event *event;
1098 int r;
1099
1100 assert(manager);
1101 assert(dev);
1102
1103 /* only one process can add events to the queue */
1104 assert(manager->pid == getpid_cached());
1105
1106 /* We only accept devices received by the device monitor. */
1107 r = sd_device_get_seqnum(dev, &seqnum);
1108 if (r < 0)
1109 return r;
1110
1111 r = sd_device_get_action(dev, &action);
1112 if (r < 0)
1113 return r;
1114
1115 r = sd_device_get_devpath(dev, &devpath);
1116 if (r < 0)
1117 return r;
1118
1119 r = sd_device_get_property_value(dev, "DEVPATH_OLD", &devpath_old);
1120 if (r < 0 && r != -ENOENT)
1121 return r;
1122
1123 r = device_get_device_id(dev, &id);
1124 if (r < 0 && r != -ENOENT)
1125 return r;
1126
1127 r = sd_device_get_devname(dev, &devnode);
1128 if (r < 0 && r != -ENOENT)
1129 return r;
1130
1131 event = new(Event, 1);
1132 if (!event)
1133 return -ENOMEM;
1134
1135 *event = (Event) {
1136 .manager = manager,
1137 .dev = sd_device_ref(dev),
1138 .seqnum = seqnum,
1139 .action = action,
1140 .id = id,
1141 .devpath = devpath,
1142 .devpath_old = devpath_old,
1143 .devnode = devnode,
1144 .state = EVENT_QUEUED,
1145 };
1146
1147 if (!manager->events) {
1148 r = touch("/run/udev/queue");
1149 if (r < 0)
1150 log_warning_errno(r, "Failed to touch /run/udev/queue, ignoring: %m");
1151 }
1152
1153 LIST_APPEND(event, manager->events, event);
1154
1155 log_device_uevent(dev, "Device is queued");
1156
1157 return 0;
1158 }
1159
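/* Device monitor callback in the main daemon: insert each received kernel uevent into the event queue. */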
1160 static int on_uevent(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
1161 Manager *manager = userdata;
1162 int r;
1163
1164 assert(manager);
1165
1166 DEVICE_TRACE_POINT(kernel_uevent_received, dev);
1167
1168 device_ensure_usec_initialized(dev, NULL);
1169
1170 r = event_queue_insert(manager, dev);
1171 if (r < 0) {
1172 log_device_error_errno(dev, r, "Failed to insert device into event queue: %m");
1173 return 1;
1174 }
1175
1176 (void) event_queue_assume_block_device_unlocked(manager, dev);
1177
1178 return 1;
1179 }
1180
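/* Read EventResult messages from workers on the worker_watch socketpair, verify the sender via
 * SCM_CREDENTIALS, update the worker's state, and requeue or free the corresponding event. */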
1181 static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1182 Manager *manager = userdata;
1183
1184 assert(manager);
1185
1186 for (;;) {
1187 EventResult result;
1188 struct iovec iovec = IOVEC_MAKE(&result, sizeof(result));
1189 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred))) control;
1190 struct msghdr msghdr = {
1191 .msg_iov = &iovec,
1192 .msg_iovlen = 1,
1193 .msg_control = &control,
1194 .msg_controllen = sizeof(control),
1195 };
1196 ssize_t size;
1197 struct ucred *ucred;
1198 Worker *worker;
1199
1200 size = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT);
1201 if (size == -EINTR)
1202 continue;
1203 if (size == -EAGAIN)
1204 /* nothing more to read */
1205 break;
1206 if (size < 0)
1207 return log_error_errno(size, "Failed to receive message: %m");
1208
1209 cmsg_close_all(&msghdr);
1210
1211 if (size != sizeof(result)) {
1212 log_warning("Ignoring worker message with invalid size %zi bytes", size);
1213 continue;
1214 }
1215
1216 ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred);
1217 if (!ucred || ucred->pid <= 0) {
1218 log_warning("Ignoring worker message without valid PID");
1219 continue;
1220 }
1221
1222 /* lookup worker who sent the signal */
1223 worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
1224 if (!worker) {
1225 log_debug("Worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
1226 continue;
1227 }
1228
1229 if (worker->state == WORKER_KILLING) {
1230 worker->state = WORKER_KILLED;
1231 (void) kill(worker->pid, SIGTERM);
1232 } else if (worker->state != WORKER_KILLED)
1233 worker->state = WORKER_IDLE;
1234
1235 /* worker returned */
1236 if (result == EVENT_RESULT_TRY_AGAIN &&
1237 event_requeue(worker->event) < 0)
1238 device_broadcast(manager->monitor, worker->event->dev, -ETIMEDOUT);
1239
1240 /* When event_requeue() succeeds, worker->event is NULL, and event_free() handles NULL gracefully. */
1241 event_free(worker->event);
1242 }
1243
1244 return 1;
1245 }
1246
1247 /* receive the udevd message from userspace */
1248 static int on_ctrl_msg(UdevCtrl *uctrl, UdevCtrlMessageType type, const UdevCtrlMessageValue *value, void *userdata) {
1249 Manager *manager = userdata;
1250 int r;
1251
1252 assert(value);
1253 assert(manager);
1254
1255 switch (type) {
1256 case UDEV_CTRL_SET_LOG_LEVEL:
1257 if ((value->intval & LOG_PRIMASK) != value->intval) {
1258 log_debug("Received invalid udev control message (SET_LOG_LEVEL, %i), ignoring.", value->intval);
1259 break;
1260 }
1261
1262 log_debug("Received udev control message (SET_LOG_LEVEL), setting log_level=%i", value->intval);
1263
1264 r = log_get_max_level();
1265 if (r == value->intval)
1266 break;
1267
1268 log_set_max_level(value->intval);
1269 manager->log_level = value->intval;
1270 manager_kill_workers(manager, false);
1271 break;
1272 case UDEV_CTRL_STOP_EXEC_QUEUE:
1273 log_debug("Received udev control message (STOP_EXEC_QUEUE)");
1274 manager->stop_exec_queue = true;
1275 break;
1276 case UDEV_CTRL_START_EXEC_QUEUE:
1277 log_debug("Received udev control message (START_EXEC_QUEUE)");
1278 manager->stop_exec_queue = false;
1279 /* It is not necessary to call event_queue_start() here, as it will be called in on_post() if necessary. */
1280 break;
1281 case UDEV_CTRL_RELOAD:
1282 log_debug("Received udev control message (RELOAD)");
1283 manager_reload(manager, /* force = */ true);
1284 break;
1285 case UDEV_CTRL_SET_ENV: {
1286 _unused_ _cleanup_free_ char *old_val = NULL;
1287 _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL;
1288 const char *eq;
1289
1290 eq = strchr(value->buf, '=');
1291 if (!eq) {
1292 log_error("Invalid key format '%s'", value->buf);
1293 return 1;
1294 }
1295
1296 key = strndup(value->buf, eq - value->buf);
1297 if (!key) {
1298 log_oom();
1299 return 1;
1300 }
1301
1302 old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);
1303
1304 r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
1305 if (r < 0) {
1306 log_oom();
1307 return 1;
1308 }
1309
1310 eq++;
1311 if (isempty(eq)) {
1312 log_debug("Received udev control message (ENV), unsetting '%s'", key);
1313
1314 r = hashmap_put(manager->properties, key, NULL);
1315 if (r < 0) {
1316 log_oom();
1317 return 1;
1318 }
1319 } else {
1320 val = strdup(eq);
1321 if (!val) {
1322 log_oom();
1323 return 1;
1324 }
1325
1326 log_debug("Received udev control message (ENV), setting '%s=%s'", key, val);
1327
1328 r = hashmap_put(manager->properties, key, val);
1329 if (r < 0) {
1330 log_oom();
1331 return 1;
1332 }
1333 }
1334
1335 key = val = NULL;
1336 manager_kill_workers(manager, false);
1337 break;
1338 }
1339 case UDEV_CTRL_SET_CHILDREN_MAX:
1340 if (value->intval <= 0) {
1341 log_debug("Received invalid udev control message (SET_MAX_CHILDREN, %i), ignoring.", value->intval);
1342 return 0;
1343 }
1344
1345 log_debug("Received udev control message (SET_MAX_CHILDREN), setting children_max=%i", value->intval);
1346 arg_children_max = value->intval;
1347
1348 notify_ready();
1349 break;
1350 case UDEV_CTRL_PING:
1351 log_debug("Received udev control message (PING)");
1352 break;
1353 case UDEV_CTRL_EXIT:
1354 log_debug("Received udev control message (EXIT)");
1355 manager_exit(manager);
1356 break;
1357 default:
1358 log_debug("Received unknown udev control message, ignoring");
1359 }
1360
1361 return 1;
1362 }
1363
1364 static int synthesize_change_one(sd_device *dev, sd_device *target) {
1365 int r;
1366
1367 if (DEBUG_LOGGING) {
1368 const char *syspath = NULL;
1369 (void) sd_device_get_syspath(target, &syspath);
1370 log_device_debug(dev, "device is closed, synthesising 'change' on %s", strna(syspath));
1371 }
1372
1373 r = sd_device_trigger(target, SD_DEVICE_CHANGE);
1374 if (r < 0)
1375 return log_device_debug_errno(target, r, "Failed to trigger 'change' uevent: %m");
1376
1377 DEVICE_TRACE_POINT(synthetic_change_event, dev);
1378
1379 return 0;
1380 }
1381
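/* For whole disks, try to re-read the partition table; if the kernel re-read it and partitions exist,
 * it already emitted the uevents, otherwise synthesize "change" for the disk and each partition.
 * All other devices get a single synthesized "change" uevent. */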
1382 static int synthesize_change(sd_device *dev) {
1383 const char *subsystem, *sysname, *devtype;
1384 int r;
1385
1386 r = sd_device_get_subsystem(dev, &subsystem);
1387 if (r < 0)
1388 return r;
1389
1390 r = sd_device_get_devtype(dev, &devtype);
1391 if (r < 0)
1392 return r;
1393
1394 r = sd_device_get_sysname(dev, &sysname);
1395 if (r < 0)
1396 return r;
1397
1398 if (streq_ptr(subsystem, "block") &&
1399 streq_ptr(devtype, "disk") &&
1400 !startswith(sysname, "dm-")) {
1401 _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
1402 bool part_table_read = false, has_partitions = false;
1403 sd_device *d;
1404 int fd;
1405
1406 /* Try to re-read the partition table. This only succeeds if none of the devices is
1407 * busy. The kernel returns 0 if no partition table is found, and we will not get an
1408 * event for the disk. */
1409 fd = sd_device_open(dev, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
1410 if (fd >= 0) {
1411 r = flock(fd, LOCK_EX|LOCK_NB);
1412 if (r >= 0)
1413 r = ioctl(fd, BLKRRPART, 0);
1414
1415 close(fd);
1416 if (r >= 0)
1417 part_table_read = true;
1418 }
1419
1420 /* search for partitions */
1421 r = sd_device_enumerator_new(&e);
1422 if (r < 0)
1423 return r;
1424
1425 r = sd_device_enumerator_allow_uninitialized(e);
1426 if (r < 0)
1427 return r;
1428
1429 r = sd_device_enumerator_add_match_parent(e, dev);
1430 if (r < 0)
1431 return r;
1432
1433 r = sd_device_enumerator_add_match_subsystem(e, "block", true);
1434 if (r < 0)
1435 return r;
1436
1437 FOREACH_DEVICE(e, d) {
1438 const char *t;
1439
1440 if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
1441 continue;
1442
1443 has_partitions = true;
1444 break;
1445 }
1446
1447 /* We have partitions and re-read the table; the kernel already sent out a "change"
1448 * event for the disk, and "remove/add" for all partitions. */
1449 if (part_table_read && has_partitions)
1450 return 0;
1451
1452 /* We have partitions but re-reading the partition table did not work, synthesize
1453 * "change" for the disk and all partitions. */
1454 (void) synthesize_change_one(dev, dev);
1455
1456 FOREACH_DEVICE(e, d) {
1457 const char *t;
1458
1459 if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
1460 continue;
1461
1462 (void) synthesize_change_one(dev, d);
1463 }
1464
1465 } else
1466 (void) synthesize_change_one(dev, dev);
1467
1468 return 0;
1469 }
1470
1471 static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
1472 Manager *manager = userdata;
1473 union inotify_event_buffer buffer;
1474 ssize_t l;
1475 int r;
1476
1477 assert(manager);
1478
1479 l = read(fd, &buffer, sizeof(buffer));
1480 if (l < 0) {
1481 if (ERRNO_IS_TRANSIENT(errno))
1482 return 1;
1483
1484 return log_error_errno(errno, "Failed to read inotify fd: %m");
1485 }
1486
1487 FOREACH_INOTIFY_EVENT_WARN(e, buffer, l) {
1488 _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
1489 const char *devnode;
1490
1491 r = device_new_from_watch_handle(&dev, e->wd);
1492 if (r < 0) {
1493 log_debug_errno(r, "Failed to create sd_device object from watch handle, ignoring: %m");
1494 continue;
1495 }
1496
1497 if (sd_device_get_devname(dev, &devnode) < 0)
1498 continue;
1499
1500 log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
1501 if (e->mask & IN_CLOSE_WRITE) {
1502 (void) event_queue_assume_block_device_unlocked(manager, dev);
1503 (void) synthesize_change(dev);
1504 }
1505
1506 /* Do not handle IN_IGNORED here. It should be handled by worker in 'remove' uevent;
1507 * udev_event_execute_rules() -> event_execute_rules_on_remove() -> udev_watch_end(). */
1508 }
1509
1510 return 1;
1511 }
1512
1513 static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1514 Manager *manager = userdata;
1515
1516 assert(manager);
1517
1518 manager_exit(manager);
1519
1520 return 1;
1521 }
1522
1523 static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
1524 Manager *manager = userdata;
1525
1526 assert(manager);
1527
1528 manager_reload(manager, /* force = */ true);
1529
1530 return 1;
1531 }
1532
1533 static int on_sigchld(sd_event_source *s, const siginfo_t *si, void *userdata) {
1534 Worker *worker = ASSERT_PTR(userdata);
1535 Manager *manager = ASSERT_PTR(worker->manager);
1536 sd_device *dev = worker->event ? ASSERT_PTR(worker->event->dev) : NULL;
1537 EventResult result;
1538
1539 assert(si);
1540
1541 switch (si->si_code) {
1542 case CLD_EXITED:
1543 if (si->si_status == 0)
1544 log_device_debug(dev, "Worker ["PID_FMT"] exited.", si->si_pid);
1545 else
1546 log_device_warning(dev, "Worker ["PID_FMT"] exited with return code %i.",
1547 si->si_pid, si->si_status);
1548 result = EVENT_RESULT_EXIT_STATUS_BASE + si->si_status;
1549 break;
1550
1551 case CLD_KILLED:
1552 case CLD_DUMPED:
1553 log_device_warning(dev, "Worker ["PID_FMT"] terminated by signal %i (%s).",
1554 si->si_pid, si->si_status, signal_to_string(si->si_status));
1555 result = EVENT_RESULT_SIGNAL_BASE + si->si_status;
1556 break;
1557
1558 default:
1559 assert_not_reached();
1560 }
1561
1562 if (result != EVENT_RESULT_SUCCESS && dev) {
1563 /* delete state from disk */
1564 device_delete_db(dev);
1565 device_tag_index(dev, NULL, false);
1566
1567 /* Forward kernel event to libudev listeners */
1568 device_broadcast(manager->monitor, dev, result);
1569 }
1570
1571 worker_free(worker);
1572
1573 return 1;
1574 }
1575
1576 static int on_post(sd_event_source *s, void *userdata) {
1577 Manager *manager = userdata;
1578
1579 assert(manager);
1580
1581 if (manager->events) {
1582 /* Try to process pending events if idle workers exist. Why is this necessary?
1583 * When a worker finishes an event and becomes idle, a pending event may still exist whose
1584 * device was locked at the time, so its processing was delayed and the worker could not
1585 * pick it up immediately.
1586 * Now, the device may be unlocked. Let's try again! */
1587 event_queue_start(manager);
1588 return 1;
1589 }
1590
1591 /* There are no queued events. Let's remove /run/udev/queue and clean up the idle processes. */
1592
1593 if (unlink("/run/udev/queue") < 0) {
1594 if (errno != ENOENT)
1595 log_warning_errno(errno, "Failed to unlink /run/udev/queue, ignoring: %m");
1596 } else
1597 log_debug("No events are queued, removing /run/udev/queue.");
1598
1599 if (!hashmap_isempty(manager->workers)) {
1600 /* There are idle workers */
1601 (void) event_reset_time_relative(manager->event, &manager->kill_workers_event,
1602 CLOCK_MONOTONIC, 3 * USEC_PER_SEC, USEC_PER_SEC,
1603 on_kill_workers_event, manager,
1604 0, "kill-workers-event", false);
1605 return 1;
1606 }
1607
1608 /* There are no idle workers. */
1609
1610 if (manager->exit)
1611 return sd_event_exit(manager->event, 0);
1612
1613 if (manager->cgroup)
1614 /* cleanup possible left-over processes in our cgroup */
1615 (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
1616
1617 return 1;
1618 }
1619
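/* Pick up the control socket (AF_UNIX/SOCK_SEQPACKET) and the uevent netlink socket passed in via
 * socket activation, if any; -1 is returned in the respective output for sockets that were not passed. */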
1620 static int listen_fds(int *ret_ctrl, int *ret_netlink) {
1621 int ctrl_fd = -1, netlink_fd = -1;
1622 int fd, n;
1623
1624 assert(ret_ctrl);
1625 assert(ret_netlink);
1626
1627 n = sd_listen_fds(true);
1628 if (n < 0)
1629 return n;
1630
1631 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1632 if (sd_is_socket(fd, AF_UNIX, SOCK_SEQPACKET, -1) > 0) {
1633 if (ctrl_fd >= 0)
1634 return -EINVAL;
1635 ctrl_fd = fd;
1636 continue;
1637 }
1638
1639 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
1640 if (netlink_fd >= 0)
1641 return -EINVAL;
1642 netlink_fd = fd;
1643 continue;
1644 }
1645
1646 return -EINVAL;
1647 }
1648
1649 *ret_ctrl = ctrl_fd;
1650 *ret_netlink = netlink_fd;
1651
1652 return 0;
1653 }
1654
1655 /*
1656 * read the kernel command line, in case we need to get into debug mode
1657 * udev.log_level=<level> syslog priority
1658 * udev.children_max=<number of workers> events are fully serialized if set to 1
1659 * udev.exec_delay=<number of seconds> delay execution of every executed program
1660 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
1661 * udev.blockdev_read_only[=<bool>] mark all block devices read-only when they appear
1662 */
1663 static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
1664 int r;
1665
1666 assert(key);
1667
1668 if (proc_cmdline_key_streq(key, "udev.log_level") ||
1669 proc_cmdline_key_streq(key, "udev.log_priority")) { /* kept for backward compatibility */
1670
1671 if (proc_cmdline_value_missing(key, value))
1672 return 0;
1673
1674 r = log_level_from_string(value);
1675 if (r >= 0)
1676 log_set_max_level(r);
1677
1678 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1679
1680 if (proc_cmdline_value_missing(key, value))
1681 return 0;
1682
1683 r = parse_sec(value, &arg_event_timeout_usec);
1684
1685 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1686
1687 if (proc_cmdline_value_missing(key, value))
1688 return 0;
1689
1690 r = safe_atou(value, &arg_children_max);
1691
1692 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1693
1694 if (proc_cmdline_value_missing(key, value))
1695 return 0;
1696
1697 r = parse_sec(value, &arg_exec_delay_usec);
1698
1699 } else if (proc_cmdline_key_streq(key, "udev.timeout_signal")) {
1700
1701 if (proc_cmdline_value_missing(key, value))
1702 return 0;
1703
1704 r = signal_from_string(value);
1705 if (r > 0)
1706 arg_timeout_signal = r;
1707
1708 } else if (proc_cmdline_key_streq(key, "udev.blockdev_read_only")) {
1709
1710 if (!value)
1711 arg_blockdev_read_only = true;
1712 else {
1713 r = parse_boolean(value);
1714 if (r < 0)
1715 log_warning_errno(r, "Failed to parse udev.blockdev-read-only argument, ignoring: %s", value);
1716 else
1717 arg_blockdev_read_only = r;
1718 }
1719
1720 if (arg_blockdev_read_only)
1721 log_notice("All physical block devices will be marked read-only.");
1722
1723 return 0;
1724
1725 } else {
1726 if (startswith(key, "udev."))
1727 log_warning("Unknown udev kernel command line option \"%s\", ignoring.", key);
1728
1729 return 0;
1730 }
1731
1732 if (r < 0)
1733 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1734
1735 return 0;
1736 }
1737
1738 static int help(void) {
1739 _cleanup_free_ char *link = NULL;
1740 int r;
1741
1742 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1743 if (r < 0)
1744 return log_oom();
1745
1746 printf("%s [OPTIONS...]\n\n"
1747 "Rule-based manager for device events and files.\n\n"
1748 " -h --help Print this message\n"
1749 " -V --version Print version of the program\n"
1750 " -d --daemon Detach and run in the background\n"
1751 " -D --debug Enable debug output\n"
1752 " -c --children-max=INT Set maximum number of workers\n"
1753 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1754 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1755 " -N --resolve-names=early|late|never\n"
1756 " When to resolve users and groups\n"
1757 "\nSee the %s for details.\n",
1758 program_invocation_short_name,
1759 link);
1760
1761 return 0;
1762 }
1763
1764 static int parse_argv(int argc, char *argv[]) {
1765 enum {
1766 ARG_TIMEOUT_SIGNAL,
1767 };
1768
1769 static const struct option options[] = {
1770 { "daemon", no_argument, NULL, 'd' },
1771 { "debug", no_argument, NULL, 'D' },
1772 { "children-max", required_argument, NULL, 'c' },
1773 { "exec-delay", required_argument, NULL, 'e' },
1774 { "event-timeout", required_argument, NULL, 't' },
1775 { "resolve-names", required_argument, NULL, 'N' },
1776 { "help", no_argument, NULL, 'h' },
1777 { "version", no_argument, NULL, 'V' },
1778 { "timeout-signal", required_argument, NULL, ARG_TIMEOUT_SIGNAL },
1779 {}
1780 };
1781
1782 int c, r;
1783
1784 assert(argc >= 0);
1785 assert(argv);
1786
1787 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
1788 switch (c) {
1789
1790 case 'd':
1791 arg_daemonize = true;
1792 break;
1793 case 'c':
1794 r = safe_atou(optarg, &arg_children_max);
1795 if (r < 0)
1796 log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
1797 break;
1798 case 'e':
1799 r = parse_sec(optarg, &arg_exec_delay_usec);
1800 if (r < 0)
1801 log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
1802 break;
1803 case ARG_TIMEOUT_SIGNAL:
1804 r = signal_from_string(optarg);
1805 if (r <= 0)
1806 log_warning_errno(r, "Failed to parse --timeout-signal= value '%s', ignoring: %m", optarg);
1807 else
1808 arg_timeout_signal = r;
1809
1810 break;
1811 case 't':
1812 r = parse_sec(optarg, &arg_event_timeout_usec);
1813 if (r < 0)
1814 log_warning_errno(r, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg);
1815 break;
1816 case 'D':
1817 arg_debug = true;
1818 break;
1819 case 'N': {
1820 ResolveNameTiming t;
1821
1822 t = resolve_name_timing_from_string(optarg);
1823 if (t < 0)
1824 log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
1825 else
1826 arg_resolve_name_timing = t;
1827 break;
1828 }
1829 case 'h':
1830 return help();
1831 case 'V':
1832 printf("%s\n", GIT_VERSION);
1833 return 0;
1834 case '?':
1835 return -EINVAL;
1836 default:
1837 assert_not_reached();
1838
1839 }
1840 }
1841
1842 return 1;
1843 }
1844
1845 static int create_subcgroup(char **ret) {
1846 _cleanup_free_ char *cgroup = NULL, *subcgroup = NULL;
1847 int r;
1848
1849 if (getppid() != 1)
1850 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "Not invoked by PID1.");
1851
1852 r = sd_booted();
1853 if (r < 0)
1854 return log_debug_errno(r, "Failed to check if systemd is running: %m");
1855 if (r == 0)
1856 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "systemd is not running.");
1857
1858 /* Get our own cgroup; we regularly kill everything udev has left behind.
1859 * We only do this on systemd systems, and only if we are directly spawned
1860 * by PID1. Otherwise we are not guaranteed to have a dedicated cgroup. */
1861
1862 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
1863 if (r < 0) {
1864 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
1865 return log_debug_errno(r, "Dedicated cgroup not found: %m");
1866 return log_debug_errno(r, "Failed to get cgroup: %m");
1867 }
1868
1869 r = cg_get_xattr_bool(SYSTEMD_CGROUP_CONTROLLER, cgroup, "trusted.delegate");
1870 if (IN_SET(r, 0, -ENODATA))
1871 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "The cgroup %s is not delegated to us.", cgroup);
1872 if (r < 0)
1873 return log_debug_errno(r, "Failed to read trusted.delegate attribute: %m");
1874
1875 /* We are invoked with our own delegated cgroup tree, let's move us one level down, so that we
1876 * don't collide with the "no processes in inner nodes" rule of cgroups, when the service
1877 * manager invokes the ExecReload= job in the .control/ subcgroup. */
1878
1879 subcgroup = path_join(cgroup, "/udev");
1880 if (!subcgroup)
1881 return log_oom_debug();
1882
1883 r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, subcgroup, 0);
1884 if (r < 0)
1885 return log_debug_errno(r, "Failed to create %s subcgroup: %m", subcgroup);
1886
1887 log_debug("Created %s subcgroup.", subcgroup);
1888 if (ret)
1889 *ret = TAKE_PTR(subcgroup);
1890 return 0;
1891 }
1892
1893 static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent) {
1894 _cleanup_(manager_freep) Manager *manager = NULL;
1895 _cleanup_free_ char *cgroup = NULL;
1896 int r;
1897
1898 assert(ret);
1899
1900 (void) create_subcgroup(&cgroup);
1901
1902 manager = new(Manager, 1);
1903 if (!manager)
1904 return log_oom();
1905
1906 *manager = (Manager) {
1907 .inotify_fd = -1,
1908 .worker_watch = { -1, -1 },
1909 .cgroup = TAKE_PTR(cgroup),
1910 };
1911
1912 r = udev_ctrl_new_from_fd(&manager->ctrl, fd_ctrl);
1913 if (r < 0)
1914 return log_error_errno(r, "Failed to initialize udev control socket: %m");
1915
1916 r = udev_ctrl_enable_receiving(manager->ctrl);
1917 if (r < 0)
1918 return log_error_errno(r, "Failed to bind udev control socket: %m");
1919
1920 r = device_monitor_new_full(&manager->monitor, MONITOR_GROUP_KERNEL, fd_uevent);
1921 if (r < 0)
1922 return log_error_errno(r, "Failed to initialize device monitor: %m");
1923
1924 /* Bump receiver buffer, but only if we are not called via socket activation, as in that
1925 * case systemd sets the receive buffer size for us, and the value in the .socket unit
1926 * should take full effect. */
1927 if (fd_uevent < 0) {
1928 r = sd_device_monitor_set_receive_buffer_size(manager->monitor, 128 * 1024 * 1024);
1929 if (r < 0)
1930 log_warning_errno(r, "Failed to set receive buffer size for device monitor, ignoring: %m");
1931 }
1932
1933 (void) sd_device_monitor_set_description(manager->monitor, "manager");
1934
1935 r = device_monitor_enable_receiving(manager->monitor);
1936 if (r < 0)
1937 return log_error_errno(r, "Failed to bind netlink socket: %m");
1938
1939 manager->log_level = log_get_max_level();
1940
1941 *ret = TAKE_PTR(manager);
1942
1943 return 0;
1944 }
1945
1946 static int main_loop(Manager *manager) {
1947 int fd_worker, r;
1948
1949 manager->pid = getpid_cached();
1950
1951 /* unnamed socketpair used by the workers to report back to the main daemon */
1952 r = socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1953 if (r < 0)
1954 return log_error_errno(errno, "Failed to create socketpair for communicating with workers: %m");
1955
1956 fd_worker = manager->worker_watch[READ_END];
1957
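/* SO_PASSCRED makes the kernel attach SCM_CREDENTIALS to each datagram, so incoming messages
 * can be attributed to a specific worker process. */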
1958 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
1959 if (r < 0)
1960 return log_error_errno(r, "Failed to enable SO_PASSCRED: %m");
1961
1962 manager->inotify_fd = inotify_init1(IN_CLOEXEC);
1963 if (manager->inotify_fd < 0)
1964 return log_error_errno(errno, "Failed to create inotify descriptor: %m");
1965
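/* Re-establish the inotify watches that were active before we (re)started; udev persists the
 * watch state under /run/udev/watch/. */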
1966 udev_watch_restore(manager->inotify_fd);
1967
1968 /* Block the signals we handle below; sd_event_add_signal() requires them to be blocked so they can be delivered via signalfd. */
1969 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
1970
1971 r = sd_event_default(&manager->event);
1972 if (r < 0)
1973 return log_error_errno(r, "Failed to allocate event loop: %m");
1974
1975 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1976 if (r < 0)
1977 return log_error_errno(r, "Failed to create SIGINT event source: %m");
1978
1979 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1980 if (r < 0)
1981 return log_error_errno(r, "Failed to create SIGTERM event source: %m");
1982
1983 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1984 if (r < 0)
1985 return log_error_errno(r, "Failed to create SIGHUP event source: %m");
1986
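/* With the watchdog enabled, sd-event sends WATCHDOG=1 keep-alive notifications to the service
 * manager automatically, based on the WATCHDOG_USEC= value set via WatchdogSec= in the unit. */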
1987 r = sd_event_set_watchdog(manager->event, true);
1988 if (r < 0)
1989 return log_error_errno(r, "Failed to create watchdog event source: %m");
1990
1991 r = udev_ctrl_attach_event(manager->ctrl, manager->event);
1992 if (r < 0)
1993 return log_error_errno(r, "Failed to attach event to udev control: %m");
1994
1995 r = udev_ctrl_start(manager->ctrl, on_ctrl_msg, manager);
1996 if (r < 0)
1997 return log_error_errno(r, "Failed to start device monitor: %m");
1998
1999 /* This needs to be after the inotify and uevent handling, to make sure
2000 * that the ping is sent back after fully processing the pending uevents
2001 * (including the synthetic ones we may create due to inotify events).
2002 */
2003 r = sd_event_source_set_priority(udev_ctrl_get_event_source(manager->ctrl), SD_EVENT_PRIORITY_IDLE);
2004 if (r < 0)
2005 return log_error_errno(r, "Failed to set IDLE event priority for udev control event source: %m");
2006
2007 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->inotify_fd, EPOLLIN, on_inotify, manager);
2008 if (r < 0)
2009 return log_error_errno(r, "Failed to create inotify event source: %m");
2010
2011 r = sd_device_monitor_attach_event(manager->monitor, manager->event);
2012 if (r < 0)
2013 return log_error_errno(r, "Failed to attach event to device monitor: %m");
2014
2015 r = sd_device_monitor_start(manager->monitor, on_uevent, manager);
2016 if (r < 0)
2017 return log_error_errno(r, "Failed to start device monitor: %m");
2018
2019 (void) sd_event_source_set_description(sd_device_monitor_get_event_source(manager->monitor), "device-monitor");
2020
2021 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
2022 if (r < 0)
2023 return log_error_errno(r, "Failed to create worker event source: %m");
2024
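/* The post event source runs after each event-loop iteration; udevd uses it to dispatch queued
 * events to idle workers and to clean up once the queue has drained. */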
2025 r = sd_event_add_post(manager->event, NULL, on_post, manager);
2026 if (r < 0)
2027 return log_error_errno(r, "Failed to create post event source: %m");
2028
2029 manager->last_usec = now(CLOCK_MONOTONIC);
2030
2031 udev_builtin_init();
2032
2033 r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
2034 if (r < 0)
2035 return log_error_errno(r, "Failed to read udev rules: %m");
2036
2037 r = udev_rules_apply_static_dev_perms(manager->rules);
2038 if (r < 0)
2039 log_warning_errno(r, "Failed to apply permissions on static device nodes, ignoring: %m");
2040
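/* Tell the service manager we are ready (READY=1), completing Type=notify startup. */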
2041 notify_ready();
2042
2043 r = sd_event_loop(manager->event);
2044 if (r < 0)
2045 log_error_errno(r, "Event loop failed: %m");
2046
2047 sd_notify(false,
2048 "STOPPING=1\n"
2049 "STATUS=Shutting down...");
2050 return r;
2051 }
2052
2053 int run_udevd(int argc, char *argv[]) {
2054 _cleanup_(manager_freep) Manager *manager = NULL;
2055 int fd_ctrl = -1, fd_uevent = -1;
2056 int r;
2057
2058 log_set_target(LOG_TARGET_AUTO);
2059 log_open();
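/* Settings are read from udev.conf first, then from the environment, then from the command line,
 * and finally from the kernel command line (further below); later sources can override earlier ones. */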
2060 udev_parse_config_full(&arg_children_max, &arg_exec_delay_usec, &arg_event_timeout_usec, &arg_resolve_name_timing, &arg_timeout_signal);
2061 log_parse_environment();
2062 log_open(); /* Done again to update after reading configuration. */
2063
2064 r = parse_argv(argc, argv);
2065 if (r <= 0)
2066 return r;
2067
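/* Honour udev switches on the kernel command line, such as udev.children_max=, udev.exec_delay=,
 * udev.event_timeout= and udev.log_level= (with an rd.udev. prefix in the initrd). */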
2068 r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
2069 if (r < 0)
2070 log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");
2071
2072 if (arg_debug) {
2073 log_set_target(LOG_TARGET_CONSOLE);
2074 log_set_max_level(LOG_DEBUG);
2075 }
2076
2077 r = must_be_root();
2078 if (r < 0)
2079 return r;
2080
2081 if (arg_children_max == 0) {
2082 unsigned long cpu_limit, mem_limit, cpu_count = 1;
2083
2084 r = cpus_in_affinity_mask();
2085 if (r < 0)
2086 log_warning_errno(r, "Failed to determine number of local CPUs, ignoring: %m");
2087 else
2088 cpu_count = r;
2089
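/* For example, with 4 usable CPUs and 8 GiB of RAM: cpu_limit = 4 * 2 + 16 = 24,
 * mem_limit = MAX(8192 MiB / 128 MiB, 10) = 64, hence children_max = MIN(24, 64, WORKER_NUM_MAX) = 24. */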
2090 cpu_limit = cpu_count * 2 + 16;
2091 mem_limit = MAX(physical_memory() / (128UL*1024*1024), 10U);
2092
2093 arg_children_max = MIN(cpu_limit, mem_limit);
2094 arg_children_max = MIN(WORKER_NUM_MAX, arg_children_max);
2095
2096 log_debug("Set children_max to %u", arg_children_max);
2097 }
2098
2099 /* set umask before creating any file/directory */
2100 umask(022);
2101
2102 r = mac_selinux_init();
2103 if (r < 0)
2104 return r;
2105
2106 r = RET_NERRNO(mkdir("/run/udev", 0755));
2107 if (r < 0 && r != -EEXIST)
2108 return log_error_errno(r, "Failed to create /run/udev: %m");
2109
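/* Pick up the control socket and the uevent netlink socket if they were handed to us via socket
 * activation (the systemd-udevd-control.socket and systemd-udevd-kernel.socket units); otherwise
 * the fds stay at -1 and manager_new() opens fresh sockets. */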
2110 r = listen_fds(&fd_ctrl, &fd_uevent);
2111 if (r < 0)
2112 return log_error_errno(r, "Failed to listen on fds: %m");
2113
2114 r = manager_new(&manager, fd_ctrl, fd_uevent);
2115 if (r < 0)
2116 return log_error_errno(r, "Failed to create manager: %m");
2117
2118 if (arg_daemonize) {
2119 pid_t pid;
2120
2121 log_info("Starting systemd-udevd version " GIT_VERSION);
2122
2123 /* connect /dev/null to stdin, stdout, stderr */
2124 if (log_get_max_level() < LOG_DEBUG) {
2125 r = make_null_stdio();
2126 if (r < 0)
2127 log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
2128 }
2129
2130 pid = fork();
2131 if (pid < 0)
2132 return log_error_errno(errno, "Failed to fork daemon: %m");
2133 if (pid > 0)
2134 /* parent */
2135 return 0;
2136
2137 /* child */
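/* start a new session, so the daemon has no controlling terminal */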
2138 (void) setsid();
2139 }
2140
2141 return main_loop(manager);
2142 }