1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
15 #include <sys/epoll.h>
17 #include <sys/inotify.h>
18 #include <sys/ioctl.h>
19 #include <sys/mount.h>
20 #include <sys/prctl.h>
21 #include <sys/signalfd.h>
27 #include "sd-daemon.h"
30 #include "alloc-util.h"
31 #include "cgroup-setup.h"
32 #include "cgroup-util.h"
33 #include "cpu-set-util.h"
34 #include "dev-setup.h"
35 #include "device-monitor-private.h"
36 #include "device-private.h"
37 #include "device-util.h"
38 #include "event-util.h"
41 #include "format-util.h"
44 #include "inotify-util.h"
46 #include "limits-util.h"
48 #include "main-func.h"
50 #include "netlink-util.h"
51 #include "parse-util.h"
52 #include "path-util.h"
53 #include "pretty-print.h"
54 #include "proc-cmdline.h"
55 #include "process-util.h"
56 #include "selinux-util.h"
57 #include "signal-util.h"
58 #include "socket-util.h"
59 #include "string-util.h"
62 #include "syslog-util.h"
64 #include "udev-builtin.h"
65 #include "udev-ctrl.h"
66 #include "udev-event.h"
67 #include "udev-util.h"
68 #include "udev-watch.h"
69 #include "user-util.h"
/* Hard upper bound on the number of worker processes (clamp for children-max;
 * exact use not visible in this chunk — TODO confirm against the full file). */
72 #define WORKER_NUM_MAX 2048U
/* Delay before re-attempting an event whose block device was flock()ed by
 * another process (see event_requeue()). */
73 #define EVENT_RETRY_INTERVAL_USEC (200 * USEC_PER_MSEC)
/* Total time after which a repeatedly-locked event is given up on and dropped
 * with ETIMEDOUT (see event_requeue()). */
74 #define EVENT_RETRY_TIMEOUT_USEC (3 * USEC_PER_MINUTE)
/* File-scope configuration, set from the command line and/or udev control
 * messages (see on_ctrl_msg()). */
/* Debug logging toggle; not referenced in this chunk — presumably consumed
 * during option parsing, verify against the full file. */
76 static bool arg_debug
= false;
/* Whether to detach into the background; not referenced in this chunk. */
77 static int arg_daemonize
= false;
/* When to resolve OWNER/GROUP names; passed to udev_rules_load() in
 * event_queue_start(). */
78 static ResolveNameTiming arg_resolve_name_timing
= RESOLVE_NAME_EARLY
;
/* Maximum number of concurrent workers; 0 until configured (event_run()
 * refuses to spawn beyond this, UDEV_CTRL_SET_CHILDREN_MAX updates it). */
79 static unsigned arg_children_max
= 0;
/* Artificial delay before RUN+= programs; passed to udev_event_new(). */
80 static usec_t arg_exec_delay_usec
= 0;
/* Per-event processing timeout (default 3 min); used for the event timeout
 * sources in worker_attach_event() and for rule execution. */
81 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
/* Signal sent to a worker that exceeds the event timeout (on_event_timeout(),
 * udev_event_execute_run()). */
82 static int arg_timeout_signal
= SIGKILL
;
/* If set, newly added physical block devices are marked read-only via
 * BLKROSET (see worker_mark_block_device_read_only()). */
83 static bool arg_blockdev_read_only
= false;
85 typedef struct Event Event
;
86 typedef struct Worker Worker
;
88 typedef struct Manager
{
91 LIST_HEAD(Event
, events
);
93 pid_t pid
; /* the process that originally allocated the manager object */
101 sd_device_monitor
*monitor
;
105 /* used by udev-watch */
107 sd_event_source
*inotify_event
;
109 sd_event_source
*kill_workers_event
;
113 bool stop_exec_queue
;
117 typedef enum EventState
{
123 typedef struct Event
{
130 sd_device_action_t action
;
132 uint64_t blocker_seqnum
;
133 usec_t retry_again_next_usec
;
134 usec_t retry_again_timeout_usec
;
136 sd_event_source
*timeout_warning_event
;
137 sd_event_source
*timeout_event
;
139 LIST_FIELDS(Event
, event
);
142 typedef enum WorkerState
{
150 typedef struct Worker
{
153 sd_device_monitor
*monitor
;
158 /* passed from worker to main process */
159 typedef enum EventResult
{
160 EVENT_RESULT_SUCCESS
,
162 EVENT_RESULT_TRY_AGAIN
, /* when the block device is locked by another process. */
164 _EVENT_RESULT_INVALID
= -EINVAL
,
167 static Event
*event_free(Event
*event
) {
171 assert(event
->manager
);
173 LIST_REMOVE(event
, event
->manager
->events
, event
);
174 sd_device_unref(event
->dev
);
176 sd_event_source_disable_unref(event
->timeout_warning_event
);
177 sd_event_source_disable_unref(event
->timeout_event
);
180 event
->worker
->event
= NULL
;
185 static void event_queue_cleanup(Manager
*manager
, EventState match_state
) {
186 LIST_FOREACH(event
, event
, manager
->events
) {
187 if (match_state
!= EVENT_UNDEF
&& match_state
!= event
->state
)
194 static Worker
*worker_free(Worker
*worker
) {
198 assert(worker
->manager
);
200 hashmap_remove(worker
->manager
->workers
, PID_TO_PTR(worker
->pid
));
201 sd_device_monitor_unref(worker
->monitor
);
202 event_free(worker
->event
);
204 return mfree(worker
);
207 DEFINE_TRIVIAL_CLEANUP_FUNC(Worker
*, worker_free
);
208 DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(worker_hash_op
, void, trivial_hash_func
, trivial_compare_func
, Worker
, worker_free
);
210 static void manager_clear_for_worker(Manager
*manager
) {
213 manager
->inotify_event
= sd_event_source_disable_unref(manager
->inotify_event
);
214 manager
->kill_workers_event
= sd_event_source_disable_unref(manager
->kill_workers_event
);
216 manager
->event
= sd_event_unref(manager
->event
);
218 manager
->workers
= hashmap_free(manager
->workers
);
219 event_queue_cleanup(manager
, EVENT_UNDEF
);
221 manager
->monitor
= sd_device_monitor_unref(manager
->monitor
);
222 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
224 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
227 static Manager
* manager_free(Manager
*manager
) {
233 manager_clear_for_worker(manager
);
235 sd_netlink_unref(manager
->rtnl
);
237 hashmap_free_free_free(manager
->properties
);
238 udev_rules_free(manager
->rules
);
240 safe_close(manager
->inotify_fd
);
241 safe_close_pair(manager
->worker_watch
);
243 free(manager
->cgroup
);
244 return mfree(manager
);
247 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
249 static int worker_new(Worker
**ret
, Manager
*manager
, sd_device_monitor
*worker_monitor
, pid_t pid
) {
250 _cleanup_(worker_freep
) Worker
*worker
= NULL
;
255 assert(worker_monitor
);
258 /* close monitor, but keep address around */
259 device_monitor_disconnect(worker_monitor
);
261 worker
= new(Worker
, 1);
267 .monitor
= sd_device_monitor_ref(worker_monitor
),
271 r
= hashmap_ensure_put(&manager
->workers
, &worker_hash_op
, PID_TO_PTR(pid
), worker
);
275 *ret
= TAKE_PTR(worker
);
280 static void manager_kill_workers(Manager
*manager
, bool force
) {
285 HASHMAP_FOREACH(worker
, manager
->workers
) {
286 if (worker
->state
== WORKER_KILLED
)
289 if (worker
->state
== WORKER_RUNNING
&& !force
) {
290 worker
->state
= WORKER_KILLING
;
294 worker
->state
= WORKER_KILLED
;
295 (void) kill(worker
->pid
, SIGTERM
);
299 static void manager_exit(Manager
*manager
) {
302 manager
->exit
= true;
306 "STATUS=Starting shutdown...");
308 /* close sources of new events and discard buffered events */
309 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
311 manager
->inotify_event
= sd_event_source_disable_unref(manager
->inotify_event
);
312 manager
->inotify_fd
= safe_close(manager
->inotify_fd
);
314 manager
->monitor
= sd_device_monitor_unref(manager
->monitor
);
316 /* discard queued events and kill workers */
317 event_queue_cleanup(manager
, EVENT_QUEUED
);
318 manager_kill_workers(manager
, true);
321 static void notify_ready(void) {
324 r
= sd_notifyf(false,
326 "STATUS=Processing with %u children at max", arg_children_max
);
328 log_warning_errno(r
, "Failed to send readiness notification, ignoring: %m");
331 /* reload requested, HUP signal received, rules changed, builtin changed */
332 static void manager_reload(Manager
*manager
) {
337 "STATUS=Flushing configuration...");
339 manager_kill_workers(manager
, false);
340 manager
->rules
= udev_rules_free(manager
->rules
);
346 static int on_kill_workers_event(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
347 Manager
*manager
= userdata
;
351 log_debug("Cleanup idle workers");
352 manager_kill_workers(manager
, false);
357 static void device_broadcast(sd_device_monitor
*monitor
, sd_device
*dev
) {
362 /* On exit, manager->monitor is already NULL. */
366 r
= device_monitor_send_device(monitor
, NULL
, dev
);
368 log_device_warning_errno(dev
, r
,
369 "Failed to broadcast event to libudev listeners, ignoring: %m");
372 static int worker_send_result(Manager
*manager
, EventResult result
) {
374 assert(manager
->worker_watch
[WRITE_END
] >= 0);
376 return loop_write(manager
->worker_watch
[WRITE_END
], &result
, sizeof(result
), false);
379 static int device_get_block_device(sd_device
*dev
, const char **ret
) {
386 if (device_for_action(dev
, SD_DEVICE_REMOVE
))
389 r
= sd_device_get_subsystem(dev
, &val
);
391 return log_device_debug_errno(dev
, r
, "Failed to get subsystem: %m");
393 if (!streq(val
, "block"))
396 r
= sd_device_get_sysname(dev
, &val
);
398 return log_device_debug_errno(dev
, r
, "Failed to get sysname: %m");
400 if (STARTSWITH_SET(val
, "dm-", "md", "drbd"))
403 r
= sd_device_get_devtype(dev
, &val
);
404 if (r
< 0 && r
!= -ENOENT
)
405 return log_device_debug_errno(dev
, r
, "Failed to get devtype: %m");
406 if (r
>= 0 && streq(val
, "partition")) {
407 r
= sd_device_get_parent(dev
, &dev
);
409 return log_device_debug_errno(dev
, r
, "Failed to get parent device: %m");
412 r
= sd_device_get_devname(dev
, &val
);
416 return log_device_debug_errno(dev
, r
, "Failed to get devname: %m");
426 static int worker_lock_block_device(sd_device
*dev
, int *ret_fd
) {
427 _cleanup_close_
int fd
= -1;
434 /* Take a shared lock on the device node; this establishes a concept of device "ownership" to
435 * serialize device access. External processes holding an exclusive lock will cause udev to skip the
436 * event handling; in the case udev acquired the lock, the external process can block until udev has
437 * finished its event handling. */
439 r
= device_get_block_device(dev
, &val
);
445 fd
= open(val
, O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
447 bool ignore
= ERRNO_IS_DEVICE_ABSENT(errno
);
449 log_device_debug_errno(dev
, errno
, "Failed to open '%s'%s: %m", val
, ignore
? ", ignoring" : "");
456 if (flock(fd
, LOCK_SH
|LOCK_NB
) < 0)
457 return log_device_debug_errno(dev
, errno
, "Failed to flock(%s): %m", val
);
459 *ret_fd
= TAKE_FD(fd
);
467 static int worker_mark_block_device_read_only(sd_device
*dev
) {
468 _cleanup_close_
int fd
= -1;
474 if (!arg_blockdev_read_only
)
477 /* Do this only once, when the block device is new. If the device is later retriggered let's not
478 * toggle the bit again, so that people can boot up with full read-only mode and then unset the bit
479 * for specific devices only. */
480 if (!device_for_action(dev
, SD_DEVICE_ADD
))
483 r
= sd_device_get_subsystem(dev
, &val
);
485 return log_device_debug_errno(dev
, r
, "Failed to get subsystem: %m");
487 if (!streq(val
, "block"))
490 r
= sd_device_get_sysname(dev
, &val
);
492 return log_device_debug_errno(dev
, r
, "Failed to get sysname: %m");
494 /* Exclude synthetic devices for now, this is supposed to be a safety feature to avoid modification
495 * of physical devices, and what sits on top of those doesn't really matter if we don't allow the
496 * underlying block devices to receive changes. */
497 if (STARTSWITH_SET(val
, "dm-", "md", "drbd", "loop", "nbd", "zram"))
500 r
= sd_device_get_devname(dev
, &val
);
504 return log_device_debug_errno(dev
, r
, "Failed to get devname: %m");
506 fd
= open(val
, O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
508 return log_device_debug_errno(dev
, errno
, "Failed to open '%s', ignoring: %m", val
);
510 if (ioctl(fd
, BLKROSET
, &state
) < 0)
511 return log_device_warning_errno(dev
, errno
, "Failed to mark block device '%s' read-only: %m", val
);
513 log_device_info(dev
, "Successfully marked block device '%s' read-only.", val
);
517 static int worker_process_device(Manager
*manager
, sd_device
*dev
) {
518 _cleanup_(udev_event_freep
) UdevEvent
*udev_event
= NULL
;
519 _cleanup_close_
int fd_lock
= -1;
525 log_device_uevent(dev
, "Processing device");
527 udev_event
= udev_event_new(dev
, arg_exec_delay_usec
, manager
->rtnl
, manager
->log_level
);
531 /* If this is a block device and the device is locked currently via the BSD advisory locks,
532 * someone else is using it exclusively. We don't run our udev rules now to not interfere.
533 * Instead of processing the event, we requeue the event and will try again after a delay.
535 * The user-facing side of this: https://systemd.io/BLOCK_DEVICE_LOCKING */
536 r
= worker_lock_block_device(dev
, &fd_lock
);
540 (void) worker_mark_block_device_read_only(dev
);
542 /* apply rules, create node, symlinks */
543 r
= udev_event_execute_rules(
546 arg_event_timeout_usec
,
553 udev_event_execute_run(udev_event
, arg_event_timeout_usec
, arg_timeout_signal
);
556 /* in case rtnl was initialized */
557 manager
->rtnl
= sd_netlink_ref(udev_event
->rtnl
);
559 r
= udev_event_process_inotify_watch(udev_event
, manager
->inotify_fd
);
563 log_device_uevent(dev
, "Device processed");
567 static int worker_device_monitor_handler(sd_device_monitor
*monitor
, sd_device
*dev
, void *userdata
) {
568 Manager
*manager
= userdata
;
575 r
= worker_process_device(manager
, dev
);
577 /* if we couldn't acquire the flock(), then requeue the event */
578 result
= EVENT_RESULT_TRY_AGAIN
;
579 log_device_debug_errno(dev
, r
, "Block device is currently locked, requeueing the event.");
581 result
= EVENT_RESULT_FAILED
;
582 log_device_warning_errno(dev
, r
, "Failed to process device, ignoring: %m");
584 result
= EVENT_RESULT_SUCCESS
;
586 if (result
!= EVENT_RESULT_TRY_AGAIN
)
587 /* send processed event back to libudev listeners */
588 device_broadcast(monitor
, dev
);
590 /* send udevd the result of the event execution */
591 r
= worker_send_result(manager
, result
);
593 log_device_warning_errno(dev
, r
, "Failed to send signal to main daemon, ignoring: %m");
595 /* Reset the log level, as it might be changed by "OPTIONS=log_level=". */
596 log_set_max_level(manager
->log_level
);
601 static int worker_main(Manager
*_manager
, sd_device_monitor
*monitor
, sd_device
*first_device
) {
602 _cleanup_(sd_device_unrefp
) sd_device
*dev
= first_device
;
603 _cleanup_(manager_freep
) Manager
*manager
= _manager
;
610 assert_se(unsetenv("NOTIFY_SOCKET") == 0);
612 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, -1) >= 0);
614 /* Reset OOM score, we only protect the main daemon. */
615 r
= set_oom_score_adjust(0);
617 log_debug_errno(r
, "Failed to reset OOM score, ignoring: %m");
619 /* Clear unnecessary data in Manager object. */
620 manager_clear_for_worker(manager
);
622 r
= sd_event_new(&manager
->event
);
624 return log_error_errno(r
, "Failed to allocate event loop: %m");
626 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, NULL
, NULL
);
628 return log_error_errno(r
, "Failed to set SIGTERM event: %m");
630 r
= sd_device_monitor_attach_event(monitor
, manager
->event
);
632 return log_error_errno(r
, "Failed to attach event loop to device monitor: %m");
634 r
= sd_device_monitor_start(monitor
, worker_device_monitor_handler
, manager
);
636 return log_error_errno(r
, "Failed to start device monitor: %m");
638 (void) sd_event_source_set_description(sd_device_monitor_get_event_source(monitor
), "worker-device-monitor");
640 /* Process first device */
641 (void) worker_device_monitor_handler(monitor
, dev
, manager
);
643 r
= sd_event_loop(manager
->event
);
645 return log_error_errno(r
, "Event loop failed: %m");
650 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
651 Event
*event
= userdata
;
654 assert(event
->worker
);
656 kill_and_sigcont(event
->worker
->pid
, arg_timeout_signal
);
657 event
->worker
->state
= WORKER_KILLED
;
659 log_device_error(event
->dev
, "Worker ["PID_FMT
"] processing SEQNUM=%"PRIu64
" killed", event
->worker
->pid
, event
->seqnum
);
664 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
665 Event
*event
= userdata
;
668 assert(event
->worker
);
670 log_device_warning(event
->dev
, "Worker ["PID_FMT
"] processing SEQNUM=%"PRIu64
" is taking a long time", event
->worker
->pid
, event
->seqnum
);
675 static void worker_attach_event(Worker
*worker
, Event
*event
) {
679 assert(worker
->manager
);
681 assert(!event
->worker
);
682 assert(!worker
->event
);
684 worker
->state
= WORKER_RUNNING
;
685 worker
->event
= event
;
686 event
->state
= EVENT_RUNNING
;
687 event
->worker
= worker
;
689 e
= worker
->manager
->event
;
691 (void) sd_event_add_time_relative(e
, &event
->timeout_warning_event
, CLOCK_MONOTONIC
,
692 udev_warn_timeout(arg_event_timeout_usec
), USEC_PER_SEC
,
693 on_event_timeout_warning
, event
);
695 (void) sd_event_add_time_relative(e
, &event
->timeout_event
, CLOCK_MONOTONIC
,
696 arg_event_timeout_usec
, USEC_PER_SEC
,
697 on_event_timeout
, event
);
700 static int worker_spawn(Manager
*manager
, Event
*event
) {
701 _cleanup_(sd_device_monitor_unrefp
) sd_device_monitor
*worker_monitor
= NULL
;
706 /* listen for new events */
707 r
= device_monitor_new_full(&worker_monitor
, MONITOR_GROUP_NONE
, -1);
711 /* allow the main daemon netlink address to send devices to the worker */
712 r
= device_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
714 return log_error_errno(r
, "Worker: Failed to set unicast sender: %m");
716 r
= device_monitor_enable_receiving(worker_monitor
);
718 return log_error_errno(r
, "Worker: Failed to enable receiving of device: %m");
720 r
= safe_fork(NULL
, FORK_DEATHSIG
, &pid
);
722 event
->state
= EVENT_QUEUED
;
723 return log_error_errno(r
, "Failed to fork() worker: %m");
726 DEVICE_TRACE_POINT(worker_spawned
, event
->dev
, getpid());
729 r
= worker_main(manager
, worker_monitor
, sd_device_ref(event
->dev
));
731 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
734 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
736 return log_error_errno(r
, "Failed to create worker object: %m");
738 worker_attach_event(worker
, event
);
740 log_device_debug(event
->dev
, "Worker ["PID_FMT
"] is forked for processing SEQNUM=%"PRIu64
".", pid
, event
->seqnum
);
744 static int event_run(Event
*event
) {
745 static bool log_children_max_reached
= true;
751 assert(event
->manager
);
753 log_device_uevent(event
->dev
, "Device ready for processing");
755 manager
= event
->manager
;
756 HASHMAP_FOREACH(worker
, manager
->workers
) {
757 if (worker
->state
!= WORKER_IDLE
)
760 r
= device_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
762 log_device_error_errno(event
->dev
, r
, "Worker ["PID_FMT
"] did not accept message, killing the worker: %m",
764 (void) kill(worker
->pid
, SIGKILL
);
765 worker
->state
= WORKER_KILLED
;
768 worker_attach_event(worker
, event
);
769 return 1; /* event is now processing. */
772 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
773 /* Avoid spamming the debug logs if the limit is already reached and
774 * many events still need to be processed */
775 if (log_children_max_reached
&& arg_children_max
> 1) {
776 log_debug("Maximum number (%u) of children reached.", hashmap_size(manager
->workers
));
777 log_children_max_reached
= false;
779 return 0; /* no free worker */
782 /* Re-enable the debug message for the next batch of events */
783 log_children_max_reached
= true;
785 /* start new worker and pass initial device */
786 r
= worker_spawn(manager
, event
);
790 return 1; /* event is now processing. */
793 static int event_is_blocked(Event
*event
) {
794 const char *subsystem
, *devpath
, *devpath_old
= NULL
;
795 dev_t devnum
= makedev(0, 0);
796 Event
*loop_event
= NULL
;
801 /* lookup event for identical, parent, child device */
804 assert(event
->manager
);
805 assert(event
->blocker_seqnum
<= event
->seqnum
);
807 if (event
->retry_again_next_usec
> 0) {
810 r
= sd_event_now(event
->manager
->event
, CLOCK_BOOTTIME
, &now_usec
);
814 if (event
->retry_again_next_usec
<= now_usec
)
818 if (event
->blocker_seqnum
== event
->seqnum
)
819 /* we have checked previously and no blocker found */
822 LIST_FOREACH(event
, e
, event
->manager
->events
) {
825 /* we already found a later event, earlier cannot block us, no need to check again */
826 if (loop_event
->seqnum
< event
->blocker_seqnum
)
829 /* event we checked earlier still exists, no need to check again */
830 if (loop_event
->seqnum
== event
->blocker_seqnum
)
833 /* found ourself, no later event can block us */
834 if (loop_event
->seqnum
>= event
->seqnum
)
837 /* found event we have not checked */
842 assert(loop_event
->seqnum
> event
->blocker_seqnum
&&
843 loop_event
->seqnum
< event
->seqnum
);
845 r
= sd_device_get_subsystem(event
->dev
, &subsystem
);
849 is_block
= streq(subsystem
, "block");
851 r
= sd_device_get_devpath(event
->dev
, &devpath
);
855 devpath_len
= strlen(devpath
);
857 r
= sd_device_get_property_value(event
->dev
, "DEVPATH_OLD", &devpath_old
);
858 if (r
< 0 && r
!= -ENOENT
)
861 r
= sd_device_get_devnum(event
->dev
, &devnum
);
862 if (r
< 0 && r
!= -ENOENT
)
865 r
= sd_device_get_ifindex(event
->dev
, &ifindex
);
866 if (r
< 0 && r
!= -ENOENT
)
869 /* check if queue contains events we depend on */
870 LIST_FOREACH(event
, e
, loop_event
) {
871 size_t loop_devpath_len
, common
;
872 const char *loop_devpath
;
876 /* found ourself, no later event can block us */
877 if (loop_event
->seqnum
>= event
->seqnum
)
880 /* check major/minor */
881 if (major(devnum
) != 0) {
885 if (sd_device_get_subsystem(loop_event
->dev
, &s
) < 0)
888 if (sd_device_get_devnum(loop_event
->dev
, &d
) >= 0 &&
889 devnum
== d
&& is_block
== streq(s
, "block"))
893 /* check network device ifindex */
897 if (sd_device_get_ifindex(loop_event
->dev
, &i
) >= 0 &&
902 if (sd_device_get_devpath(loop_event
->dev
, &loop_devpath
) < 0)
905 /* check our old name */
906 if (devpath_old
&& streq(devpath_old
, loop_devpath
))
909 loop_devpath_len
= strlen(loop_devpath
);
911 /* compare devpath */
912 common
= MIN(devpath_len
, loop_devpath_len
);
914 /* one devpath is contained in the other? */
915 if (!strneq(devpath
, loop_devpath
, common
))
918 /* identical device event found */
919 if (devpath_len
== loop_devpath_len
)
922 /* parent device event found */
923 if (devpath
[common
] == '/')
926 /* child device event found */
927 if (loop_devpath
[common
] == '/')
933 log_device_debug(event
->dev
, "SEQNUM=%" PRIu64
" blocked by SEQNUM=%" PRIu64
,
934 event
->seqnum
, loop_event
->seqnum
);
936 event
->blocker_seqnum
= loop_event
->seqnum
;
940 event
->blocker_seqnum
= event
->seqnum
;
944 static int event_queue_start(Manager
*manager
) {
950 if (LIST_IS_EMPTY(manager
->events
) ||
951 manager
->exit
|| manager
->stop_exec_queue
)
954 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
955 /* check for changed config, every 3 seconds at most */
956 if (manager
->last_usec
== 0 ||
957 usec
> usec_add(manager
->last_usec
, 3 * USEC_PER_SEC
)) {
958 if (udev_rules_check_timestamp(manager
->rules
) ||
959 udev_builtin_validate())
960 manager_reload(manager
);
962 manager
->last_usec
= usec
;
965 r
= event_source_disable(manager
->kill_workers_event
);
967 log_warning_errno(r
, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
971 if (!manager
->rules
) {
972 r
= udev_rules_load(&manager
->rules
, arg_resolve_name_timing
);
974 return log_warning_errno(r
, "Failed to read udev rules: %m");
977 /* fork with up-to-date SELinux label database, so the child inherits the up-to-date db
978 * and, until the next SELinux policy changes, we safe further reloads in future children */
979 mac_selinux_maybe_reload();
981 LIST_FOREACH(event
, event
, manager
->events
) {
982 if (event
->state
!= EVENT_QUEUED
)
985 /* do not start event if parent or child event is still running or queued */
986 r
= event_is_blocked(event
);
990 log_device_warning_errno(event
->dev
, r
,
991 "Failed to check dependencies for event (SEQNUM=%"PRIu64
", ACTION=%s), "
992 "assuming there is no blocking event, ignoring: %m",
994 strna(device_action_to_string(event
->action
)));
996 r
= event_run(event
);
997 if (r
<= 0) /* 0 means there are no idle workers. Let's escape from the loop. */
1004 static int event_requeue(Event
*event
) {
1009 assert(event
->manager
);
1010 assert(event
->manager
->event
);
1012 event
->timeout_warning_event
= sd_event_source_disable_unref(event
->timeout_warning_event
);
1013 event
->timeout_event
= sd_event_source_disable_unref(event
->timeout_event
);
1015 /* add a short delay to suppress busy loop */
1016 r
= sd_event_now(event
->manager
->event
, CLOCK_BOOTTIME
, &now_usec
);
1018 return log_device_warning_errno(event
->dev
, r
,
1019 "Failed to get current time, "
1020 "skipping event (SEQNUM=%"PRIu64
", ACTION=%s): %m",
1021 event
->seqnum
, strna(device_action_to_string(event
->action
)));
1023 if (event
->retry_again_timeout_usec
> 0 && event
->retry_again_timeout_usec
<= now_usec
)
1024 return log_device_warning_errno(event
->dev
, SYNTHETIC_ERRNO(ETIMEDOUT
),
1025 "The underlying block device is locked by a process more than %s, "
1026 "skipping event (SEQNUM=%"PRIu64
", ACTION=%s).",
1027 FORMAT_TIMESPAN(EVENT_RETRY_TIMEOUT_USEC
, USEC_PER_MINUTE
),
1028 event
->seqnum
, strna(device_action_to_string(event
->action
)));
1030 event
->retry_again_next_usec
= usec_add(now_usec
, EVENT_RETRY_INTERVAL_USEC
);
1031 if (event
->retry_again_timeout_usec
== 0)
1032 event
->retry_again_timeout_usec
= usec_add(now_usec
, EVENT_RETRY_TIMEOUT_USEC
);
1034 if (event
->worker
&& event
->worker
->event
== event
)
1035 event
->worker
->event
= NULL
;
1036 event
->worker
= NULL
;
1038 event
->state
= EVENT_QUEUED
;
1042 static int event_queue_assume_block_device_unlocked(Manager
*manager
, sd_device
*dev
) {
1043 const char *devname
;
1046 /* When a new event for a block device is queued or we get an inotify event, assume that the
1047 * device is not locked anymore. The assumption may not be true, but that should not cause any
1048 * issues, as in that case events will be requeued soon. */
1050 r
= device_get_block_device(dev
, &devname
);
1054 LIST_FOREACH(event
, event
, manager
->events
) {
1055 const char *event_devname
;
1057 if (event
->state
!= EVENT_QUEUED
)
1060 if (event
->retry_again_next_usec
== 0)
1063 if (device_get_block_device(event
->dev
, &event_devname
) <= 0)
1066 if (!streq(devname
, event_devname
))
1069 event
->retry_again_next_usec
= 0;
1075 static int event_queue_insert(Manager
*manager
, sd_device
*dev
) {
1076 sd_device_action_t action
;
1084 /* only one process can add events to the queue */
1085 assert(manager
->pid
== getpid_cached());
1087 /* We only accepts devices received by device monitor. */
1088 r
= sd_device_get_seqnum(dev
, &seqnum
);
1092 r
= sd_device_get_action(dev
, &action
);
1096 event
= new(Event
, 1);
1102 .dev
= sd_device_ref(dev
),
1105 .state
= EVENT_QUEUED
,
1108 if (LIST_IS_EMPTY(manager
->events
)) {
1109 r
= touch("/run/udev/queue");
1111 log_warning_errno(r
, "Failed to touch /run/udev/queue, ignoring: %m");
1114 LIST_APPEND(event
, manager
->events
, event
);
1116 log_device_uevent(dev
, "Device is queued");
1121 static int on_uevent(sd_device_monitor
*monitor
, sd_device
*dev
, void *userdata
) {
1122 Manager
*manager
= userdata
;
1127 DEVICE_TRACE_POINT(kernel_uevent_received
, dev
);
1129 device_ensure_usec_initialized(dev
, NULL
);
1131 r
= event_queue_insert(manager
, dev
);
1133 log_device_error_errno(dev
, r
, "Failed to insert device into event queue: %m");
1137 (void) event_queue_assume_block_device_unlocked(manager
, dev
);
1139 /* we have fresh events, try to schedule them */
1140 event_queue_start(manager
);
1145 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1146 Manager
*manager
= userdata
;
1152 struct iovec iovec
= IOVEC_MAKE(&result
, sizeof(result
));
1153 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred
))) control
;
1154 struct msghdr msghdr
= {
1157 .msg_control
= &control
,
1158 .msg_controllen
= sizeof(control
),
1161 struct ucred
*ucred
;
1164 size
= recvmsg_safe(fd
, &msghdr
, MSG_DONTWAIT
);
1167 if (size
== -EAGAIN
)
1168 /* nothing more to read */
1171 return log_error_errno(size
, "Failed to receive message: %m");
1173 cmsg_close_all(&msghdr
);
1175 if (size
!= sizeof(EventResult
)) {
1176 log_warning("Ignoring worker message with invalid size %zi bytes", size
);
1180 ucred
= CMSG_FIND_DATA(&msghdr
, SOL_SOCKET
, SCM_CREDENTIALS
, struct ucred
);
1181 if (!ucred
|| ucred
->pid
<= 0) {
1182 log_warning("Ignoring worker message without valid PID");
1186 /* lookup worker who sent the signal */
1187 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(ucred
->pid
));
1189 log_debug("Worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
1193 if (worker
->state
== WORKER_KILLING
) {
1194 worker
->state
= WORKER_KILLED
;
1195 (void) kill(worker
->pid
, SIGTERM
);
1196 } else if (worker
->state
!= WORKER_KILLED
)
1197 worker
->state
= WORKER_IDLE
;
1199 /* worker returned */
1200 if (result
== EVENT_RESULT_TRY_AGAIN
&&
1201 event_requeue(worker
->event
) < 0)
1202 device_broadcast(manager
->monitor
, worker
->event
->dev
);
1204 /* When event_requeue() succeeds, worker->event is NULL, and event_free() handles NULL gracefully. */
1205 event_free(worker
->event
);
1208 /* we have free workers, try to schedule events */
1209 event_queue_start(manager
);
1214 /* receive the udevd message from userspace */
/* Dispatch handler for messages arriving on the udev control socket (sent by
 * udevadm): adjusts log level, starts/stops the exec queue, reloads rules,
 * manages global properties, sets children_max, and handles ping/exit.
 * NOTE(review): this extract elides lines (embedded original numbering jumps):
 * the switch header, 'break;'s, error returns and closing braces are not shown. */
1215 static int on_ctrl_msg(UdevCtrl
*uctrl
, UdevCtrlMessageType type
, const UdevCtrlMessageValue
*value
, void *userdata
) {
1216 Manager
*manager
= userdata
;
1223 case UDEV_CTRL_SET_LOG_LEVEL
:
1224 log_debug("Received udev control message (SET_LOG_LEVEL), setting log_level=%i", value
->intval
);
1225 log_set_max_level(value
->intval
);
/* Remember the level on the manager so newly forked workers inherit it. */
1226 manager
->log_level
= value
->intval
;
1227 manager_kill_workers(manager
, false);
1229 case UDEV_CTRL_STOP_EXEC_QUEUE
:
1230 log_debug("Received udev control message (STOP_EXEC_QUEUE)");
1231 manager
->stop_exec_queue
= true;
1233 case UDEV_CTRL_START_EXEC_QUEUE
:
1234 log_debug("Received udev control message (START_EXEC_QUEUE)");
1235 manager
->stop_exec_queue
= false;
1236 event_queue_start(manager
);
1238 case UDEV_CTRL_RELOAD
:
1239 log_debug("Received udev control message (RELOAD)");
1240 manager_reload(manager
);
/* SET_ENV carries a "key=value" (set) or "key" / "key=" (unset) string in value->buf. */
1242 case UDEV_CTRL_SET_ENV
: {
1243 _unused_ _cleanup_free_
char *old_val
= NULL
;
1244 _cleanup_free_
char *key
= NULL
, *val
= NULL
, *old_key
= NULL
;
1247 eq
= strchr(value
->buf
, '=');
1249 log_error("Invalid key format '%s'", value
->buf
);
/* Duplicate only the part before '=' as the property key. */
1253 key
= strndup(value
->buf
, eq
- value
->buf
);
/* Drop any previous entry for this key; the old key/value are freed via _cleanup_free_. */
1259 old_val
= hashmap_remove2(manager
->properties
, key
, (void **) &old_key
);
1261 r
= hashmap_ensure_allocated(&manager
->properties
, &string_hash_ops
);
1269 log_debug("Received udev control message (ENV), unsetting '%s'", key
);
/* Unset: store a NULL value for the key. */
1271 r
= hashmap_put(manager
->properties
, key
, NULL
);
1283 log_debug("Received udev control message (ENV), setting '%s=%s'", key
, val
);
1285 r
= hashmap_put(manager
->properties
, key
, val
);
/* Recycle idle workers so they pick up the changed properties. */
1293 manager_kill_workers(manager
, false);
1296 case UDEV_CTRL_SET_CHILDREN_MAX
:
/* Reject non-positive worker counts. */
1297 if (value
->intval
<= 0) {
1298 log_debug("Received invalid udev control message (SET_MAX_CHILDREN, %i), ignoring.", value
->intval
);
1302 log_debug("Received udev control message (SET_MAX_CHILDREN), setting children_max=%i", value
->intval
);
1303 arg_children_max
= value
->intval
;
1307 case UDEV_CTRL_PING
:
1308 log_debug("Received udev control message (PING)");
1310 case UDEV_CTRL_EXIT
:
1311 log_debug("Received udev control message (EXIT)");
1312 manager_exit(manager
);
1315 log_debug("Received unknown udev control message, ignoring");
/* Trigger a synthetic "change" uevent on 'target', logging in the context of
 * 'dev' (the device whose close prompted the synthesis).
 * NOTE(review): lines are elided in this extract (declaration of 'r', the
 * success return and closing brace are not shown). */
1321 static int synthesize_change_one(sd_device
*dev
, sd_device
*target
) {
/* Only resolve and print the target's syspath when debug logging is enabled. */
1324 if (DEBUG_LOGGING
) {
1325 const char *syspath
= NULL
;
1326 (void) sd_device_get_syspath(target
, &syspath
);
1327 log_device_debug(dev
, "device is closed, synthesising 'change' on %s", strna(syspath
));
1330 r
= sd_device_trigger(target
, SD_DEVICE_CHANGE
);
1332 return log_device_debug_errno(target
, r
, "Failed to trigger 'change' uevent: %m");
1334 DEVICE_TRACE_POINT(synthetic_change_event
, dev
);
/* When a (whole-disk, non-device-mapper) block device is closed after being
 * written, try to re-read its partition table and synthesize "change" uevents
 * for the disk and/or its partitions as needed. For all other devices, just
 * synthesize a single "change" event on the device itself.
 * NOTE(review): many lines are elided in this extract (error-check returns
 * after each call, fd declaration/cleanup, closing braces); the embedded
 * original line numbers show the gaps. */
1339 static int synthesize_change(sd_device
*dev
) {
1340 const char *subsystem
, *sysname
, *devtype
;
1343 r
= sd_device_get_subsystem(dev
, &subsystem
);
1347 r
= sd_device_get_devtype(dev
, &devtype
);
1351 r
= sd_device_get_sysname(dev
, &sysname
);
/* Special-case whole disks, excluding device-mapper ("dm-*") devices. */
1355 if (streq_ptr(subsystem
, "block") &&
1356 streq_ptr(devtype
, "disk") &&
1357 !startswith(sysname
, "dm-")) {
1358 _cleanup_(sd_device_enumerator_unrefp
) sd_device_enumerator
*e
= NULL
;
1359 bool part_table_read
= false, has_partitions
= false;
1360 const char *devname
;
1364 r
= sd_device_get_devname(dev
, &devname
);
1368 /* Try to re-read the partition table. This only succeeds if none of the devices is
1369 * busy. The kernel returns 0 if no partition table is found, and we will not get an
1370 * event for the disk. */
1371 fd
= open(devname
, O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
/* Take an exclusive, non-blocking lock so we don't race other users of the disk. */
1373 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
/* BLKRRPART asks the kernel to re-scan the partition table. */
1375 r
= ioctl(fd
, BLKRRPART
, 0);
1379 part_table_read
= true;
1382 /* search for partitions */
1383 r
= sd_device_enumerator_new(&e
);
1387 r
= sd_device_enumerator_allow_uninitialized(e
);
/* Enumerate block-subsystem children of this disk. */
1391 r
= sd_device_enumerator_add_match_parent(e
, dev
);
1395 r
= sd_device_enumerator_add_match_subsystem(e
, "block", true);
1399 FOREACH_DEVICE(e
, d
) {
1402 if (sd_device_get_devtype(d
, &t
) < 0 || !streq(t
, "partition"))
1405 has_partitions
= true;
1409 /* We have partitions and re-read the table, the kernel already sent out a "change"
1410 * event for the disk, and "remove/add" for all partitions. */
1411 if (part_table_read
&& has_partitions
)
1414 /* We have partitions but re-reading the partition table did not work, synthesize
1415 * "change" for the disk and all partitions. */
1416 (void) synthesize_change_one(dev
, dev
);
1418 FOREACH_DEVICE(e
, d
) {
1421 if (sd_device_get_devtype(d
, &t
) < 0 || !streq(t
, "partition"))
1424 (void) synthesize_change_one(dev
, d
);
/* Fallback path (non-disk or dm-* device): single synthetic "change" on the device itself. */
1428 (void) synthesize_change_one(dev
, dev
);
/* sd-event I/O handler for the inotify fd that watches device nodes: reads
 * queued inotify events and, for close-after-write, re-queues the block device
 * and synthesizes "change" uevents.
 * NOTE(review): lines are elided in this extract (declarations of 'l'/'r'
 * checks, continue statements, the final return and closing braces). */
1433 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1434 Manager
*manager
= userdata
;
1435 union inotify_event_buffer buffer
;
/* Activity means we are not idle; pause the idle-worker cleanup timer. */
1441 r
= event_source_disable(manager
->kill_workers_event
);
1443 log_warning_errno(r
, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
1445 l
= read(fd
, &buffer
, sizeof(buffer
));
/* Transient errno values (e.g. EINTR/EAGAIN-class) are not fatal here. */
1447 if (ERRNO_IS_TRANSIENT(errno
))
1450 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1453 FOREACH_INOTIFY_EVENT_WARN(e
, buffer
, l
) {
1454 _cleanup_(sd_device_unrefp
) sd_device
*dev
= NULL
;
1455 const char *devnode
;
/* Map the inotify watch descriptor back to the watched sd_device. */
1457 r
= device_new_from_watch_handle(&dev
, e
->wd
);
1459 log_debug_errno(r
, "Failed to create sd_device object from watch handle, ignoring: %m");
1463 if (sd_device_get_devname(dev
, &devnode
) < 0)
1466 log_device_debug(dev
, "Inotify event: %x for %s", e
->mask
, devnode
);
/* A writable fd was closed: the device content may have changed. */
1467 if (e
->mask
& IN_CLOSE_WRITE
) {
1468 (void) event_queue_assume_block_device_unlocked(manager
, dev
);
1469 (void) synthesize_change(dev
);
1472 /* Do not handle IN_IGNORED here. It should be handled by worker in 'remove' uevent;
1473 * udev_event_execute_rules() -> event_execute_rules_on_remove() -> udev_watch_end(). */
/* Signal handler for SIGTERM/SIGINT: initiate an orderly daemon shutdown.
 * NOTE(review): this extract elides lines (asserts, return value, closing brace). */
1479 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1480 Manager
*manager
= userdata
;
1484 manager_exit(manager
);
/* Signal handler for SIGHUP: reload rules/configuration.
 * NOTE(review): this extract elides lines (asserts, return value, closing brace). */
1489 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1490 Manager
*manager
= userdata
;
1494 manager_reload(manager
);
/* Signal handler for SIGCHLD: reap exited worker processes, log their exit
 * status, clean up failed events, and kick the event queue.
 * NOTE(review): this extract elides lines (the reap loop's surrounding
 * structure, 'continue's, returns and closing braces are not shown). */
1499 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1500 Manager
*manager
= userdata
;
/* Non-blocking reap; presumably looped until no more children — elided here. */
1510 pid
= waitpid(-1, &status
, WNOHANG
);
1514 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(pid
));
1516 log_warning("Worker ["PID_FMT
"] is unknown, ignoring", pid
);
/* Log the child's fate according to its wait status. */
1520 if (WIFEXITED(status
)) {
1521 if (WEXITSTATUS(status
) == 0)
1522 log_debug("Worker ["PID_FMT
"] exited", pid
);
1524 log_warning("Worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1525 } else if (WIFSIGNALED(status
))
1526 log_warning("Worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), signal_to_string(WTERMSIG(status
)));
1527 else if (WIFSTOPPED(status
)) {
1528 log_info("Worker ["PID_FMT
"] stopped", pid
);
1530 } else if (WIFCONTINUED(status
)) {
1531 log_info("Worker ["PID_FMT
"] continued", pid
);
1534 log_warning("Worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
/* Worker died abnormally while holding an event: drop its on-disk state and
 * still forward the kernel event so listeners are not left waiting. */
1536 if ((!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) && worker
->event
) {
1537 log_device_error(worker
->event
->dev
, "Worker ["PID_FMT
"] failed", pid
);
1539 /* delete state from disk */
1540 device_delete_db(worker
->event
->dev
);
1541 device_tag_index(worker
->event
->dev
, NULL
, false);
1543 /* Forward kernel event to libudev listeners */
1544 device_broadcast(manager
->monitor
, worker
->event
->dev
);
1547 worker_free(worker
);
1550 /* we can start new workers, try to schedule events */
1551 event_queue_start(manager
);
1553 /* Disable unnecessary cleanup event */
1554 if (hashmap_isempty(manager
->workers
)) {
1555 r
= event_source_disable(manager
->kill_workers_event
);
1557 log_warning_errno(r
, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
/* Post-iteration callback run after each event-loop pass: restart pending
 * events, tidy /run/udev/queue when idle, arm the idle-worker cleanup timer,
 * and — when exiting with nothing left to do — terminate the loop and sweep
 * leftover processes in our cgroup.
 * NOTE(review): lines are elided in this extract (returns, 'exit' condition
 * guards, closing braces); the embedded numbering shows the gaps. */
1563 static int on_post(sd_event_source
*s
, void *userdata
) {
1564 Manager
*manager
= userdata
;
1568 if (!LIST_IS_EMPTY(manager
->events
)) {
1569 /* Try to process pending events if idle workers exist. Why is this necessary?
1570 * When a worker finished an event and became idle, even if there was a pending event,
1571 * the corresponding device might have been locked and the processing of the event
1572 * delayed for a while, preventing the worker from processing the event immediately.
1573 * Now, the device may be unlocked. Let's try again! */
1574 event_queue_start(manager
);
1578 /* There are no queued events. Let's remove /run/udev/queue and clean up the idle processes. */
1580 if (unlink("/run/udev/queue") < 0) {
1581 if (errno
!= ENOENT
)
1582 log_warning_errno(errno
, "Failed to unlink /run/udev/queue, ignoring: %m");
1584 log_debug("No events are queued, removing /run/udev/queue.");
1586 if (!hashmap_isempty(manager
->workers
)) {
1587 /* There are idle workers */
/* Arm a one-shot timer (3s from now, 1s accuracy) to kill idle workers. */
1588 (void) event_reset_time(manager
->event
, &manager
->kill_workers_event
, CLOCK_MONOTONIC
,
1589 now(CLOCK_MONOTONIC
) + 3 * USEC_PER_SEC
, USEC_PER_SEC
,
1590 on_kill_workers_event
, manager
, 0, "kill-workers-event", false);
1594 /* There are no idle workers. */
/* Nothing queued and no workers: leave the event loop with success. */
1597 return sd_event_exit(manager
->event
, 0);
1599 if (manager
->cgroup
)
1600 /* cleanup possible left-over processes in our cgroup */
1601 (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, CGROUP_IGNORE_SELF
, NULL
, NULL
, NULL
);
/* Collect socket-activation fds passed in by systemd: pick out the control
 * socket (AF_LOCAL/SOCK_SEQPACKET) and the uevent netlink socket
 * (AF_NETLINK/SOCK_RAW); -1 is returned for any not provided.
 * NOTE(review): lines are elided in this extract (duplicate-fd error paths,
 * assignments inside the if-branches, return and closing brace). */
1606 static int listen_fds(int *ret_ctrl
, int *ret_netlink
) {
1607 int ctrl_fd
= -1, netlink_fd
= -1;
1611 assert(ret_netlink
);
/* sd_listen_fds(true) also unsets $LISTEN_* so children don't inherit them. */
1613 n
= sd_listen_fds(true);
1617 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1618 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1) > 0) {
1625 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1) > 0) {
1626 if (netlink_fd
>= 0)
1635 *ret_ctrl
= ctrl_fd
;
1636 *ret_netlink
= netlink_fd
;
1642 * read the kernel command line, in case we need to get into debug mode
1643 * udev.log_level=<level> syslog priority
1644 * udev.children_max=<number of workers> events are fully serialized if set to 1
1645 * udev.exec_delay=<number of seconds> delay execution of every executed program
1646 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
1647 * udev.blockdev_read_only<=bool> mark all block devices read-only when they appear
/* proc_cmdline_parse() callback: recognizes the udev.* kernel command line
 * options listed above and stores them in the arg_* globals. Unknown udev.*
 * keys and parse failures are logged and ignored (boot must not fail on them).
 * NOTE(review): lines are elided in this extract (returns after each branch,
 * value checks, closing braces). */
1649 static int parse_proc_cmdline_item(const char *key
, const char *value
, void *data
) {
1654 if (proc_cmdline_key_streq(key
, "udev.log_level") ||
1655 proc_cmdline_key_streq(key
, "udev.log_priority")) { /* kept for backward compatibility */
1657 if (proc_cmdline_value_missing(key
, value
))
1660 r
= log_level_from_string(value
);
1662 log_set_max_level(r
);
1664 } else if (proc_cmdline_key_streq(key
, "udev.event_timeout")) {
1666 if (proc_cmdline_value_missing(key
, value
))
1669 r
= parse_sec(value
, &arg_event_timeout_usec
);
1671 } else if (proc_cmdline_key_streq(key
, "udev.children_max")) {
1673 if (proc_cmdline_value_missing(key
, value
))
1676 r
= safe_atou(value
, &arg_children_max
);
1678 } else if (proc_cmdline_key_streq(key
, "udev.exec_delay")) {
1680 if (proc_cmdline_value_missing(key
, value
))
1683 r
= parse_sec(value
, &arg_exec_delay_usec
);
1685 } else if (proc_cmdline_key_streq(key
, "udev.timeout_signal")) {
1687 if (proc_cmdline_value_missing(key
, value
))
1690 r
= signal_from_string(value
);
1692 arg_timeout_signal
= r
;
1694 } else if (proc_cmdline_key_streq(key
, "udev.blockdev_read_only")) {
/* Presence without a value means "true"; otherwise parse it as a boolean. */
1697 arg_blockdev_read_only
= true;
1699 r
= parse_boolean(value
);
1701 log_warning_errno(r
, "Failed to parse udev.blockdev-read-only argument, ignoring: %s", value
);
1703 arg_blockdev_read_only
= r
;
1706 if (arg_blockdev_read_only
)
1707 log_notice("All physical block devices will be marked read-only.");
/* Any other udev.* key is unknown: warn but keep booting. */
1712 if (startswith(key
, "udev."))
1713 log_warning("Unknown udev kernel command line option \"%s\", ignoring.", key
);
1719 log_warning_errno(r
, "Failed to parse \"%s=%s\", ignoring: %m", key
, value
);
/* Print command-line usage, with a terminal-clickable link to the
 * systemd-udevd.service(8) man page when supported.
 * NOTE(review): this extract elides lines (error check after
 * terminal_urlify_man, the link argument to printf, return, closing brace). */
1724 static int help(void) {
1725 _cleanup_free_
char *link
= NULL
;
1728 r
= terminal_urlify_man("systemd-udevd.service", "8", &link
);
1732 printf("%s [OPTIONS...]\n\n"
1733 "Rule-based manager for device events and files.\n\n"
1734 " -h --help Print this message\n"
1735 " -V --version Print version of the program\n"
1736 " -d --daemon Detach and run in the background\n"
1737 " -D --debug Enable debug output\n"
1738 " -c --children-max=INT Set maximum number of workers\n"
1739 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1740 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1741 " -N --resolve-names=early|late|never\n"
1742 " When to resolve users and groups\n"
1743 "\nSee the %s for details.\n",
1744 program_invocation_short_name
,
/* Parse command-line options into the arg_* globals via getopt_long().
 * Invalid option values are logged as warnings and ignored rather than
 * aborting startup.
 * NOTE(review): this extract elides lines (the 'case' labels for most options,
 * the terminating options entry, 'break's, returns and closing braces). */
1750 static int parse_argv(int argc
, char *argv
[]) {
1755 static const struct option options
[] = {
1756 { "daemon", no_argument
, NULL
, 'd' },
1757 { "debug", no_argument
, NULL
, 'D' },
1758 { "children-max", required_argument
, NULL
, 'c' },
1759 { "exec-delay", required_argument
, NULL
, 'e' },
1760 { "event-timeout", required_argument
, NULL
, 't' },
1761 { "resolve-names", required_argument
, NULL
, 'N' },
1762 { "help", no_argument
, NULL
, 'h' },
1763 { "version", no_argument
, NULL
, 'V' },
1764 { "timeout-signal", required_argument
, NULL
, ARG_TIMEOUT_SIGNAL
},
1773 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
1777 arg_daemonize
= true;
1780 r
= safe_atou(optarg
, &arg_children_max
);
1782 log_warning_errno(r
, "Failed to parse --children-max= value '%s', ignoring: %m", optarg
);
1785 r
= parse_sec(optarg
, &arg_exec_delay_usec
);
1787 log_warning_errno(r
, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg
);
1789 case ARG_TIMEOUT_SIGNAL
:
1790 r
= signal_from_string(optarg
);
1792 log_warning_errno(r
, "Failed to parse --timeout-signal= value '%s', ignoring: %m", optarg
);
1794 arg_timeout_signal
= r
;
1798 r
= parse_sec(optarg
, &arg_event_timeout_usec
);
1800 log_warning_errno(r
, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg
);
1806 ResolveNameTiming t
;
1808 t
= resolve_name_timing_from_string(optarg
);
1810 log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg
);
1812 arg_resolve_name_timing
= t
;
1818 printf("%s\n", GIT_VERSION
);
/* Unhandled getopt return value indicates a programming error. */
1823 assert_not_reached();
/* If we were spawned directly by systemd (PID 1) into a delegated cgroup,
 * create and move into a ".../udev" subcgroup so we don't violate the cgroup
 * "no processes in inner nodes" rule, and return its path to the caller
 * (ownership transferred). All failures are debug-logged; the caller treats
 * this as best-effort.
 * NOTE(review): this extract elides lines (the initial checks producing 'r',
 * some error guards, return 0, closing brace). */
1831 static int create_subcgroup(char **ret
) {
1832 _cleanup_free_
char *cgroup
= NULL
, *subcgroup
= NULL
;
1836 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP
), "Not invoked by PID1.");
1840 return log_debug_errno(r
, "Failed to check if systemd is running: %m");
1842 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP
), "systemd is not running.");
1844 /* Get our own cgroup, we regularly kill everything udev has left behind.
1845 * We only do this on systemd systems, and only if we are directly spawned
1846 * by PID1. Otherwise we are not guaranteed to have a dedicated cgroup. */
1848 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1850 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
1851 return log_debug_errno(r
, "Dedicated cgroup not found: %m");
1852 return log_debug_errno(r
, "Failed to get cgroup: %m");
/* Only proceed if the service manager delegated this cgroup subtree to us. */
1855 r
= cg_get_xattr_bool(SYSTEMD_CGROUP_CONTROLLER
, cgroup
, "trusted.delegate");
1856 if (IN_SET(r
, 0, -ENODATA
))
1857 return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP
), "The cgroup %s is not delegated to us.", cgroup
);
1859 return log_debug_errno(r
, "Failed to read trusted.delegate attribute: %m");
1861 /* We are invoked with our own delegated cgroup tree, let's move us one level down, so that we
1862 * don't collide with the "no processes in inner nodes" rule of cgroups, when the service
1863 * manager invokes the ExecReload= job in the .control/ subcgroup. */
1865 subcgroup
= path_join(cgroup
, "/udev");
1867 return log_oom_debug();
1869 r
= cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER
, subcgroup
, 0);
1871 return log_debug_errno(r
, "Failed to create %s subcgroup: %m", subcgroup
);
1873 log_debug("Created %s subcgroup.", subcgroup
);
1875 *ret
= TAKE_PTR(subcgroup
);
/* Allocate and initialize the Manager: best-effort subcgroup, control socket
 * (from fd_ctrl or fresh), and kernel uevent monitor (from fd_uevent or
 * fresh). On success the new Manager is handed to the caller via *ret.
 * NOTE(review): lines are elided in this extract (OOM check after new(),
 * additional struct initializers, 'if (r < 0)' guards, return 0, closing
 * brace). */
1879 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
) {
1880 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1881 _cleanup_free_
char *cgroup
= NULL
;
/* Best-effort: failure to get a subcgroup leaves cgroup as NULL. */
1886 (void) create_subcgroup(&cgroup
);
1888 manager
= new(Manager
, 1);
1892 *manager
= (Manager
) {
1894 .worker_watch
= { -1, -1 },
1895 .cgroup
= TAKE_PTR(cgroup
),
1898 r
= udev_ctrl_new_from_fd(&manager
->ctrl
, fd_ctrl
);
1900 return log_error_errno(r
, "Failed to initialize udev control socket: %m");
1902 r
= udev_ctrl_enable_receiving(manager
->ctrl
);
1904 return log_error_errno(r
, "Failed to bind udev control socket: %m");
1906 r
= device_monitor_new_full(&manager
->monitor
, MONITOR_GROUP_KERNEL
, fd_uevent
);
1908 return log_error_errno(r
, "Failed to initialize device monitor: %m");
1910 /* Bump receiver buffer, but only if we are not called via socket activation, as in that
1911 * case systemd sets the receive buffer size for us, and the value in the .socket unit
1912 * should take full effect. */
1913 if (fd_uevent
< 0) {
1914 r
= sd_device_monitor_set_receive_buffer_size(manager
->monitor
, 128 * 1024 * 1024);
1916 log_warning_errno(r
, "Failed to set receive buffer size for device monitor, ignoring: %m");
1919 r
= device_monitor_enable_receiving(manager
->monitor
);
1921 return log_error_errno(r
, "Failed to bind netlink socket: %m");
/* Record current log level so it can be propagated to workers later. */
1923 manager
->log_level
= log_get_max_level();
1925 *ret
= TAKE_PTR(manager
);
/* Set up the daemon's event loop and run it until exit: worker socketpair,
 * inotify watch restoration, signal handlers (SIGINT/SIGTERM/SIGHUP/SIGCHLD),
 * watchdog, control-socket and uevent-monitor sources, a post callback, and
 * rule loading — then sd_event_loop().
 * NOTE(review): many lines are elided in this extract (every 'if (r < 0)'
 * guard line, sd_notify calls, the final return, closing brace). */
1930 static int main_loop(Manager
*manager
) {
1933 manager
->pid
= getpid_cached();
1935 /* unnamed socket from workers to the main daemon */
1936 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1938 return log_error_errno(errno
, "Failed to create socketpair for communicating with workers: %m");
1940 fd_worker
= manager
->worker_watch
[READ_END
];
/* SO_PASSCRED lets on_worker() authenticate messages by the sender's PID. */
1942 r
= setsockopt_int(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, true);
1944 return log_error_errno(r
, "Failed to enable SO_PASSCRED: %m");
1946 manager
->inotify_fd
= inotify_init1(IN_CLOEXEC
);
1947 if (manager
->inotify_fd
< 0)
1948 return log_error_errno(errno
, "Failed to create inotify descriptor: %m");
/* Re-establish device-node watches persisted by a previous udevd instance. */
1950 udev_watch_restore(manager
->inotify_fd
);
1952 /* block and listen to all signals on signalfd */
1953 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1955 r
= sd_event_default(&manager
->event
);
1957 return log_error_errno(r
, "Failed to allocate event loop: %m");
1959 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1961 return log_error_errno(r
, "Failed to create SIGINT event source: %m");
1963 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1965 return log_error_errno(r
, "Failed to create SIGTERM event source: %m");
1967 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1969 return log_error_errno(r
, "Failed to create SIGHUP event source: %m");
1971 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1973 return log_error_errno(r
, "Failed to create SIGCHLD event source: %m");
1975 r
= sd_event_set_watchdog(manager
->event
, true);
1977 return log_error_errno(r
, "Failed to create watchdog event source: %m");
1979 r
= udev_ctrl_attach_event(manager
->ctrl
, manager
->event
);
1981 return log_error_errno(r
, "Failed to attach event to udev control: %m");
1983 r
= udev_ctrl_start(manager
->ctrl
, on_ctrl_msg
, manager
);
1985 return log_error_errno(r
, "Failed to start device monitor: %m");
1987 /* This needs to be after the inotify and uevent handling, to make sure
1988 * that the ping is send back after fully processing the pending uevents
1989 * (including the synthetic ones we may create due to inotify events).
1991 r
= sd_event_source_set_priority(udev_ctrl_get_event_source(manager
->ctrl
), SD_EVENT_PRIORITY_IDLE
);
1993 return log_error_errno(r
, "Failed to set IDLE event priority for udev control event source: %m");
1995 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->inotify_fd
, EPOLLIN
, on_inotify
, manager
);
1997 return log_error_errno(r
, "Failed to create inotify event source: %m");
1999 r
= sd_device_monitor_attach_event(manager
->monitor
, manager
->event
);
2001 return log_error_errno(r
, "Failed to attach event to device monitor: %m");
2003 r
= sd_device_monitor_start(manager
->monitor
, on_uevent
, manager
);
2005 return log_error_errno(r
, "Failed to start device monitor: %m");
2007 (void) sd_event_source_set_description(sd_device_monitor_get_event_source(manager
->monitor
), "device-monitor");
2009 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
2011 return log_error_errno(r
, "Failed to create worker event source: %m");
2013 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
2015 return log_error_errno(r
, "Failed to create post event source: %m");
2017 udev_builtin_init();
2019 r
= udev_rules_load(&manager
->rules
, arg_resolve_name_timing
);
2020 if (!manager
->rules
)
2021 return log_error_errno(r
, "Failed to read udev rules: %m");
/* Best-effort: a failure here is logged but does not abort the daemon. */
2023 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
2025 log_error_errno(r
, "Failed to apply permissions on static device nodes: %m");
2029 r
= sd_event_loop(manager
->event
);
2031 log_error_errno(r
, "Event loop failed: %m");
2035 "STATUS=Shutting down...");
2039 int run_udevd(int argc
, char *argv
[]) {
2040 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
2041 int fd_ctrl
= -1, fd_uevent
= -1;
2044 log_set_target(LOG_TARGET_AUTO
);
2046 udev_parse_config_full(&arg_children_max
, &arg_exec_delay_usec
, &arg_event_timeout_usec
, &arg_resolve_name_timing
, &arg_timeout_signal
);
2047 log_parse_environment();
2048 log_open(); /* Done again to update after reading configuration. */
2050 r
= parse_argv(argc
, argv
);
2054 r
= proc_cmdline_parse(parse_proc_cmdline_item
, NULL
, PROC_CMDLINE_STRIP_RD_PREFIX
);
2056 log_warning_errno(r
, "Failed to parse kernel command line, ignoring: %m");
2059 log_set_target(LOG_TARGET_CONSOLE
);
2060 log_set_max_level(LOG_DEBUG
);
2067 if (arg_children_max
== 0) {
2068 unsigned long cpu_limit
, mem_limit
, cpu_count
= 1;
2070 r
= cpus_in_affinity_mask();
2072 log_warning_errno(r
, "Failed to determine number of local CPUs, ignoring: %m");
2076 cpu_limit
= cpu_count
* 2 + 16;
2077 mem_limit
= MAX(physical_memory() / (128UL*1024*1024), 10U);
2079 arg_children_max
= MIN(cpu_limit
, mem_limit
);
2080 arg_children_max
= MIN(WORKER_NUM_MAX
, arg_children_max
);
2082 log_debug("Set children_max to %u", arg_children_max
);
2085 /* set umask before creating any file/directory */
2088 r
= mac_selinux_init();
2092 r
= RET_NERRNO(mkdir("/run/udev", 0755));
2093 if (r
< 0 && r
!= -EEXIST
)
2094 return log_error_errno(r
, "Failed to create /run/udev: %m");
2096 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
2098 return log_error_errno(r
, "Failed to listen on fds: %m");
2100 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
);
2102 return log_error_errno(r
, "Failed to create manager: %m");
2104 if (arg_daemonize
) {
2107 log_info("Starting version " GIT_VERSION
);
2109 /* connect /dev/null to stdin, stdout, stderr */
2110 if (log_get_max_level() < LOG_DEBUG
) {
2111 r
= make_null_stdio();
2113 log_warning_errno(r
, "Failed to redirect standard streams to /dev/null: %m");
2118 return log_error_errno(errno
, "Failed to fork daemon: %m");
2127 return main_loop(manager
);