1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
17 #include <sys/epoll.h>
19 #include <sys/inotify.h>
20 #include <sys/ioctl.h>
21 #include <sys/mount.h>
22 #include <sys/prctl.h>
23 #include <sys/signalfd.h>
24 #include <sys/socket.h>
30 #include "sd-daemon.h"
33 #include "alloc-util.h"
34 #include "cgroup-util.h"
35 #include "cpu-set-util.h"
36 #include "dev-setup.h"
37 #include "device-util.h"
40 #include "format-util.h"
44 #include "libudev-device-internal.h"
46 #include "netlink-util.h"
47 #include "parse-util.h"
48 #include "proc-cmdline.h"
49 #include "process-util.h"
50 #include "selinux-util.h"
51 #include "signal-util.h"
52 #include "socket-util.h"
53 #include "string-util.h"
54 #include "terminal-util.h"
55 #include "udev-builtin.h"
56 #include "udev-ctrl.h"
57 #include "udev-util.h"
58 #include "udev-watch.h"
60 #include "user-util.h"
/* Daemon-wide configuration, settable via command line, kernel cmdline
 * (udev.*) and control messages. NOTE(review): listing is fragmented —
 * statements are split across lines; tokens left byte-identical. */
/* enable debug output (set by -D/--debug) */
62 static bool arg_debug
= false;
/* detach and run in the background (set by -d/--daemon) */
63 static int arg_daemonize
= false;
/* when to resolve user/group names in rules: 1=early, 0=late, -1=never */
64 static int arg_resolve_names
= 1;
/* max number of worker processes; 0 here — presumably computed later from CPU/RAM, elided from view — TODO confirm */
65 static unsigned arg_children_max
;
/* seconds to delay before each RUN= program execution */
66 static int arg_exec_delay
;
/* hard timeout per event before the worker is killed (default 180 s) */
67 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
/* warn after one third of the event timeout */
68 static usec_t arg_event_timeout_warn_usec
= 180 * USEC_PER_SEC
/ 3;
/* Core daemon state. NOTE(review): struct bodies below are elided in this
 * listing (internal line numbers jump, e.g. 93->104); several fields and the
 * closing braces are not visible. Code left byte-identical. */
70 typedef struct Manager
{
/* queue of pending device events, oldest first */
73 LIST_HEAD(struct event
, events
);
75 pid_t pid
; /* the process that originally allocated the manager object */
/* compiled udev rules, reloaded on change */
77 struct udev_rules
*rules
;
/* kernel uevent netlink monitor */
80 struct udev_monitor
*monitor
;
/* udevadm control socket */
81 struct udev_ctrl
*ctrl
;
/* connection kept open to block a client that requested EXIT */
82 struct udev_ctrl_connection
*ctrl_conn_blocking
;
/* sd-event sources for the daemon's I/O and timers */
86 sd_event_source
*ctrl_event
;
87 sd_event_source
*uevent_event
;
88 sd_event_source
*inotify_event
;
89 sd_event_source
*kill_workers_event
;
/* set by STOP_EXEC_QUEUE control message; pauses event dispatch */
93 bool stop_exec_queue
:1;
/* --- struct event fields (opening line elided from this view) --- */
104 LIST_FIELDS(struct event
, event
);
106 struct udev_device
*dev
;
/* pristine copy of the kernel event, forwarded unmodified if worker fails */
107 struct udev_device
*dev_kernel
;
/* worker currently handling this event, NULL while queued */
108 struct worker
*worker
;
109 enum event_state state
;
/* seqnum of an earlier event this one is waiting on (see is_devpath_busy) */
110 unsigned long long int delaying_seqnum
;
111 unsigned long long int seqnum
;
114 const char *devpath_old
;
/* per-event timers armed in worker_attach_event() */
118 sd_event_source
*timeout_warning
;
119 sd_event_source
*timeout
;
/* forward declaration; defined later in the file */
122 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
/* --- struct worker fields (opening line elided from this view) --- */
134 struct udev_monitor
*monitor
;
135 enum worker_state state
;
139 /* passed from worker to main process */
140 struct worker_message
{
/* Detach an event from its manager's queue, release its device references
 * and timers, and clear the owning worker's back-pointer. When the queue
 * becomes empty the creator process removes /run/udev/queue so other tools
 * can see udev is idle. NOTE(review): error-check branches and closing
 * braces are elided in this listing; code left byte-identical. */
143 static void event_free(struct event
*event
) {
148 assert(event
->manager
);
150 LIST_REMOVE(event
, event
->manager
->events
, event
);
151 udev_device_unref(event
->dev
);
152 udev_device_unref(event
->dev_kernel
);
154 sd_event_source_unref(event
->timeout_warning
);
155 sd_event_source_unref(event
->timeout
);
/* presumably guarded by `if (event->worker)` on an elided line — TODO confirm */
158 event
->worker
->event
= NULL
;
160 if (LIST_IS_EMPTY(event
->manager
->events
)) {
161 /* only clean up the queue from the process that created it */
162 if (event
->manager
->pid
== getpid_cached()) {
163 r
= unlink("/run/udev/queue");
165 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
/* Remove a worker from the manager's pid-keyed hashmap and free its monitor
 * and any event still attached to it. NOTE(review): interior lines elided;
 * code left byte-identical. */
172 static void worker_free(struct worker
*worker
) {
176 assert(worker
->manager
);
178 hashmap_remove(worker
->manager
->workers
, PID_TO_PTR(worker
->pid
));
179 udev_monitor_unref(worker
->monitor
);
/* event_free() visibly tolerates being reached here; NULL-handling is on an elided line — TODO confirm */
180 event_free(worker
->event
);
/* Free every tracked worker and drop the workers hashmap itself.
 * NOTE(review): the iterator declaration and loop body (presumably a
 * worker_free() call) are elided from this listing. */
185 static void manager_workers_free(Manager
*manager
) {
186 struct worker
*worker
;
191 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
194 manager
->workers
= hashmap_free(manager
->workers
);
/* Allocate a worker record for an already-forked child `pid`, keep a
 * reference to its (disconnected) monitor address, and register it in
 * manager->workers keyed by pid. On success ownership transfers to the
 * hashmap and *ret. NOTE(review): the `if (r < 0) return r;` checks after
 * each fallible call are elided in this listing. */
197 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
198 _cleanup_free_
struct worker
*worker
= NULL
;
203 assert(worker_monitor
);
206 worker
= new0(struct worker
, 1);
210 worker
->manager
= manager
;
211 /* close monitor, but keep address around */
212 udev_monitor_disconnect(worker_monitor
);
213 worker
->monitor
= udev_monitor_ref(worker_monitor
);
216 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
220 r
= hashmap_put(manager
->workers
, PID_TO_PTR(pid
), worker
);
/* TAKE_PTR disarms the _cleanup_free_ guard; caller now borrows the hashmap's entry */
224 *ret
= TAKE_PTR(worker
);
/* sd-event timer callback: the hard per-event timeout fired. Kill the
 * worker handling the event (SIGKILL after SIGCONT, so a stopped process
 * still dies) and mark it killed; on_sigchld() will reap and clean up. */
229 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
230 struct event
*event
= userdata
;
233 assert(event
->worker
);
235 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
236 event
->worker
->state
= WORKER_KILLED
;
238 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
/* sd-event timer callback: warning threshold (one third of the event
 * timeout) passed — log, but take no action yet. */
243 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
244 struct event
*event
= userdata
;
248 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
/* Bind an idle worker to a queued event (both transition to RUNNING) and
 * arm the per-event warning and kill timers relative to the current
 * monotonic time. Timer-add failures are deliberately ignored.
 * NOTE(review): declarations of `e` and `usec` are elided in this listing. */
253 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
258 assert(worker
->manager
);
260 assert(!event
->worker
);
261 assert(!worker
->event
);
263 worker
->state
= WORKER_RUNNING
;
264 worker
->event
= event
;
265 event
->state
= EVENT_RUNNING
;
266 event
->worker
= worker
;
268 e
= worker
->manager
->event
;
270 assert_se(sd_event_now(e
, CLOCK_MONOTONIC
, &usec
) >= 0);
/* 1 s accuracy is fine for these coarse watchdog timers */
272 (void) sd_event_add_time(e
, &event
->timeout_warning
, CLOCK_MONOTONIC
,
273 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
275 (void) sd_event_add_time(e
, &event
->timeout
, CLOCK_MONOTONIC
,
276 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
/* Tear down all manager state: event sources, event loop, workers, queued
 * events, sockets, properties, rules, and fds. All the unref/close helpers
 * used here tolerate NULL per systemd convention, so partial initialization
 * is safe to free. NOTE(review): a NULL guard for `manager` and the final
 * free() are on elided lines — TODO confirm. */
279 static void manager_free(Manager
*manager
) {
285 sd_event_source_unref(manager
->ctrl_event
);
286 sd_event_source_unref(manager
->uevent_event
);
287 sd_event_source_unref(manager
->inotify_event
);
288 sd_event_source_unref(manager
->kill_workers_event
);
290 sd_event_unref(manager
->event
);
291 manager_workers_free(manager
);
292 event_queue_cleanup(manager
, EVENT_UNDEF
);
294 udev_monitor_unref(manager
->monitor
);
295 udev_ctrl_unref(manager
->ctrl
);
296 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
298 hashmap_free_free_free(manager
->properties
);
299 udev_rules_unref(manager
->rules
);
301 safe_close(manager
->fd_inotify
);
302 safe_close_pair(manager
->worker_watch
);
/* enables _cleanup_(manager_freep) usage at call sites */
307 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
/* Notify the main daemon over the worker_watch socketpair that this worker
 * finished an event; the payload is an empty struct, the sender's identity
 * comes from SCM_CREDENTIALS (see on_worker()). */
309 static int worker_send_message(int fd
) {
310 struct worker_message message
= {};
312 return loop_write(fd
, &message
, sizeof(message
), false);
/* Decide whether the worker should flock() the device node while handling
 * an event: only block devices, and not dm-*, md*, or drbd* nodes (their
 * drivers manage synchronization themselves). NOTE(review): the return for
 * the non-"block" case is on an elided line — presumably `return false`. */
315 static bool shall_lock_device(struct udev_device
*dev
) {
318 if (!streq_ptr("block", udev_device_get_subsystem(dev
)))
321 sysname
= udev_device_get_sysname(dev
);
322 return !startswith(sysname
, "dm-") &&
323 !startswith(sysname
, "md") &&
324 !startswith(sysname
, "drbd");
/* Fork a worker process to handle `event`. Parent: registers the child via
 * worker_new() and attaches the event. Child: drops inherited manager
 * state, sets up a signalfd+monitor epoll loop, executes rules/RUN for the
 * initial device and then for each further device the main daemon sends it,
 * reporting completion over the worker_watch socket, until told to exit.
 * NOTE(review): this listing elides many lines (fork() itself, error
 * returns, signal-mask setup, switch cases, closing braces); code is left
 * byte-identical and comments below are anchored only to visible lines. */
327 static void worker_spawn(Manager
*manager
, struct event
*event
) {
328 _cleanup_(udev_monitor_unrefp
) struct udev_monitor
*worker_monitor
= NULL
;
332 /* listen for new events */
333 worker_monitor
= udev_monitor_new_from_netlink(NULL
, NULL
);
334 if (worker_monitor
== NULL
)
336 /* allow the main daemon netlink address to send devices to the worker */
337 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
338 r
= udev_monitor_enable_receiving(worker_monitor
);
340 log_error_errno(r
, "worker: could not enable receiving of device: %m");
/* ---- child branch (the fork() call itself is on an elided line) ---- */
345 _cleanup_(udev_device_unrefp
) struct udev_device
*dev
= NULL
;
346 _cleanup_(sd_netlink_unrefp
) sd_netlink
*rtnl
= NULL
;
348 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
349 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
350 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
353 /* take initial device from queue */
354 dev
= TAKE_PTR(event
->dev
);
/* the child must not talk to the service manager on the parent's behalf */
356 unsetenv("NOTIFY_SOCKET");
/* drop state inherited from the main daemon that the worker must not share */
358 manager_workers_free(manager
);
359 event_queue_cleanup(manager
, EVENT_UNDEF
);
361 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
362 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
363 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
364 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
366 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
367 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
368 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
369 manager
->kill_workers_event
= sd_event_source_unref(manager
->kill_workers_event
);
371 manager
->event
= sd_event_unref(manager
->event
);
/* `mask` is set up on an elided line — presumably sigmask of SIGTERM etc. */
374 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
376 r
= log_error_errno(errno
, "error creating signalfd %m");
379 ep_signal
.data
.fd
= fd_signal
;
381 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
382 ep_monitor
.data
.fd
= fd_monitor
;
384 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
386 r
= log_error_errno(errno
, "error creating epoll fd: %m");
390 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
391 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
392 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
396 /* Request TERM signal if parent exits.
397 Ignore error, not much we can do in that case. */
398 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
400 /* Reset OOM score, we only protect the main daemon. */
401 r
= set_oom_score_adjust(0);
403 log_debug_errno(r
, "Failed to reset OOM score, ignoring: %m");
/* per-device processing loop body (loop header elided from this view) */
406 _cleanup_(udev_event_freep
) struct udev_event
*udev_event
= NULL
;
411 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
412 udev_event
= udev_event_new(dev
->device
, arg_exec_delay
, rtnl
);
419 * Take a shared lock on the device node; this establishes
420 * a concept of device "ownership" to serialize device
421 * access. External processes holding an exclusive lock will
422 * cause udev to skip the event handling; in the case udev
423 * acquired the lock, the external process can block until
424 * udev has finished its event handling.
426 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
427 shall_lock_device(dev
)) {
428 struct udev_device
*d
= dev
;
/* lock the whole disk, not the partition node */
430 if (streq_ptr("partition", udev_device_get_devtype(d
)))
431 d
= udev_device_get_parent(d
);
434 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
435 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
436 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
437 fd_lock
= safe_close(fd_lock
);
443 /* apply rules, create node, symlinks */
444 udev_event_execute_rules(udev_event
,
445 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
449 udev_event_execute_run(udev_event
,
450 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
453 /* in case rtnl was initialized */
454 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
456 /* apply/restore inotify watch */
457 if (udev_event
->inotify_watch
) {
458 udev_watch_begin(dev
->device
);
459 udev_device_update_db(dev
);
464 /* send processed event back to libudev listeners */
465 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
468 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
470 /* send udevd the result of the event execution */
471 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
473 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
474 udev_device_get_seqnum(dev
));
476 dev
= udev_device_unref(dev
);
478 /* wait for more device messages from main udevd, or term signal */
479 while (dev
== NULL
) {
480 struct epoll_event ev
[4];
484 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
488 r
= log_error_errno(errno
, "failed to poll: %m");
492 for (i
= 0; i
= 0 is elided; loop as shown: */ i
< fdcount
; i
++) {
493 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
494 dev
= udev_monitor_receive_device(worker_monitor
);
496 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
497 struct signalfd_siginfo fdsi
;
500 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
501 if (size
!= sizeof(struct signalfd_siginfo
))
503 switch (fdsi
.ssi_signo
) {
/* worker shutdown path (case labels elided) */
512 udev_device_unref(dev
);
513 manager_free(manager
);
515 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
/* ---- parent-side fork-failure handling: requeue the event ---- */
518 event
->state
= EVENT_QUEUED
;
519 log_error_errno(errno
, "fork of child failed: %m");
/* ---- parent-side success: track the new worker ---- */
523 struct worker
*worker
;
525 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
529 worker_attach_event(worker
, event
);
531 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
/* Dispatch a queued event: try each idle worker first, sending the device
 * over the worker's private monitor socket; a worker that refuses the
 * message is killed. If no idle worker is found and the children limit is
 * not yet reached, spawn a fresh worker for the event. NOTE(review):
 * iterator declaration, `return`s after attach, and the limit-reached
 * early-return are elided in this listing. */
537 static void event_run(Manager
*manager
, struct event
*event
) {
538 struct worker
*worker
;
544 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
547 if (worker
->state
!= WORKER_IDLE
)
550 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
552 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
554 (void) kill(worker
->pid
, SIGKILL
);
555 worker
->state
= WORKER_KILLED
;
558 worker_attach_event(worker
, event
);
562 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
/* only log in the non-serialized case to avoid noise when max == 1 */
563 if (arg_children_max
> 1)
564 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
568 /* start new worker and pass initial device */
569 worker_spawn(manager
, event
);
/* Wrap an incoming kernel device in a struct event and append it to the
 * manager's queue, caching the fields (seqnum, devpath, devnum, ifindex…)
 * that is_devpath_busy() needs for dependency ordering. Creates
 * /run/udev/queue when the queue transitions from empty. NOTE(review):
 * seqnum validation, allocation checks and the final return are elided. */
572 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
579 /* only one process can add events to the queue */
580 if (manager
->pid
== 0)
581 manager
->pid
= getpid_cached();
583 assert(manager
->pid
== getpid_cached());
585 event
= new0(struct event
, 1);
589 event
->manager
= manager
;
/* keep a pristine copy of the kernel event for failure forwarding */
591 event
->dev_kernel
= udev_device_shallow_clone(dev
);
592 udev_device_copy_properties(event
->dev_kernel
, dev
);
593 event
->seqnum
= udev_device_get_seqnum(dev
);
594 event
->devpath
= udev_device_get_devpath(dev
);
595 event
->devpath_len
= strlen(event
->devpath
);
596 event
->devpath_old
= udev_device_get_devpath_old(dev
);
597 event
->devnum
= udev_device_get_devnum(dev
);
598 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
599 event
->ifindex
= udev_device_get_ifindex(dev
);
601 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
602 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
604 event
->state
= EVENT_QUEUED
;
/* flag file signals "udev busy" to udevadm settle and friends */
606 if (LIST_IS_EMPTY(manager
->events
)) {
607 r
= touch("/run/udev/queue");
609 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
612 LIST_APPEND(event
, manager
->events
, event
);
/* Send SIGTERM to every worker not already marked killed, flagging them
 * KILLED so they are not targeted twice; reaping happens in on_sigchld().
 * NOTE(review): iterator declaration and `continue` are elided. */
617 static void manager_kill_workers(Manager
*manager
) {
618 struct worker
*worker
;
623 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
624 if (worker
->state
== WORKER_KILLED
)
627 worker
->state
= WORKER_KILLED
;
628 (void) kill(worker
->pid
, SIGTERM
);
/* Return whether `event` must wait: true if an earlier event in the queue
 * targets the same device (devnum/ifindex/old name) or a parent/child
 * devpath. Records the blocking event's seqnum in delaying_seqnum so later
 * scans can skip already-checked prefixes of the queue. NOTE(review): the
 * `return`/`continue`/`break` statements after each condition are elided in
 * this listing; only the conditions themselves are visible. */
632 /* lookup event for identical, parent, child device */
633 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
634 struct event
*loop_event
;
637 /* check if queue contains events we depend on */
638 LIST_FOREACH(event
, loop_event
, manager
->events
) {
639 /* we already found a later event, earlier cannot block us, no need to check again */
640 if (loop_event
->seqnum
< event
->delaying_seqnum
)
643 /* event we checked earlier still exists, no need to check again */
644 if (loop_event
->seqnum
== event
->delaying_seqnum
)
647 /* found ourself, no later event can block us */
648 if (loop_event
->seqnum
>= event
->seqnum
)
651 /* check major/minor */
652 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
655 /* check network device ifindex */
656 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
659 /* check our old name */
660 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
661 event
->delaying_seqnum
= loop_event
->seqnum
;
665 /* compare devpath */
666 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
668 /* one devpath is contained in the other? */
669 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
672 /* identical device event found */
673 if (loop_event
->devpath_len
== event
->devpath_len
) {
674 /* devices names might have changed/swapped in the meantime */
675 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
677 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
679 event
->delaying_seqnum
= loop_event
->seqnum
;
683 /* parent device event found */
684 if (event
->devpath
[common
] == '/') {
685 event
->delaying_seqnum
= loop_event
->seqnum
;
689 /* child device event found */
690 if (loop_event
->devpath
[common
] == '/') {
691 event
->delaying_seqnum
= loop_event
->seqnum
;
695 /* no matching device */
/* sd-event timer callback armed in manager_exit(): workers did not finish
 * within the grace period, so abandon waiting and exit the loop with
 * -ETIMEDOUT. */
702 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
703 Manager
*manager
= userdata
;
707 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
709 sd_event_exit(manager
->event
, -ETIMEDOUT
);
/* Begin orderly shutdown: stop accepting control messages, inotify and
 * uevents, drop queued (not yet running) events, SIGTERM all workers, and
 * arm a 30 s deadline after which on_exit_timeout() forces the loop to
 * exit. NOTE(review): sd_notify STOPPING line(s) and error handling are
 * elided in this listing. */
714 static void manager_exit(Manager
*manager
) {
720 manager
->exit
= true;
724 "STATUS=Starting shutdown...");
726 /* close sources of new events and discard buffered events */
727 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
728 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
730 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
731 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
733 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
734 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
736 /* discard queued events and kill workers */
737 event_queue_cleanup(manager
, EVENT_QUEUED
);
738 manager_kill_workers(manager
);
740 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
742 r
= sd_event_add_time(manager
->event
, NULL
, CLOCK_MONOTONIC
,
743 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
/* Flush configuration: terminate workers (they hold the old rules in
 * memory) and drop the compiled rules so event_queue_start() lazily
 * reloads them. NOTE(review): the surrounding sd_notify calls are partly
 * elided in this listing. */
748 /* reload requested, HUP signal received, rules changed, builtin changed */
749 static void manager_reload(Manager
*manager
) {
755 "STATUS=Flushing configuration...");
757 manager_kill_workers(manager
);
758 manager
->rules
= udev_rules_unref(manager
->rules
);
763 "STATUS=Processing with %u children at max", arg_children_max
);
/* sd-event timer callback: the idle grace period expired with no new
 * events, so reclaim resources by terminating idle workers. */
766 static int on_kill_workers_event(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
767 Manager
*manager
= userdata
;
771 log_debug("Cleanup idle workers");
772 manager_kill_workers(manager
);
/* Arm (or re-arm) the "kill idle workers in 3 s" one-shot timer. If an
 * existing source cannot be queried, retimed, or enabled it is dropped and
 * recreated from scratch — each failure path deliberately falls through to
 * the sd_event_add_time() at the bottom. NOTE(review): `goto`/`return`
 * lines between the branches are elided in this listing. */
777 static int manager_enable_kill_workers_event(Manager
*manager
) {
782 if (!manager
->kill_workers_event
)
785 r
= sd_event_source_get_enabled(manager
->kill_workers_event
, &enabled
);
787 log_debug_errno(r
, "Failed to query whether event source for killing idle workers is enabled or not, trying to create new event source: %m");
788 manager
->kill_workers_event
= sd_event_source_unref(manager
->kill_workers_event
);
/* already armed — nothing to do */
792 if (enabled
== SD_EVENT_ONESHOT
)
795 r
= sd_event_source_set_time(manager
->kill_workers_event
, now(CLOCK_MONOTONIC
) + 3 * USEC_PER_SEC
);
797 log_debug_errno(r
, "Failed to set time to event source for killing idle workers, trying to create new event source: %m");
798 manager
->kill_workers_event
= sd_event_source_unref(manager
->kill_workers_event
);
802 r
= sd_event_source_set_enabled(manager
->kill_workers_event
, SD_EVENT_ONESHOT
);
804 log_debug_errno(r
, "Failed to enable event source for killing idle workers, trying to create new event source: %m");
805 manager
->kill_workers_event
= sd_event_source_unref(manager
->kill_workers_event
);
812 r
= sd_event_add_time(manager
->event
, &manager
->kill_workers_event
, CLOCK_MONOTONIC
,
813 now(CLOCK_MONOTONIC
) + 3 * USEC_PER_SEC
, USEC_PER_SEC
, on_kill_workers_event
, manager
);
815 return log_warning_errno(r
, "Failed to create timer event for killing idle workers: %m");
/* Disarm the idle-worker cleanup timer (no-op if it was never created);
 * called whenever new work arrives so busy workers are not reaped. */
820 static int manager_disable_kill_workers_event(Manager
*manager
) {
823 if (!manager
->kill_workers_event
)
826 r
= sd_event_source_set_enabled(manager
->kill_workers_event
, SD_EVENT_OFF
);
828 return log_warning_errno(r
, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
/* Main scheduler: walk the queue and hand every QUEUED event whose device
 * has no in-flight parent/child/identical event to event_run(). Also
 * rate-limits (3 s) a check for changed rules/builtins, lazily loads the
 * rules, and cancels pending idle-worker cleanup since work is arriving.
 * NOTE(review): declarations (`usec`, the loop's `event`) and early
 * `return`/`continue` lines are elided in this listing. */
833 static void event_queue_start(Manager
*manager
) {
839 if (LIST_IS_EMPTY(manager
->events
) ||
840 manager
->exit
|| manager
->stop_exec_queue
)
843 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
844 /* check for changed config, every 3 seconds at most */
845 if (manager
->last_usec
== 0 ||
846 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
847 if (udev_rules_check_timestamp(manager
->rules
) ||
848 udev_builtin_validate())
849 manager_reload(manager
);
851 manager
->last_usec
= usec
;
854 (void) manager_disable_kill_workers_event(manager
);
/* rules are loaded lazily (and re-loaded after manager_reload() dropped them) */
858 if (!manager
->rules
) {
859 manager
->rules
= udev_rules_new(arg_resolve_names
);
864 LIST_FOREACH(event
,event
,manager
->events
) {
865 if (event
->state
!= EVENT_QUEUED
)
868 /* do not start event if parent or child event is still running */
869 if (is_devpath_busy(manager
, event
))
872 event_run(manager
, event
);
/* Free all events in a given state, or every event when match_type is
 * EVENT_UNDEF. Uses the _SAFE list iterator because event_free() removes
 * the node being visited. NOTE(review): the event_free() call in the loop
 * body is elided in this listing. */
876 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
877 struct event
*event
, *tmp
;
879 LIST_FOREACH_SAFE(event
, event
, tmp
, manager
->events
) {
880 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
/* sd-event I/O callback on the worker_watch socketpair: drain all pending
 * empty worker_message datagrams, identify each sender by the kernel's
 * SCM_CREDENTIALS pid, mark that worker idle, free its finished event, and
 * try to schedule more work. NOTE(review): the surrounding `for (;;)` drain
 * loop, `.iov_base`, `.msg_iov` initializers and `continue`s are elided in
 * this listing. */
887 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
888 Manager
*manager
= userdata
;
893 struct worker_message msg
;
894 struct iovec iovec
= {
896 .iov_len
= sizeof(msg
),
/* control buffer sized for exactly one ucred credential message */
899 struct cmsghdr cmsghdr
;
900 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
902 struct msghdr msghdr
= {
905 .msg_control
= &control
,
906 .msg_controllen
= sizeof(control
),
908 struct cmsghdr
*cmsg
;
910 struct ucred
*ucred
= NULL
;
911 struct worker
*worker
;
913 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
917 else if (errno
== EAGAIN
)
918 /* nothing more to read */
921 return log_error_errno(errno
, "failed to receive message: %m");
922 } else if (size
!= sizeof(struct worker_message
)) {
923 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
927 CMSG_FOREACH(cmsg
, &msghdr
) {
928 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
929 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
930 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
931 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
934 if (!ucred
|| ucred
->pid
<= 0) {
935 log_warning_errno(EIO
, "ignoring worker message without valid PID");
939 /* lookup worker who sent the signal */
940 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(ucred
->pid
));
942 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
/* a worker already marked KILLED stays KILLED even if it reports back */
946 if (worker
->state
!= WORKER_KILLED
)
947 worker
->state
= WORKER_IDLE
;
949 /* worker returned */
950 event_free(worker
->event
);
953 /* we have free workers, try to schedule events */
954 event_queue_start(manager
);
/* sd-event I/O callback on the kernel uevent netlink socket: receive one
 * device, stamp its usec-initialized property, queue it, then kick the
 * scheduler. NOTE(review): the NULL-device early return and the
 * insert-failure branch are elided in this listing. */
959 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
960 Manager
*manager
= userdata
;
961 struct udev_device
*dev
;
966 dev
= udev_monitor_receive_device(manager
->monitor
);
968 udev_device_ensure_usec_initialized(dev
, NULL
);
969 r
= event_queue_insert(manager
, dev
);
/* on insert failure the queue did not take ownership — drop our reference */
971 udev_device_unref(dev
);
973 /* we have fresh events, try to schedule them */
974 event_queue_start(manager
);
/* sd-event I/O callback on the udevadm control socket: decode one control
 * message and apply it — log level, start/stop of the exec queue, reload,
 * property (ENV) set/unset, children-max, ping, exit. Workers are killed
 * after log-level and ENV changes so they restart with the new settings.
 * NOTE(review): many `return`/error branches and the strdup of `val` are
 * elided in this listing. */
980 /* receive the udevd message from userspace */
981 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
982 Manager
*manager
= userdata
;
983 _cleanup_(udev_ctrl_connection_unrefp
) struct udev_ctrl_connection
*ctrl_conn
= NULL
;
984 _cleanup_(udev_ctrl_msg_unrefp
) struct udev_ctrl_msg
*ctrl_msg
= NULL
;
990 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
994 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
998 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
1000 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
1001 log_set_max_level(i
);
/* restart workers so they pick up the new log level */
1002 manager_kill_workers(manager
);
1005 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
1006 log_debug("udevd message (STOP_EXEC_QUEUE) received");
1007 manager
->stop_exec_queue
= true;
1010 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
1011 log_debug("udevd message (START_EXEC_QUEUE) received");
1012 manager
->stop_exec_queue
= false;
1013 event_queue_start(manager
);
1016 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
1017 log_debug("udevd message (RELOAD) received");
1018 manager_reload(manager
);
/* ENV handling: "KEY=value" sets, "KEY=" unsets a global property */
1021 str
= udev_ctrl_get_set_env(ctrl_msg
);
1023 _cleanup_free_
char *key
= NULL
, *val
= NULL
, *old_key
= NULL
, *old_val
= NULL
;
1026 eq
= strchr(str
, '=');
1028 log_error("Invalid key format '%s'", str
);
1032 key
= strndup(str
, eq
- str
);
/* remember displaced entry so both its key and value can be freed */
1038 old_val
= hashmap_remove2(manager
->properties
, key
, (void **) &old_key
);
1040 r
= hashmap_ensure_allocated(&manager
->properties
, &string_hash_ops
);
1048 log_debug("udevd message (ENV) received, unset '%s'", key
);
1050 r
= hashmap_put(manager
->properties
, key
, NULL
);
1062 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
1064 r
= hashmap_put(manager
->properties
, key
, val
);
/* restart workers so they inherit the updated properties */
1072 manager_kill_workers(manager
);
1075 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
1077 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
1078 arg_children_max
= i
;
1080 (void) sd_notifyf(false,
1082 "STATUS=Processing with %u children at max", arg_children_max
);
1085 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
1086 log_debug("udevd message (SYNC) received");
1088 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
1089 log_debug("udevd message (EXIT) received");
1090 manager_exit(manager
);
1091 /* keep reference to block the client until we exit
1092 TODO: deal with several blocking exit requests */
1093 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
/* A watched device node was closed after being opened for writing: write
 * "change" into the device's (and, for whole disks, its partitions')
 * sysfs uevent file so listeners re-probe it. For non-dm whole disks it
 * first tries BLKRRPART under an exclusive lock; if the kernel re-read the
 * table and partitions exist, the kernel's own events suffice and nothing
 * is synthesized. NOTE(review): error `return`s after each getter,
 * `continue`s, fd close, and the final return are elided in this listing. */
1099 static int synthesize_change(sd_device
*dev
) {
1100 const char *subsystem
, *sysname
, *devname
, *syspath
, *devtype
;
1101 char filename
[PATH_MAX
];
1104 r
= sd_device_get_subsystem(dev
, &subsystem
);
1108 r
= sd_device_get_sysname(dev
, &sysname
);
1112 r
= sd_device_get_devname(dev
, &devname
);
1116 r
= sd_device_get_syspath(dev
, &syspath
);
1120 r
= sd_device_get_devtype(dev
, &devtype
);
1124 if (streq_ptr("block", subsystem
) &&
1125 streq_ptr("disk", devtype
) &&
1126 !startswith(sysname
, "dm-")) {
1127 _cleanup_(sd_device_enumerator_unrefp
) sd_device_enumerator
*e
= NULL
;
1128 bool part_table_read
= false, has_partitions
= false;
1133 * Try to re-read the partition table. This only succeeds if
1134 * none of the devices is busy. The kernel returns 0 if no
1135 * partition table is found, and we will not get an event for
1138 fd
= open(devname
, O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1140 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1142 r
= ioctl(fd
, BLKRRPART
, 0);
1146 part_table_read
= true;
1149 /* search for partitions */
1150 r
= sd_device_enumerator_new(&e
);
1154 r
= sd_device_enumerator_allow_uninitialized(e
);
1158 r
= sd_device_enumerator_add_match_parent(e
, dev
);
1162 r
= sd_device_enumerator_add_match_subsystem(e
, "block", true);
1166 FOREACH_DEVICE(e
, d
) {
1169 if (sd_device_get_devtype(d
, &t
) < 0 ||
1170 !streq("partition", t
))
1173 has_partitions
= true;
1178 * We have partitions and re-read the table, the kernel already sent
1179 * out a "change" event for the disk, and "remove/add" for all
1182 if (part_table_read
&& has_partitions
)
1186 * We have partitions but re-reading the partition table did not
1187 * work, synthesize "change" for the disk and all partitions.
1189 log_debug("Device '%s' is closed, synthesising 'change'", devname
);
1190 strscpyl(filename
, sizeof(filename
), syspath
, "/uevent", NULL
);
1191 write_string_file(filename
, "change", WRITE_STRING_FILE_DISABLE_BUFFER
);
1193 FOREACH_DEVICE(e
, d
) {
1194 const char *t
, *n
, *s
;
1196 if (sd_device_get_devtype(d
, &t
) < 0 ||
1197 !streq("partition", t
))
1200 if (sd_device_get_devname(d
, &n
) < 0 ||
1201 sd_device_get_syspath(d
, &s
) < 0)
1204 log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname
, n
);
1205 strscpyl(filename
, sizeof(filename
), s
, "/uevent", NULL
);
1206 write_string_file(filename
, "change", WRITE_STRING_FILE_DISABLE_BUFFER
);
/* fallback for everything that is not a non-dm whole disk */
1212 log_debug("Device %s is closed, synthesising 'change'", devname
);
1213 strscpyl(filename
, sizeof(filename
), syspath
, "/uevent", NULL
);
1214 write_string_file(filename
, "change", WRITE_STRING_FILE_DISABLE_BUFFER
);
/* sd-event I/O callback on the inotify fd that watches device nodes: for
 * each inotify record, map the watch descriptor back to its device;
 * IN_CLOSE_WRITE triggers a synthesized "change" event, IN_IGNORED means
 * the watch vanished and is dropped. NOTE(review): `l <= 0` handling and
 * `continue`s are elided in this listing. */
1219 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1220 Manager
*manager
= userdata
;
1221 union inotify_event_buffer buffer
;
1222 struct inotify_event
*e
;
/* device activity — keep idle workers alive */
1227 (void) manager_disable_kill_workers_event(manager
);
1229 l
= read(fd
, &buffer
, sizeof(buffer
));
1231 if (IN_SET(errno
, EAGAIN
, EINTR
))
1234 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1237 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1238 _cleanup_(sd_device_unrefp
) sd_device
*dev
= NULL
;
1239 const char *devnode
;
1241 if (udev_watch_lookup(e
->wd
, &dev
) <= 0)
1244 if (sd_device_get_devname(dev
, &devnode
) < 0)
1247 log_device_debug(dev
, "Inotify event: %x for %s", e
->mask
, devnode
);
1248 if (e
->mask
& IN_CLOSE_WRITE
)
1249 synthesize_change(dev
);
1250 else if (e
->mask
& IN_IGNORED
)
1251 udev_watch_end(dev
);
/* sd-event signal callback for SIGTERM/SIGINT: begin orderly shutdown. */
1257 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1258 Manager
*manager
= userdata
;
1262 manager_exit(manager
);
/* sd-event signal callback for SIGHUP: flush configuration and reload
 * rules on next dispatch. */
1267 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1268 Manager
*manager
= userdata
;
1272 manager_reload(manager
);
/* sd-event signal callback for SIGCHLD: reap every exited worker
 * (waitpid WNOHANG in a loop), log its exit status, and for abnormal exits
 * with an attached event remove the half-written db state and forward the
 * original kernel event unmodified so listeners still see it. Finally the
 * worker is freed, the scheduler kicked, and the cleanup timer disabled if
 * no workers remain. NOTE(review): the reap loop's `for (;;)`, pid<=0
 * break, and several `continue`s are elided in this listing. */
1277 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1278 Manager
*manager
= userdata
;
1285 struct worker
*worker
;
1287 pid
= waitpid(-1, &status
, WNOHANG
);
1291 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(pid
));
1293 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1297 if (WIFEXITED(status
)) {
1298 if (WEXITSTATUS(status
) == 0)
1299 log_debug("worker ["PID_FMT
"] exited", pid
);
1301 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1302 } else if (WIFSIGNALED(status
)) {
1303 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), signal_to_string(WTERMSIG(status
)));
1304 } else if (WIFSTOPPED(status
)) {
1305 log_info("worker ["PID_FMT
"] stopped", pid
);
1307 } else if (WIFCONTINUED(status
)) {
1308 log_info("worker ["PID_FMT
"] continued", pid
);
1311 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1313 if ((!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) && worker
->event
) {
1314 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1315 /* delete state from disk */
1316 udev_device_delete_db(worker
->event
->dev
);
1317 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1318 /* forward kernel event without amending it */
1319 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1322 worker_free(worker
);
1325 /* we can start new workers, try to schedule events */
1326 event_queue_start(manager
);
1328 /* Disable unnecessary cleanup event */
1329 if (hashmap_isempty(manager
->workers
) && manager
->kill_workers_event
)
1330 (void) sd_event_source_set_enabled(manager
->kill_workers_event
, SD_EVENT_OFF
);
/* sd-event post-dispatch callback, runs after every loop iteration: when
 * the queue is empty, either schedule idle-worker cleanup (workers remain)
 * or — presumably only when the daemon was asked to exit, the guard is on
 * an elided line, TODO confirm — stop the event loop and sweep leftover
 * processes from our cgroup. */
1335 static int on_post(sd_event_source
*s
, void *userdata
) {
1336 Manager
*manager
= userdata
;
1340 if (!LIST_IS_EMPTY(manager
->events
))
1343 /* There are no pending events. Let's cleanup idle process. */
1345 if (!hashmap_isempty(manager
->workers
)) {
1346 /* There are idle workers */
1347 (void) manager_enable_kill_workers_event(manager
);
1351 /* There are no idle workers. */
1354 return sd_event_exit(manager
->event
, 0);
1356 if (manager
->cgroup
)
1357 /* cleanup possible left-over processes in our cgroup */
1358 (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, CGROUP_IGNORE_SELF
, NULL
, NULL
, NULL
);
/* Obtain the control and netlink sockets: prefer fds passed in by the
 * service manager (socket activation via sd_listen_fds, matched by type);
 * for whichever is missing, create it ourselves and dup it to a plain
 * CLOEXEC fd so the temporary udev_ctrl/udev_monitor wrappers can be
 * dropped. NOTE(review): duplicate-fd error paths, the ctrl_fd < 0 guard,
 * *rctrl assignment and final return are elided in this listing. */
1363 static int listen_fds(int *rctrl
, int *rnetlink
) {
1364 int ctrl_fd
= -1, netlink_fd
= -1;
1370 n
= sd_listen_fds(true);
1374 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1375 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1382 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1383 if (netlink_fd
>= 0)
/* no activated control socket — create and bind one */
1393 _cleanup_(udev_ctrl_unrefp
) struct udev_ctrl
*ctrl
= NULL
;
1395 ctrl
= udev_ctrl_new();
1397 return log_error_errno(EINVAL
, "error initializing udev control socket");
1399 r
= udev_ctrl_enable_receiving(ctrl
);
1401 return log_error_errno(EINVAL
, "error binding udev control socket");
1403 fd
= udev_ctrl_get_fd(ctrl
);
1405 return log_error_errno(EIO
, "could not get ctrl fd");
1407 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1409 return log_error_errno(errno
, "could not dup ctrl fd: %m");
1412 if (netlink_fd
< 0) {
1413 _cleanup_(udev_monitor_unrefp
) struct udev_monitor
*monitor
= NULL
;
1415 monitor
= udev_monitor_new_from_netlink(NULL
, "kernel");
1417 return log_error_errno(EINVAL
, "error initializing netlink socket");
/* large buffer absorbs uevent storms during coldplug */
1419 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1421 r
= udev_monitor_enable_receiving(monitor
);
1423 return log_error_errno(EINVAL
, "error binding netlink socket");
1425 fd
= udev_monitor_get_fd(monitor
);
1427 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1429 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1431 return log_error_errno(errno
, "could not dup netlink fd: %m");
1435 *rnetlink
= netlink_fd
;
/* proc_cmdline_parse() callback for udev.* kernel command line options.
 * Each recognized key updates the corresponding arg_* global; unknown
 * udev.* keys only warn, and parse failures are logged and ignored.
 * NOTE(review): `return 0`s between branches and some error plumbing are
 * elided in this listing. */
1441 * read the kernel command line, in case we need to get into debug mode
1442 * udev.log_priority=<level> syslog priority
1443 * udev.children_max=<number of workers> events are fully serialized if set to 1
1444 * udev.exec_delay=<number of seconds> delay execution of every executed program
1445 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
1447 static int parse_proc_cmdline_item(const char *key
, const char *value
, void *data
) {
1455 if (proc_cmdline_key_streq(key
, "udev.log_priority")) {
1457 if (proc_cmdline_value_missing(key
, value
))
1460 r
= util_log_priority(value
);
1462 log_set_max_level(r
);
1464 } else if (proc_cmdline_key_streq(key
, "udev.event_timeout")) {
1466 if (proc_cmdline_value_missing(key
, value
))
1469 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1471 arg_event_timeout_usec
*= USEC_PER_SEC
;
/* warn at a third of the timeout; `?: 1` guards against a zero warn time */
1472 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1475 } else if (proc_cmdline_key_streq(key
, "udev.children_max")) {
1477 if (proc_cmdline_value_missing(key
, value
))
1480 r
= safe_atou(value
, &arg_children_max
);
1482 } else if (proc_cmdline_key_streq(key
, "udev.exec_delay")) {
1484 if (proc_cmdline_value_missing(key
, value
))
1487 r
= safe_atoi(value
, &arg_exec_delay
);
1489 } else if (startswith(key
, "udev."))
1490 log_warning("Unknown udev kernel command line option \"%s\"", key
);
1493 log_warning_errno(r
, "Failed to parse \"%s=%s\", ignoring: %m", key
, value
);
/* Print command line usage, with a terminal-clickable man-page link when
 * supported. NOTE(review): the error check after terminal_urlify_man() and
 * the trailing printf argument/return are elided in this listing. */
1498 static int help(void) {
1499 _cleanup_free_
char *link
= NULL
;
1502 r
= terminal_urlify_man("systemd-udevd.service", "8", &link
);
1506 printf("%s [OPTIONS...]\n\n"
1507 "Manages devices.\n\n"
1508 " -h --help Print this message\n"
1509 " -V --version Print version of the program\n"
1510 " -d --daemon Detach and run in the background\n"
1511 " -D --debug Enable debug output\n"
1512 " -c --children-max=INT Set maximum number of workers\n"
1513 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1514 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1515 " -N --resolve-names=early|late|never\n"
1516 " When to resolve users and groups\n"
1517 "\nSee the %s for details.\n"
1518 , program_invocation_short_name
1525 static int parse_argv(int argc
, char *argv
[]) {
1526 static const struct option options
[] = {
1527 { "daemon", no_argument
, NULL
, 'd' },
1528 { "debug", no_argument
, NULL
, 'D' },
1529 { "children-max", required_argument
, NULL
, 'c' },
1530 { "exec-delay", required_argument
, NULL
, 'e' },
1531 { "event-timeout", required_argument
, NULL
, 't' },
1532 { "resolve-names", required_argument
, NULL
, 'N' },
1533 { "help", no_argument
, NULL
, 'h' },
1534 { "version", no_argument
, NULL
, 'V' },
1543 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
1549 arg_daemonize
= true;
1552 r
= safe_atou(optarg
, &arg_children_max
);
1554 log_warning("Invalid --children-max ignored: %s", optarg
);
1557 r
= safe_atoi(optarg
, &arg_exec_delay
);
1559 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1562 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1564 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1566 arg_event_timeout_usec
*= USEC_PER_SEC
;
1567 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1574 if (streq(optarg
, "early")) {
1575 arg_resolve_names
= 1;
1576 } else if (streq(optarg
, "late")) {
1577 arg_resolve_names
= 0;
1578 } else if (streq(optarg
, "never")) {
1579 arg_resolve_names
= -1;
1581 log_error("resolve-names must be early, late or never");
1588 printf("%s\n", PACKAGE_VERSION
);
1593 assert_not_reached("Unhandled option");
1601 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1602 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1606 assert(fd_ctrl
>= 0);
1607 assert(fd_uevent
>= 0);
1609 manager
= new0(Manager
, 1);
1613 manager
->fd_inotify
= -1;
1614 manager
->worker_watch
[WRITE_END
] = -1;
1615 manager
->worker_watch
[READ_END
] = -1;
1617 udev_builtin_init();
1619 manager
->rules
= udev_rules_new(arg_resolve_names
);
1620 if (!manager
->rules
)
1621 return log_error_errno(ENOMEM
, "error reading rules");
1623 LIST_HEAD_INIT(manager
->events
);
1625 manager
->cgroup
= cgroup
;
1627 manager
->ctrl
= udev_ctrl_new_from_fd(fd_ctrl
);
1629 return log_error_errno(EINVAL
, "error taking over udev control socket");
1631 manager
->monitor
= udev_monitor_new_from_netlink_fd(NULL
, "kernel", fd_uevent
);
1632 if (!manager
->monitor
)
1633 return log_error_errno(EINVAL
, "error taking over netlink socket");
1635 /* unnamed socket from workers to the main daemon */
1636 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1638 return log_error_errno(errno
, "error creating socketpair: %m");
1640 fd_worker
= manager
->worker_watch
[READ_END
];
1642 r
= setsockopt_int(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, true);
1644 return log_error_errno(r
, "could not enable SO_PASSCRED: %m");
1646 r
= udev_watch_init();
1648 return log_error_errno(r
, "Failed to create inotify descriptor: %m");
1649 manager
->fd_inotify
= r
;
1651 udev_watch_restore();
1653 /* block and listen to all signals on signalfd */
1654 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1656 r
= sd_event_default(&manager
->event
);
1658 return log_error_errno(r
, "could not allocate event loop: %m");
1660 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1662 return log_error_errno(r
, "error creating sigint event source: %m");
1664 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1666 return log_error_errno(r
, "error creating sigterm event source: %m");
1668 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1670 return log_error_errno(r
, "error creating sighup event source: %m");
1672 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1674 return log_error_errno(r
, "error creating sigchld event source: %m");
1676 r
= sd_event_set_watchdog(manager
->event
, true);
1678 return log_error_errno(r
, "error creating watchdog event source: %m");
1680 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1682 return log_error_errno(r
, "error creating ctrl event source: %m");
1684 /* This needs to be after the inotify and uevent handling, to make sure
1685 * that the ping is send back after fully processing the pending uevents
1686 * (including the synthetic ones we may create due to inotify events).
1688 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1690 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1692 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1694 return log_error_errno(r
, "error creating inotify event source: %m");
1696 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1698 return log_error_errno(r
, "error creating uevent event source: %m");
1700 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1702 return log_error_errno(r
, "error creating worker event source: %m");
1704 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1706 return log_error_errno(r
, "error creating post event source: %m");
1708 *ret
= TAKE_PTR(manager
);
1713 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1714 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1717 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1719 r
= log_error_errno(r
, "failed to allocate manager object: %m");
1723 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1725 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1727 (void) sd_notifyf(false,
1729 "STATUS=Processing with %u children at max", arg_children_max
);
1731 r
= sd_event_loop(manager
->event
);
1733 log_error_errno(r
, "event loop failed: %m");
1737 sd_event_get_exit_code(manager
->event
, &r
);
1742 "STATUS=Shutting down...");
1744 udev_ctrl_cleanup(manager
->ctrl
);
1748 int main(int argc
, char *argv
[]) {
1749 _cleanup_free_
char *cgroup
= NULL
;
1750 int fd_ctrl
= -1, fd_uevent
= -1;
1753 log_set_target(LOG_TARGET_AUTO
);
1754 udev_parse_config();
1755 log_parse_environment();
1758 r
= parse_argv(argc
, argv
);
1762 r
= proc_cmdline_parse(parse_proc_cmdline_item
, NULL
, PROC_CMDLINE_STRIP_RD_PREFIX
);
1764 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1767 log_set_target(LOG_TARGET_CONSOLE
);
1768 log_set_max_level(LOG_DEBUG
);
1771 log_set_max_level_realm(LOG_REALM_SYSTEMD
, log_get_max_level());
1777 if (arg_children_max
== 0) {
1779 unsigned long mem_limit
;
1781 arg_children_max
= 8;
1783 if (sched_getaffinity(0, sizeof(cpu_set
), &cpu_set
) == 0)
1784 arg_children_max
+= CPU_COUNT(&cpu_set
) * 8;
1786 mem_limit
= physical_memory() / (128LU*1024*1024);
1787 arg_children_max
= MAX(10U, MIN(arg_children_max
, mem_limit
));
1789 log_debug("set children_max to %u", arg_children_max
);
1792 /* set umask before creating any file/directory */
1795 r
= log_error_errno(errno
, "could not change dir to /: %m");
1801 r
= mac_selinux_init();
1803 log_error_errno(r
, "could not initialize labelling: %m");
1807 r
= mkdir_errno_wrapper("/run/udev", 0755);
1808 if (r
< 0 && r
!= -EEXIST
) {
1809 log_error_errno(r
, "could not create /run/udev: %m");
1813 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1815 if (getppid() == 1) {
1816 /* get our own cgroup, we regularly kill everything udev has left behind
1817 we only do this on systemd systems, and only if we are directly spawned
1818 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1819 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1821 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
1822 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1824 log_warning_errno(r
, "failed to get cgroup: %m");
1828 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1830 r
= log_error_errno(r
, "could not listen on fds: %m");
1834 if (arg_daemonize
) {
1837 log_info("starting version " PACKAGE_VERSION
);
1839 /* connect /dev/null to stdin, stdout, stderr */
1840 if (log_get_max_level() < LOG_DEBUG
) {
1841 r
= make_null_stdio();
1843 log_warning_errno(r
, "Failed to redirect standard streams to /dev/null: %m");
1851 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1854 mac_selinux_finish();
1856 _exit(EXIT_SUCCESS
);
1861 r
= set_oom_score_adjust(-1000);
1863 log_debug_errno(r
, "Failed to adjust OOM score, ignoring: %m");
1866 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1869 mac_selinux_finish();
1871 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;