2 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
33 #include <sys/prctl.h>
34 #include <sys/socket.h>
35 #include <sys/signalfd.h>
36 #include <sys/epoll.h>
37 #include <sys/mount.h>
40 #include <sys/ioctl.h>
41 #include <sys/inotify.h>
43 #include "sd-daemon.h"
44 #include "rtnl-util.h"
45 #include "cgroup-util.h"
46 #include "process-util.h"
47 #include "dev-setup.h"
49 #include "selinux-util.h"
51 #include "udev-util.h"
52 #include "formats-util.h"
/* Global configuration, set from the command line and/or the kernel command line
 * (see parse_argv() and parse_proc_cmdline_item() later in this file). */

/* Enable debug logging. */
static bool arg_debug = false;
/* Detach and run in the background when true (--daemon). */
static int arg_daemonize = false;
/* When to resolve user/group names in rules: 1 = early, 0 = late, -1 = never
 * (--resolve-names=). */
static int arg_resolve_names = 1;
/* Maximum number of worker processes; 0 means "not configured" (--children-max=). */
static unsigned arg_children_max;
/* Seconds to delay execution of every RUN= program (--exec-delay=). */
static int arg_exec_delay;
/* How long a worker may handle one event before it is killed (--event-timeout=). */
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
/* Warn when event handling takes longer than this; kept at one third of the
 * kill timeout (see the timeout option parsers). */
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
63 typedef struct Manager
{
66 struct udev_list_node events
;
68 pid_t pid
; /* the process that originally allocated the manager object */
69 sigset_t sigmask_orig
;
71 struct udev_rules
*rules
;
72 struct udev_list properties
;
74 struct udev_monitor
*monitor
;
75 struct udev_ctrl
*ctrl
;
76 struct udev_ctrl_connection
*ctrl_conn_blocking
;
86 bool stop_exec_queue
:1;
98 struct udev_list_node node
;
101 struct udev_device
*dev
;
102 struct udev_device
*dev_kernel
;
103 struct worker
*worker
;
104 enum event_state state
;
105 unsigned long long int delaying_seqnum
;
106 unsigned long long int seqnum
;
109 const char *devpath_old
;
/* Convert an embedded queue-list node back to its containing struct event. */
static inline struct event *node_to_event(struct udev_list_node *node) {
        return container_of(node, struct event, node);
/* NOTE(review): the function's closing brace is elided in this extraction. */
/* Forward declaration; defined later in this file. Frees queued events whose
 * state matches 'type' (EVENT_UNDEF matches all). */
static void event_queue_cleanup(Manager *manager, enum event_state type);
132 struct udev_list_node node
;
135 struct udev_monitor
*monitor
;
136 enum worker_state state
;
140 /* passed from worker to main process */
141 struct worker_message
{
/*
 * Release an event: unlink it from the manager's queue, drop its device
 * references, and detach it from the worker that was processing it.
 * NOTE(review): several lines of the original body (local declarations,
 * guard conditions and closing braces) are elided in this extraction;
 * only the visible code is reproduced below.
 */
static void event_free(struct event *event) {
        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);
        /* detach from the worker that was handling this event */
        event->worker->event = NULL;
        assert(event->manager);
        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        /* best-effort: failure to remove the flag file is only logged */
                        log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
/*
 * Tear down a worker: remove it from the manager's pid-keyed worker hashmap,
 * drop its monitor reference, and free any event it was still handling.
 * NOTE(review): guard lines and the closing brace are elided in this extraction.
 */
static void worker_free(struct worker *worker) {
        assert(worker->manager);
        /* workers are keyed by pid in the manager's hashmap */
        hashmap_remove(worker->manager->workers, UINT_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);
/*
 * Free every tracked worker and then the workers hashmap itself.
 * NOTE(review): the iterator declaration and the loop body (presumably
 * worker_free(worker)) are elided in this extraction.
 */
static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        HASHMAP_FOREACH(worker, manager->workers, i)
        manager->workers = hashmap_free(manager->workers);
/*
 * Allocate and register a new worker for the child process 'pid'.
 * The worker takes a reference on 'worker_monitor' (which is disconnected
 * first so only its address is kept) and is inserted into the manager's
 * pid-keyed workers hashmap. On success *ret receives the worker.
 * NOTE(review): error-check lines (after new0/hashmap calls), the
 * ownership hand-off to *ret and the closing brace are elided in this
 * extraction; only visible code is reproduced.
 */
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        assert(worker_monitor);
        worker = new0(struct worker, 1);
        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        r = hashmap_ensure_allocated(&manager->workers, NULL);
        r = hashmap_put(manager->workers, UINT_TO_PTR(pid), worker);
/*
 * sd-event timer callback: an event has exceeded arg_event_timeout_usec.
 * Kill the worker handling it (SIGKILL, with SIGCONT so a stopped process
 * actually receives it) and mark the worker as killed.
 * NOTE(review): assert lines and the return statement/closing brace are
 * elided in this extraction.
 */
static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;
        assert(event->worker);
        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;
        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
/*
 * sd-event timer callback: an event has exceeded arg_event_timeout_warn_usec.
 * Only logs a warning; the hard kill happens in on_event_timeout().
 * NOTE(review): interior lines and the closing brace are elided in this
 * extraction.
 */
static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;
        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
/*
 * Bind a queued event to an idle worker: both sides get a pointer to the
 * other, their states move to RUNNING, and the event's start time is
 * recorded so the timeout handlers can measure processing time.
 * NOTE(review): the closing brace is elided in this extraction.
 */
static void worker_attach_event(struct worker *worker, struct event *event) {
        /* an event may only be attached once, to exactly one worker */
        assert(!event->worker);
        assert(!worker->event);
        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        /* timestamp used by the event timeout/warning timers */
        event->start_usec = now(CLOCK_MONOTONIC);
        event->warned = false;
        event->worker = worker;
/*
 * Destroy the Manager: shut down builtins, free workers and all queued
 * events, drop every libudev object reference, and close all file
 * descriptors. Used both directly and via the manager_freep cleanup helper.
 * NOTE(review): the NULL guard, some interior lines, the final free(manager)
 * and the closing brace are elided in this extraction.
 */
static void manager_free(Manager *manager) {
        udev_builtin_exit(manager->udev);
        udev_unref(manager->udev);
        manager_workers_free(manager);
        /* EVENT_UNDEF matches every state, i.e. frees all queued events */
        event_queue_cleanup(manager, EVENT_UNDEF);
        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);
        free(manager->cgroup);
        safe_close(manager->fd_ep);
        safe_close(manager->fd_signal);
        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);
/* Defines manager_freep() for use with _cleanup_(manager_freep); it calls
 * manager_free() on scope exit. */
DEFINE_TRIVIAL_CLEANUP_FUNC(Manager *, manager_free);
/*
 * Notify the main daemon over the worker_watch socketpair that this worker
 * finished handling an event; the payload is an empty worker_message, the
 * sender's identity travels via SO_PASSCRED credentials.
 * NOTE(review): the closing brace is elided in this extraction.
 */
static int worker_send_message(int fd) {
        struct worker_message message = {};
        return loop_write(fd, &message, sizeof(message), false);
302 static void worker_spawn(Manager
*manager
, struct event
*event
) {
303 struct udev
*udev
= event
->udev
;
304 _cleanup_udev_monitor_unref_
struct udev_monitor
*worker_monitor
= NULL
;
307 /* listen for new events */
308 worker_monitor
= udev_monitor_new_from_netlink(udev
, NULL
);
309 if (worker_monitor
== NULL
)
311 /* allow the main daemon netlink address to send devices to the worker */
312 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
313 udev_monitor_enable_receiving(worker_monitor
);
318 struct udev_device
*dev
= NULL
;
319 _cleanup_rtnl_unref_ sd_rtnl
*rtnl
= NULL
;
321 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
322 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
323 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
327 /* take initial device from queue */
331 manager_workers_free(manager
);
332 event_queue_cleanup(manager
, EVENT_UNDEF
);
334 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
335 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
336 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
338 manager
->fd_ep
= safe_close(manager
->fd_ep
);
339 manager
->fd_signal
= safe_close(manager
->fd_signal
);
340 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
341 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
344 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
346 r
= log_error_errno(errno
, "error creating signalfd %m");
349 ep_signal
.data
.fd
= fd_signal
;
351 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
352 ep_monitor
.data
.fd
= fd_monitor
;
354 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
356 r
= log_error_errno(errno
, "error creating epoll fd: %m");
360 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
361 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
362 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
366 /* request TERM signal if parent exits */
367 prctl(PR_SET_PDEATHSIG
, SIGTERM
);
369 /* reset OOM score, we only protect the main daemon */
370 write_string_file("/proc/self/oom_score_adj", "0");
373 struct udev_event
*udev_event
;
376 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
377 udev_event
= udev_event_new(dev
);
378 if (udev_event
== NULL
) {
383 /* needed for SIGCHLD/SIGTERM in spawn() */
384 udev_event
->fd_signal
= fd_signal
;
386 if (arg_exec_delay
> 0)
387 udev_event
->exec_delay
= arg_exec_delay
;
390 * Take a shared lock on the device node; this establishes
391 * a concept of device "ownership" to serialize device
392 * access. External processes holding an exclusive lock will
393 * cause udev to skip the event handling; in the case udev
394 * acquired the lock, the external process can block until
395 * udev has finished its event handling.
397 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
398 streq_ptr("block", udev_device_get_subsystem(dev
)) &&
399 !startswith(udev_device_get_sysname(dev
), "dm-") &&
400 !startswith(udev_device_get_sysname(dev
), "md")) {
401 struct udev_device
*d
= dev
;
403 if (streq_ptr("partition", udev_device_get_devtype(d
)))
404 d
= udev_device_get_parent(d
);
407 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
408 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
409 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
410 fd_lock
= safe_close(fd_lock
);
417 /* needed for renaming netifs */
418 udev_event
->rtnl
= rtnl
;
420 /* apply rules, create node, symlinks */
421 udev_event_execute_rules(udev_event
,
422 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
423 &manager
->properties
,
425 &manager
->sigmask_orig
);
427 udev_event_execute_run(udev_event
,
428 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
429 &manager
->sigmask_orig
);
431 if (udev_event
->rtnl
)
432 /* in case rtnl was initialized */
433 rtnl
= sd_rtnl_ref(udev_event
->rtnl
);
435 /* apply/restore inotify watch */
436 if (udev_event
->inotify_watch
) {
437 udev_watch_begin(udev
, dev
);
438 udev_device_update_db(dev
);
443 /* send processed event back to libudev listeners */
444 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
447 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
449 /* send udevd the result of the event execution */
450 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
452 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
453 udev_device_get_seqnum(dev
));
455 udev_device_unref(dev
);
458 if (udev_event
->sigterm
) {
459 udev_event_unref(udev_event
);
463 udev_event_unref(udev_event
);
465 /* wait for more device messages from main udevd, or term signal */
466 while (dev
== NULL
) {
467 struct epoll_event ev
[4];
471 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
475 r
= log_error_errno(errno
, "failed to poll: %m");
479 for (i
= 0; i
< fdcount
; i
++) {
480 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
481 dev
= udev_monitor_receive_device(worker_monitor
);
483 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
484 struct signalfd_siginfo fdsi
;
487 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
488 if (size
!= sizeof(struct signalfd_siginfo
))
490 switch (fdsi
.ssi_signo
) {
499 udev_device_unref(dev
);
500 manager_free(manager
);
502 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
505 event
->state
= EVENT_QUEUED
;
506 log_error_errno(errno
, "fork of child failed: %m");
510 struct worker
*worker
;
513 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
517 worker_attach_event(worker
, event
);
519 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
525 static void event_run(Manager
*manager
, struct event
*event
) {
526 struct worker
*worker
;
532 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
535 if (worker
->state
!= WORKER_IDLE
)
538 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
540 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
542 kill(worker
->pid
, SIGKILL
);
543 worker
->state
= WORKER_KILLED
;
546 worker_attach_event(worker
, event
);
550 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
551 if (arg_children_max
> 1)
552 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
556 /* start new worker and pass initial device */
557 worker_spawn(manager
, event
);
560 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
567 /* only one process can add events to the queue */
568 if (manager
->pid
== 0)
569 manager
->pid
= getpid();
571 assert(manager
->pid
== getpid());
573 event
= new0(struct event
, 1);
577 event
->udev
= udev_device_get_udev(dev
);
578 event
->manager
= manager
;
580 event
->dev_kernel
= udev_device_shallow_clone(dev
);
581 udev_device_copy_properties(event
->dev_kernel
, dev
);
582 event
->seqnum
= udev_device_get_seqnum(dev
);
583 event
->devpath
= udev_device_get_devpath(dev
);
584 event
->devpath_len
= strlen(event
->devpath
);
585 event
->devpath_old
= udev_device_get_devpath_old(dev
);
586 event
->devnum
= udev_device_get_devnum(dev
);
587 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
588 event
->ifindex
= udev_device_get_ifindex(dev
);
590 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
591 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
593 event
->state
= EVENT_QUEUED
;
595 if (udev_list_node_is_empty(&manager
->events
)) {
596 r
= touch("/run/udev/queue");
598 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
601 udev_list_node_append(&event
->node
, &manager
->events
);
/*
 * Send SIGTERM to every worker not already marked killed, so workers pick
 * up new configuration (rules/properties/log level) on their next spawn.
 * NOTE(review): the iterator declaration, the 'continue' after the state
 * check, and the closing braces are elided in this extraction.
 */
static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        HASHMAP_FOREACH(worker, manager->workers, i) {
                /* skip workers we already killed */
                if (worker->state == WORKER_KILLED)
                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
621 /* lookup event for identical, parent, child device */
622 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
623 struct udev_list_node
*loop
;
626 /* check if queue contains events we depend on */
627 udev_list_node_foreach(loop
, &manager
->events
) {
628 struct event
*loop_event
= node_to_event(loop
);
630 /* we already found a later event, earlier can not block us, no need to check again */
631 if (loop_event
->seqnum
< event
->delaying_seqnum
)
634 /* event we checked earlier still exists, no need to check again */
635 if (loop_event
->seqnum
== event
->delaying_seqnum
)
638 /* found ourself, no later event can block us */
639 if (loop_event
->seqnum
>= event
->seqnum
)
642 /* check major/minor */
643 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
646 /* check network device ifindex */
647 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
650 /* check our old name */
651 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
652 event
->delaying_seqnum
= loop_event
->seqnum
;
656 /* compare devpath */
657 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
659 /* one devpath is contained in the other? */
660 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
663 /* identical device event found */
664 if (loop_event
->devpath_len
== event
->devpath_len
) {
665 /* devices names might have changed/swapped in the meantime */
666 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
668 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
670 event
->delaying_seqnum
= loop_event
->seqnum
;
674 /* parent device event found */
675 if (event
->devpath
[common
] == '/') {
676 event
->delaying_seqnum
= loop_event
->seqnum
;
680 /* child device event found */
681 if (loop_event
->devpath
[common
] == '/') {
682 event
->delaying_seqnum
= loop_event
->seqnum
;
686 /* no matching device */
/*
 * Walk the event queue and dispatch every QUEUED event whose devpath is not
 * blocked by a still-running related event (see is_devpath_busy()).
 * NOTE(review): the 'continue' statements after each check and the closing
 * braces are elided in this extraction.
 */
static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;
        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);
                if (event->state != EVENT_QUEUED)
                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                event_run(manager, event);
/*
 * Free all queued events whose state matches 'match_type'; EVENT_UNDEF
 * matches everything. Uses the _safe list iterator because event_free()
 * unlinks the node being visited.
 * NOTE(review): the 'continue', the event_free(event) call and the closing
 * braces are elided in this extraction.
 */
static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct udev_list_node *loop, *tmp;
        udev_list_node_foreach_safe(loop, tmp, &manager->events) {
                struct event *event = node_to_event(loop);
                if (match_type != EVENT_UNDEF && match_type != event->state)
725 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
726 Manager
*manager
= userdata
;
731 struct worker_message msg
;
732 struct iovec iovec
= {
734 .iov_len
= sizeof(msg
),
737 struct cmsghdr cmsghdr
;
738 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
740 struct msghdr msghdr
= {
743 .msg_control
= &control
,
744 .msg_controllen
= sizeof(control
),
746 struct cmsghdr
*cmsg
;
748 struct ucred
*ucred
= NULL
;
749 struct worker
*worker
;
751 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
755 else if (errno
== EAGAIN
)
756 /* nothing more to read */
759 return log_error_errno(errno
, "failed to receive message: %m");
760 } else if (size
!= sizeof(struct worker_message
)) {
761 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
765 for (cmsg
= CMSG_FIRSTHDR(&msghdr
); cmsg
; cmsg
= CMSG_NXTHDR(&msghdr
, cmsg
)) {
766 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
767 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
768 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
769 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
772 if (!ucred
|| ucred
->pid
<= 0) {
773 log_warning_errno(EIO
, "ignoring worker message without valid PID");
777 /* lookup worker who sent the signal */
778 worker
= hashmap_get(manager
->workers
, UINT_TO_PTR(ucred
->pid
));
780 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
784 if (worker
->state
!= WORKER_KILLED
)
785 worker
->state
= WORKER_IDLE
;
787 /* worker returned */
788 event_free(worker
->event
);
794 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
795 Manager
*manager
= userdata
;
796 struct udev_device
*dev
;
801 dev
= udev_monitor_receive_device(manager
->monitor
);
803 udev_device_ensure_usec_initialized(dev
, NULL
);
804 r
= event_queue_insert(manager
, dev
);
806 udev_device_unref(dev
);
812 /* receive the udevd message from userspace */
813 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
814 Manager
*manager
= userdata
;
815 _cleanup_udev_ctrl_connection_unref_
struct udev_ctrl_connection
*ctrl_conn
= NULL
;
816 _cleanup_udev_ctrl_msg_unref_
struct udev_ctrl_msg
*ctrl_msg
= NULL
;
822 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
826 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
830 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
832 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
833 log_set_max_level(i
);
834 manager_kill_workers(manager
);
837 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
838 log_debug("udevd message (STOP_EXEC_QUEUE) received");
839 manager
->stop_exec_queue
= true;
842 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
843 log_debug("udevd message (START_EXEC_QUEUE) received");
844 manager
->stop_exec_queue
= false;
847 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
848 log_debug("udevd message (RELOAD) received");
849 manager
->reload
= true;
852 str
= udev_ctrl_get_set_env(ctrl_msg
);
854 _cleanup_free_
char *key
= NULL
;
860 val
= strchr(key
, '=');
864 if (val
[0] == '\0') {
865 log_debug("udevd message (ENV) received, unset '%s'", key
);
866 udev_list_entry_add(&manager
->properties
, key
, NULL
);
868 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
869 udev_list_entry_add(&manager
->properties
, key
, val
);
872 log_error("wrong key format '%s'", key
);
874 manager_kill_workers(manager
);
877 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
879 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
880 arg_children_max
= i
;
883 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
884 log_debug("udevd message (SYNC) received");
886 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
887 log_debug("udevd message (EXIT) received");
888 manager
->exit
= true;
889 /* keep reference to block the client until we exit
890 TODO: deal with several blocking exit requests */
891 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
897 static int synthesize_change(struct udev_device
*dev
) {
898 char filename
[UTIL_PATH_SIZE
];
901 if (streq_ptr("block", udev_device_get_subsystem(dev
)) &&
902 streq_ptr("disk", udev_device_get_devtype(dev
)) &&
903 !startswith(udev_device_get_sysname(dev
), "dm-")) {
904 bool part_table_read
= false;
905 bool has_partitions
= false;
907 struct udev
*udev
= udev_device_get_udev(dev
);
908 _cleanup_udev_enumerate_unref_
struct udev_enumerate
*e
= NULL
;
909 struct udev_list_entry
*item
;
912 * Try to re-read the partition table. This only succeeds if
913 * none of the devices is busy. The kernel returns 0 if no
914 * partition table is found, and we will not get an event for
917 fd
= open(udev_device_get_devnode(dev
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
919 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
921 r
= ioctl(fd
, BLKRRPART
, 0);
925 part_table_read
= true;
928 /* search for partitions */
929 e
= udev_enumerate_new(udev
);
933 r
= udev_enumerate_add_match_parent(e
, dev
);
937 r
= udev_enumerate_add_match_subsystem(e
, "block");
941 r
= udev_enumerate_scan_devices(e
);
945 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
946 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
948 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
952 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
955 has_partitions
= true;
960 * We have partitions and re-read the table, the kernel already sent
961 * out a "change" event for the disk, and "remove/add" for all
964 if (part_table_read
&& has_partitions
)
968 * We have partitions but re-reading the partition table did not
969 * work, synthesize "change" for the disk and all partitions.
971 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
972 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
973 write_string_file(filename
, "change");
975 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
976 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
978 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
982 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
985 log_debug("device %s closed, synthesising partition '%s' 'change'",
986 udev_device_get_devnode(dev
), udev_device_get_devnode(d
));
987 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(d
), "/uevent", NULL
);
988 write_string_file(filename
, "change");
994 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
995 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
996 write_string_file(filename
, "change");
1001 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1002 Manager
*manager
= userdata
;
1003 union inotify_event_buffer buffer
;
1004 struct inotify_event
*e
;
1009 l
= read(fd
, &buffer
, sizeof(buffer
));
1011 if (errno
== EAGAIN
|| errno
== EINTR
)
1014 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1017 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1018 _cleanup_udev_device_unref_
struct udev_device
*dev
= NULL
;
1020 dev
= udev_watch_lookup(manager
->udev
, e
->wd
);
1024 log_debug("inotify event: %x for %s", e
->mask
, udev_device_get_devnode(dev
));
1025 if (e
->mask
& IN_CLOSE_WRITE
) {
1026 synthesize_change(dev
);
1028 /* settle might be waiting on us to determine the queue
1029 * state. If we just handled an inotify event, we might have
1030 * generated a "change" event, but we won't have queued up
1031 * the resultant uevent yet. Do that.
1033 on_uevent(NULL
, -1, 0, manager
);
1034 } else if (e
->mask
& IN_IGNORED
)
1035 udev_watch_end(manager
->udev
, dev
);
/*
 * SIGTERM/SIGINT handler: request a clean daemon shutdown by setting the
 * manager's exit flag; the main loop notices and exits.
 * NOTE(review): interior lines and the closing brace are elided in this
 * extraction.
 */
static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;
        manager->exit = true;
/*
 * SIGHUP handler: request a configuration reload (rules re-read, workers
 * restarted) by setting the manager's reload flag.
 * NOTE(review): interior lines and the closing brace are elided in this
 * extraction.
 */
static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;
        manager->reload = true;
1061 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1062 Manager
*manager
= userdata
;
1069 struct worker
*worker
;
1071 pid
= waitpid(-1, &status
, WNOHANG
);
1075 worker
= hashmap_get(manager
->workers
, UINT_TO_PTR(pid
));
1077 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1081 if (WIFEXITED(status
)) {
1082 if (WEXITSTATUS(status
) == 0)
1083 log_debug("worker ["PID_FMT
"] exited", pid
);
1085 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1086 } else if (WIFSIGNALED(status
)) {
1087 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), strsignal(WTERMSIG(status
)));
1088 } else if (WIFSTOPPED(status
)) {
1089 log_info("worker ["PID_FMT
"] stopped", pid
);
1091 } else if (WIFCONTINUED(status
)) {
1092 log_info("worker ["PID_FMT
"] continued", pid
);
1095 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1097 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1098 if (worker
->event
) {
1099 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1100 /* delete state from disk */
1101 udev_device_delete_db(worker
->event
->dev
);
1102 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1103 /* forward kernel event without amending it */
1104 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1108 worker_free(worker
);
1114 static int systemd_fds(int *rctrl
, int *rnetlink
) {
1115 int ctrl
= -1, netlink
= -1;
1118 n
= sd_listen_fds(true);
1122 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1123 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1130 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1140 if (ctrl
< 0 || netlink
< 0)
1143 log_debug("ctrl=%i netlink=%i", ctrl
, netlink
);
1145 *rnetlink
= netlink
;
1150 * read the kernel command line, in case we need to get into debug mode
1151 * udev.log-priority=<level> syslog priority
1152 * udev.children-max=<number of workers> events are fully serialized if set to 1
1153 * udev.exec-delay=<number of seconds> delay execution of every executed program
1154 * udev.event-timeout=<number of seconds> seconds to wait before terminating an event
1156 static int parse_proc_cmdline_item(const char *key
, const char *value
) {
1164 if (startswith(key
, "rd."))
1165 key
+= strlen("rd.");
1167 if (startswith(key
, "udev."))
1168 key
+= strlen("udev.");
1172 if (streq(key
, "log-priority")) {
1175 prio
= util_log_priority(value
);
1176 log_set_max_level(prio
);
1177 } else if (streq(key
, "children-max")) {
1178 r
= safe_atou(value
, &arg_children_max
);
1180 log_warning("invalid udev.children-max ignored: %s", value
);
1181 } else if (streq(key
, "exec-delay")) {
1182 r
= safe_atoi(value
, &arg_exec_delay
);
1184 log_warning("invalid udev.exec-delay ignored: %s", value
);
1185 } else if (streq(key
, "event-timeout")) {
1186 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1188 log_warning("invalid udev.event-timeout ignored: %s", value
);
1190 arg_event_timeout_usec
*= USEC_PER_SEC
;
1191 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1198 static void help(void) {
1199 printf("%s [OPTIONS...]\n\n"
1200 "Manages devices.\n\n"
1201 " -h --help Print this message\n"
1202 " --version Print version of the program\n"
1203 " --daemon Detach and run in the background\n"
1204 " --debug Enable debug output\n"
1205 " --children-max=INT Set maximum number of workers\n"
1206 " --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1207 " --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1208 " --resolve-names=early|late|never\n"
1209 " When to resolve users and groups\n"
1210 , program_invocation_short_name
);
1213 static int parse_argv(int argc
, char *argv
[]) {
1214 static const struct option options
[] = {
1215 { "daemon", no_argument
, NULL
, 'd' },
1216 { "debug", no_argument
, NULL
, 'D' },
1217 { "children-max", required_argument
, NULL
, 'c' },
1218 { "exec-delay", required_argument
, NULL
, 'e' },
1219 { "event-timeout", required_argument
, NULL
, 't' },
1220 { "resolve-names", required_argument
, NULL
, 'N' },
1221 { "help", no_argument
, NULL
, 'h' },
1222 { "version", no_argument
, NULL
, 'V' },
1231 while ((c
= getopt_long(argc
, argv
, "c:de:DtN:hV", options
, NULL
)) >= 0) {
1237 arg_daemonize
= true;
1240 r
= safe_atou(optarg
, &arg_children_max
);
1242 log_warning("Invalid --children-max ignored: %s", optarg
);
1245 r
= safe_atoi(optarg
, &arg_exec_delay
);
1247 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1250 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1252 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1254 arg_event_timeout_usec
*= USEC_PER_SEC
;
1255 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1262 if (streq(optarg
, "early")) {
1263 arg_resolve_names
= 1;
1264 } else if (streq(optarg
, "late")) {
1265 arg_resolve_names
= 0;
1266 } else if (streq(optarg
, "never")) {
1267 arg_resolve_names
= -1;
1269 log_error("resolve-names must be early, late or never");
1277 printf("%s\n", VERSION
);
1282 assert_not_reached("Unhandled option");
1290 static int manager_new(Manager
**ret
) {
1291 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1292 struct epoll_event ep_ctrl
= { .events
= EPOLLIN
};
1293 struct epoll_event ep_inotify
= { .events
= EPOLLIN
};
1294 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
1295 struct epoll_event ep_netlink
= { .events
= EPOLLIN
};
1296 struct epoll_event ep_worker
= { .events
= EPOLLIN
};
1302 manager
= new0(Manager
, 1);
1306 manager
->fd_ep
= -1;
1307 manager
->fd_ctrl
= -1;
1308 manager
->fd_uevent
= -1;
1309 manager
->fd_signal
= -1;
1310 manager
->fd_inotify
= -1;
1311 manager
->worker_watch
[WRITE_END
] = -1;
1312 manager
->worker_watch
[READ_END
] = -1;
1314 manager
->udev
= udev_new();
1316 return log_error_errno(errno
, "could not allocate udev context: %m");
1318 udev_builtin_init(manager
->udev
);
1320 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1321 if (!manager
->rules
)
1322 return log_error_errno(ENOMEM
, "error reading rules");
1324 udev_list_node_init(&manager
->events
);
1325 udev_list_init(manager
->udev
, &manager
->properties
, true);
1327 r
= systemd_fds(&manager
->fd_ctrl
, &manager
->fd_uevent
);
1329 /* get control and netlink socket from systemd */
1330 manager
->ctrl
= udev_ctrl_new_from_fd(manager
->udev
, manager
->fd_ctrl
);
1332 return log_error_errno(EINVAL
, "error taking over udev control socket");
1334 manager
->monitor
= udev_monitor_new_from_netlink_fd(manager
->udev
, "kernel", manager
->fd_uevent
);
1335 if (!manager
->monitor
)
1336 return log_error_errno(EINVAL
, "error taking over netlink socket");
1338 /* get our own cgroup, we regularly kill everything udev has left behind */
1339 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &manager
->cgroup
);
1341 log_warning_errno(r
, "failed to get cgroup: %m");
1343 /* open control and netlink socket */
1344 manager
->ctrl
= udev_ctrl_new(manager
->udev
);
1346 return log_error_errno(EINVAL
, "error initializing udev control socket");
1348 manager
->fd_ctrl
= udev_ctrl_get_fd(manager
->ctrl
);
1350 manager
->monitor
= udev_monitor_new_from_netlink(manager
->udev
, "kernel");
1351 if (!manager
->monitor
)
1352 return log_error_errno(EINVAL
, "error initializing netlink socket");
1354 manager
->fd_uevent
= udev_monitor_get_fd(manager
->monitor
);
1356 (void) udev_monitor_set_receive_buffer_size(manager
->monitor
, 128 * 1024 * 1024);
1359 r
= udev_monitor_enable_receiving(manager
->monitor
);
1361 return log_error_errno(EINVAL
, "error binding netlink socket");
1363 r
= udev_ctrl_enable_receiving(manager
->ctrl
);
1365 return log_error_errno(EINVAL
, "error binding udev control socket");
1367 /* unnamed socket from workers to the main daemon */
1368 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1370 return log_error_errno(errno
, "error creating socketpair: %m");
1372 manager
->fd_worker
= manager
->worker_watch
[READ_END
];
1374 r
= setsockopt(manager
->fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1376 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1378 manager
->fd_inotify
= udev_watch_init(manager
->udev
);
1379 if (manager
->fd_inotify
< 0)
1380 return log_error_errno(ENOMEM
, "error initializing inotify");
1382 udev_watch_restore(manager
->udev
);
1384 /* block and listen to all signals on signalfd */
1386 sigprocmask(SIG_SETMASK
, &mask
, &manager
->sigmask_orig
);
1387 manager
->fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
1388 if (manager
->fd_signal
< 0)
1389 return log_error_errno(errno
, "error creating signalfd");
1391 ep_ctrl
.data
.fd
= manager
->fd_ctrl
;
1392 ep_inotify
.data
.fd
= manager
->fd_inotify
;
1393 ep_signal
.data
.fd
= manager
->fd_signal
;
1394 ep_netlink
.data
.fd
= manager
->fd_uevent
;
1395 ep_worker
.data
.fd
= manager
->fd_worker
;
1397 manager
->fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
1398 if (manager
->fd_ep
< 0)
1399 return log_error_errno(errno
, "error creating epoll fd: %m");
1401 if (epoll_ctl(manager
->fd_ep
, EPOLL_CTL_ADD
, manager
->fd_ctrl
, &ep_ctrl
) < 0 ||
1402 epoll_ctl(manager
->fd_ep
, EPOLL_CTL_ADD
, manager
->fd_inotify
, &ep_inotify
) < 0 ||
1403 epoll_ctl(manager
->fd_ep
, EPOLL_CTL_ADD
, manager
->fd_signal
, &ep_signal
) < 0 ||
1404 epoll_ctl(manager
->fd_ep
, EPOLL_CTL_ADD
, manager
->fd_uevent
, &ep_netlink
) < 0 ||
1405 epoll_ctl(manager
->fd_ep
, EPOLL_CTL_ADD
, manager
->fd_worker
, &ep_worker
) < 0)
1406 return log_error_errno(errno
, "fail to add fds to epoll: %m");
1414 int main(int argc
, char *argv
[]) {
1415 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1418 log_set_target(LOG_TARGET_AUTO
);
1419 log_parse_environment();
1422 r
= parse_argv(argc
, argv
);
1426 r
= parse_proc_cmdline(parse_proc_cmdline_item
);
1428 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1431 log_set_max_level(LOG_DEBUG
);
1433 if (getuid() != 0) {
1434 r
= log_error_errno(EPERM
, "root privileges required");
1438 if (arg_children_max
== 0) {
1441 arg_children_max
= 8;
1443 if (sched_getaffinity(0, sizeof (cpu_set
), &cpu_set
) == 0) {
1444 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1447 log_debug("set children_max to %u", arg_children_max
);
1450 /* before opening new files, make sure std{in,out,err} fds are in a sane state */
1451 if (arg_daemonize
) {
1454 fd
= open("/dev/null", O_RDWR
);
1456 log_error("cannot open /dev/null");
1458 if (write(STDOUT_FILENO
, 0, 0) < 0)
1459 dup2(fd
, STDOUT_FILENO
);
1460 if (write(STDERR_FILENO
, 0, 0) < 0)
1461 dup2(fd
, STDERR_FILENO
);
1462 if (fd
> STDERR_FILENO
)
1467 /* set umask before creating any file/directory */
1470 r
= log_error_errno(errno
, "could not change dir to /: %m");
1476 r
= mac_selinux_init("/dev");
1478 log_error_errno(r
, "could not initialize labelling: %m");
1482 r
= mkdir("/run/udev", 0755);
1483 if (r
< 0 && errno
!= EEXIST
) {
1484 r
= log_error_errno(errno
, "could not create /run/udev: %m");
1488 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1490 r
= manager_new(&manager
);
1494 log_info("starting version " VERSION
);
1496 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1498 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1500 if (arg_daemonize
) {
1508 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1511 mac_selinux_finish();
1513 _exit(EXIT_SUCCESS
);
1518 write_string_file("/proc/self/oom_score_adj", "-1000");
1520 sd_notify(1, "READY=1");
1523 static usec_t last_usec
;
1524 struct epoll_event ev
[8];
1527 bool is_worker
, is_signal
, is_inotify
, is_uevent
, is_ctrl
;
1530 if (manager
->exit
) {
1531 /* close sources of new events and discard buffered events */
1532 if (manager
->fd_ctrl
>= 0) {
1533 epoll_ctl(manager
->fd_ep
, EPOLL_CTL_DEL
, manager
->fd_ctrl
, NULL
);
1534 manager
->fd_ctrl
= safe_close(manager
->fd_ctrl
);
1537 if (manager
->monitor
) {
1538 epoll_ctl(manager
->fd_ep
, EPOLL_CTL_DEL
, manager
->fd_uevent
, NULL
);
1539 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
1542 if (manager
->fd_inotify
>= 0) {
1543 epoll_ctl(manager
->fd_ep
, EPOLL_CTL_DEL
, manager
->fd_inotify
, NULL
);
1544 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
1547 /* discard queued events and kill workers */
1548 event_queue_cleanup(manager
, EVENT_QUEUED
);
1549 manager_kill_workers(manager
);
1551 /* exit after all has cleaned up */
1552 if (udev_list_node_is_empty(&manager
->events
) && hashmap_isempty(manager
->workers
))
1555 /* timeout at exit for workers to finish */
1556 timeout
= 30 * MSEC_PER_SEC
;
1557 } else if (udev_list_node_is_empty(&manager
->events
) && hashmap_isempty(manager
->workers
)) {
1561 /* cleanup possible left-over processes in our cgroup */
1562 if (manager
->cgroup
)
1563 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, false, true, NULL
);
1565 /* kill idle or hanging workers */
1566 timeout
= 3 * MSEC_PER_SEC
;
1569 fdcount
= epoll_wait(manager
->fd_ep
, ev
, ELEMENTSOF(ev
), timeout
);
1574 struct worker
*worker
;
1578 if (manager
->exit
) {
1579 log_error("timeout, giving up waiting for workers to finish");
1583 /* kill idle workers */
1584 if (udev_list_node_is_empty(&manager
->events
)) {
1585 log_debug("cleanup idle workers");
1586 manager_kill_workers(manager
);
1589 /* check for hanging events */
1590 HASHMAP_FOREACH(worker
, manager
->workers
, j
) {
1591 struct event
*event
= worker
->event
;
1594 if (worker
->state
!= WORKER_RUNNING
)
1599 ts
= now(CLOCK_MONOTONIC
);
1601 if ((ts
- event
->start_usec
) > arg_event_timeout_warn_usec
) {
1602 if ((ts
- event
->start_usec
) > arg_event_timeout_usec
)
1603 on_event_timeout(NULL
, 0, event
);
1604 else if (!event
->warned
) {
1605 on_event_timeout_warning(NULL
, 0, event
);
1606 event
->warned
= true;
1613 is_worker
= is_signal
= is_inotify
= is_uevent
= is_ctrl
= false;
1614 for (i
= 0; i
< fdcount
; i
++) {
1615 if (ev
[i
].data
.fd
== manager
->fd_worker
&& ev
[i
].events
& EPOLLIN
)
1617 else if (ev
[i
].data
.fd
== manager
->fd_uevent
&& ev
[i
].events
& EPOLLIN
)
1619 else if (ev
[i
].data
.fd
== manager
->fd_signal
&& ev
[i
].events
& EPOLLIN
)
1621 else if (ev
[i
].data
.fd
== manager
->fd_inotify
&& ev
[i
].events
& EPOLLIN
)
1623 else if (ev
[i
].data
.fd
== manager
->fd_ctrl
&& ev
[i
].events
& EPOLLIN
)
1627 /* check for changed config, every 3 seconds at most */
1628 if ((now(CLOCK_MONOTONIC
) - last_usec
) > 3 * USEC_PER_SEC
) {
1629 if (udev_rules_check_timestamp(manager
->rules
))
1630 manager
->reload
= true;
1631 if (udev_builtin_validate(manager
->udev
))
1632 manager
->reload
= true;
1634 last_usec
= now(CLOCK_MONOTONIC
);
1637 /* reload requested, HUP signal received, rules changed, builtin changed */
1638 if (manager
->reload
) {
1639 manager_kill_workers(manager
);
1640 manager
->rules
= udev_rules_unref(manager
->rules
);
1641 udev_builtin_exit(manager
->udev
);
1642 manager
->reload
= false;
1645 /* event has finished */
1647 on_worker(NULL
, manager
->fd_worker
, 0, manager
);
1649 /* uevent from kernel */
1651 on_uevent(NULL
, manager
->fd_uevent
, 0, manager
);
1653 /* start new events */
1654 if (!udev_list_node_is_empty(&manager
->events
) && !manager
->exit
&& !manager
->stop_exec_queue
) {
1655 udev_builtin_init(manager
->udev
);
1656 if (!manager
->rules
)
1657 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1659 event_queue_start(manager
);
1663 struct signalfd_siginfo fdsi
;
1666 size
= read(manager
->fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
1667 if (size
== sizeof(struct signalfd_siginfo
)) {
1668 switch (fdsi
.ssi_signo
) {
1671 on_sigterm(NULL
, &fdsi
, manager
);
1674 on_sighup(NULL
, &fdsi
, manager
);
1677 on_sigchld(NULL
, &fdsi
, manager
);
1683 /* we are shutting down, the events below are not handled anymore */
1687 /* device node watch */
1689 on_inotify(NULL
, manager
->fd_inotify
, 0, manager
);
1692 * This needs to be after the inotify handling, to make sure,
1693 * that the ping is send back after the possibly generated
1694 * "change" events by the inotify device node watch.
1697 on_ctrl_msg(NULL
, manager
->fd_ctrl
, 0, manager
);
1702 udev_ctrl_cleanup(manager
->ctrl
);
1703 mac_selinux_finish();
1705 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;