/*
 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
33 #include <sys/prctl.h>
34 #include <sys/socket.h>
35 #include <sys/signalfd.h>
36 #include <sys/epoll.h>
37 #include <sys/mount.h>
40 #include <sys/ioctl.h>
41 #include <sys/inotify.h>
43 #include "sd-daemon.h"
46 #include "signal-util.h"
47 #include "event-util.h"
48 #include "rtnl-util.h"
49 #include "cgroup-util.h"
50 #include "process-util.h"
51 #include "dev-setup.h"
53 #include "selinux-util.h"
55 #include "udev-util.h"
56 #include "formats-util.h"
/* Daemon-wide settings, set from the command line and/or the kernel
 * command line (see parse_proc_cmdline_item() below). */
static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;          /* resolve-names mode passed to udev_rules_new() */
static unsigned arg_children_max;          /* max concurrent workers; 0 presumably means "pick a default" — confirm against startup code (not in this excerpt) */
static int arg_exec_delay;                 /* delay applied to spawned programs, in seconds (multiplied elsewhere) */
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;            /* kill a worker after this long on one event */
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;   /* warn at one third of the kill timeout */
/* Central daemon state.
 * NOTE(review): this excerpt is incomplete — several members, the closing
 * "} Manager;", and the "struct event {" header of the second structure
 * were dropped by the extraction.  Do not treat field lists as exhaustive. */
typedef struct Manager {
        struct udev_list_node events;         /* queue of pending struct event, linked via event->node */
        pid_t pid;                            /* the process that originally allocated the manager object */
        struct udev_rules *rules;
        struct udev_list properties;          /* global properties set via the control socket (ENV messages) */
        struct udev_monitor *monitor;         /* kernel uevent netlink monitor */
        struct udev_ctrl *ctrl;               /* udevadm control socket */
        struct udev_ctrl_connection *ctrl_conn_blocking;  /* held to block a client until EXIT completes */
        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;
        bool stop_exec_queue:1;               /* set via STOP_EXEC_QUEUE control message */
        /* NOTE(review): end of Manager and start of "struct event" are missing here. */
        struct udev_list_node node;           /* link in Manager.events */
        struct udev_device *dev;              /* device as processed (amended) by udev */
        struct udev_device *dev_kernel;       /* clone of the original kernel uevent, forwarded unamended on worker failure */
        struct worker *worker;                /* worker currently handling this event, or NULL */
        enum event_state state;
        unsigned long long int delaying_seqnum;  /* seqnum of an earlier event this one waits on (see is_devpath_busy) */
        unsigned long long int seqnum;
        const char *devpath_old;              /* previous devpath, used for rename dependency checks */
        sd_event_source *timeout_warning;     /* fires on_event_timeout_warning */
        sd_event_source *timeout;             /* fires on_event_timeout (kills the worker) */
120 static inline struct event
*node_to_event(struct udev_list_node
*node
) {
121 return container_of(node
, struct event
, node
);
/* Forward declaration: event_queue_cleanup() is used before its definition. */
static void event_queue_cleanup(Manager *manager, enum event_state type);

/* Per-worker-process bookkeeping.
 * NOTE(review): the "struct worker {" header and several members (refcount,
 * pid, event pointer — all referenced by code below) were dropped by the
 * extraction. */
        struct udev_list_node node;
        struct udev_monitor *monitor;    /* private netlink monitor used to hand devices to this worker */
        enum worker_state state;

/* passed from worker to main process */
struct worker_message {
/* Release an event: unlink it from the manager's queue, drop the device
 * references and timer sources, detach it from its worker, and remove
 * /run/udev/queue once the queue becomes empty.
 * NOTE(review): several original lines are missing from this excerpt
 * (NULL guard, the worker-attached condition, error check on unlink(),
 * the final free()) — this is not the complete function body. */
static void event_free(struct event *event) {
        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        /* detach from the worker that was running this event */
        event->worker->event = NULL;
        assert(event->manager);

        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        /* NOTE(review): this warning is presumably gated on r < 0 in the dropped line above */
                        log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
/* Tear down a worker: remove it from the manager's pid-keyed hashmap,
 * drop its monitor, and free any event it was still handling.
 * NOTE(review): NULL/refcount guards and the final free() are missing
 * from this excerpt. */
static void worker_free(struct worker *worker) {
        assert(worker->manager);

        hashmap_remove(worker->manager->workers, UINT_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);
/* Free every tracked worker and the workers hashmap itself.
 * NOTE(review): the loop body (presumably worker_free(worker);) and the
 * Iterator declaration for 'i' were dropped by the extraction. */
static void manager_workers_free(Manager *manager) {
        struct worker *worker;

        HASHMAP_FOREACH(worker, manager->workers, i)

        manager->workers = hashmap_free(manager->workers);
/* Allocate a worker for the child process 'pid', take over the (already
 * forked-off) worker_monitor for sending it devices, and register it in
 * manager->workers keyed by pid.
 * NOTE(review): error checks after new0()/hashmap calls, the *ret
 * assignment and the return statements are missing from this excerpt. */
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;

        assert(worker_monitor);

        worker = new0(struct worker, 1);
        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        r = hashmap_put(manager->workers, UINT_TO_PTR(pid), worker);
/* Hard-timeout handler: the event took longer than arg_event_timeout_usec,
 * so kill the worker handling it (SIGKILL, with SIGCONT to wake a stopped
 * process) and mark it killed so on_sigchld treats the exit as expected.
 * NOTE(review): the assert(event) and return statement are missing from
 * this excerpt. */
static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
/* Soft-timeout handler: just log that the event is slow; the hard timeout
 * (on_event_timeout) will kill the worker later if it never finishes.
 * NOTE(review): asserts and the return statement are missing from this
 * excerpt. */
static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
/* Bind an event to a worker (both directions), mark both as running, and
 * arm the per-event warning and kill timers relative to "now" on the
 * boottime/monotonic clock.
 * NOTE(review): the declarations of 'e', 'r' and 'usec', some asserts and
 * the error check after sd_event_now() are missing from this excerpt. */
static void worker_attach_event(struct worker *worker, struct event *event) {
        assert(worker->manager);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        r = sd_event_now(e, clock_boottime_or_monotonic(), &usec);

        /* timer failures are deliberately ignored: worst case the event is never killed */
        (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
/* Free all manager-owned resources: event sources, libudev objects, the
 * worker table, the pending event queue, sockets and fds.
 * NOTE(review): the NULL guard at the top and the final free(manager) are
 * missing from this excerpt. */
static void manager_free(Manager *manager) {
        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);
/* Presumably generates manager_freep() for use with _cleanup_ attributes —
 * confirm against the macro definition in macro.h. */
DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
319 static int worker_send_message(int fd
) {
320 struct worker_message message
= {};
322 return loop_write(fd
, &message
, sizeof(message
), false);
/* Fork a worker child to process 'event'.  The child sets up its own
 * netlink monitor (unicast from the main daemon), drops all inherited
 * manager state, processes the initial device, then loops on epoll waiting
 * for further devices or a termination signal.  The parent registers the
 * child via worker_new() and attaches the event to it.
 * NOTE(review): this excerpt is heavily truncated — the fork() itself, the
 * signal-mask setup for 'mask', many error-handling branches, loop braces
 * and the parent/child split are missing.  Comments below only annotate
 * the surviving fragments. */
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        udev_monitor_enable_receiving(worker_monitor);

        /* --- child side (fork missing from excerpt) --- */
        struct udev_device *dev = NULL;
        _cleanup_rtnl_unref_ sd_rtnl *rtnl = NULL;
        _cleanup_close_ int fd_signal = -1, fd_ep = -1;
        struct epoll_event ep_signal = { .events = EPOLLIN };
        struct epoll_event ep_monitor = { .events = EPOLLIN };

        /* take initial device from queue */

        unsetenv("NOTIFY_SOCKET");

        /* drop state inherited from the main daemon; the child must not
         * touch the parent's sockets, queue or event loop */
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        manager->monitor = udev_monitor_unref(manager->monitor);
        manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);
        /* NOTE(review): ctrl_conn_blocking is unreffed twice in this excerpt — the
         * second line presumably unreffed a different member in the original; verify. */
        manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
        manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->inotify_event = sd_event_source_unref(manager->inotify_event);

        manager->event = sd_event_unref(manager->event);

        /* NOTE(review): initialization of 'mask' is missing from this excerpt */
        fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        r = log_error_errno(errno, "error creating signalfd %m");

        ep_signal.data.fd = fd_signal;

        fd_monitor = udev_monitor_get_fd(worker_monitor);
        ep_monitor.data.fd = fd_monitor;

        fd_ep = epoll_create1(EPOLL_CLOEXEC);
        r = log_error_errno(errno, "error creating epoll fd: %m");

        if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
            epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                r = log_error_errno(errno, "fail to add fds to epoll: %m");

        /* request TERM signal if parent exits */
        prctl(PR_SET_PDEATHSIG, SIGTERM);

        /* reset OOM score, we only protect the main daemon */
        write_string_file("/proc/self/oom_score_adj", "0");

        /* --- per-device processing loop (loop header missing from excerpt) --- */
        struct udev_event *udev_event;

        log_debug("seq %llu running", udev_device_get_seqnum(dev));
        udev_event = udev_event_new(dev);
        if (udev_event == NULL) {

        if (arg_exec_delay > 0)
                udev_event->exec_delay = arg_exec_delay;

        /*
         * Take a shared lock on the device node; this establishes
         * a concept of device "ownership" to serialize device
         * access. External processes holding an exclusive lock will
         * cause udev to skip the event handling; in the case udev
         * acquired the lock, the external process can block until
         * udev has finished its event handling.
         */
        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
            streq_ptr("block", udev_device_get_subsystem(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-") &&
            !startswith(udev_device_get_sysname(dev), "md")) {
                struct udev_device *d = dev;

                /* lock the whole disk, not an individual partition */
                if (streq_ptr("partition", udev_device_get_devtype(d)))
                        d = udev_device_get_parent(d);

                fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                        log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                        fd_lock = safe_close(fd_lock);

        /* needed for renaming netifs */
        udev_event->rtnl = rtnl;

        /* apply rules, create node, symlinks */
        udev_event_execute_rules(udev_event,
                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                 &manager->properties,

        udev_event_execute_run(udev_event,
                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

        if (udev_event->rtnl)
                /* in case rtnl was initialized */
                rtnl = sd_rtnl_ref(udev_event->rtnl);

        /* apply/restore inotify watch */
        if (udev_event->inotify_watch) {
                udev_watch_begin(udev, dev);
                udev_device_update_db(dev);

        /* send processed event back to libudev listeners */
        udev_monitor_send_device(worker_monitor, NULL, dev);

        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

        /* send udevd the result of the event execution */
        r = worker_send_message(manager->worker_watch[WRITE_END]);
        /* NOTE(review): presumably gated on r < 0 in a dropped line */
        log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                        udev_device_get_seqnum(dev));

        udev_device_unref(dev);

        udev_event_unref(udev_event);

        /* wait for more device messages from main udevd, or term signal */
        while (dev == NULL) {
                struct epoll_event ev[4];

                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                r = log_error_errno(errno, "failed to poll: %m");

                for (i = 0; i < fdcount; i++) {
                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                dev = udev_monitor_receive_device(worker_monitor);
                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                struct signalfd_siginfo fdsi;

                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                if (size != sizeof(struct signalfd_siginfo))
                                switch (fdsi.ssi_signo) {

        /* child exit path */
        udev_device_unref(dev);
        manager_free(manager);
        _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);

        /* --- parent side: fork-failure and success handling (fork/switch missing) --- */
        event->state = EVENT_QUEUED;
        log_error_errno(errno, "fork of child failed: %m");
        struct worker *worker;

        r = worker_new(&worker, manager, worker_monitor, pid);

        worker_attach_event(worker, event);

        log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
/* Dispatch an event: hand it to an idle worker if one accepts it, otherwise
 * spawn a new worker — unless the children limit is already reached, in
 * which case the event stays queued.
 * NOTE(review): the Iterator 'i', 'count' declaration, the continue/return
 * branches and several braces were dropped by the extraction. */
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state != WORKER_IDLE)

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                /* NOTE(review): presumably gated on count < 0 in a dropped line */
                log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                kill(worker->pid, SIGKILL);
                worker->state = WORKER_KILLED;

                worker_attach_event(worker, event);

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
/* Wrap a freshly received kernel device in a struct event, cache the fields
 * used for dependency checks (seqnum, devpath, devnum, ifindex), create the
 * /run/udev/queue flag file on first insertion, and append the event to the
 * manager's queue.
 * NOTE(review): the 'event' declaration, allocation error check and return
 * statements are missing from this excerpt. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid();

        assert(manager->pid == getpid());

        event = new0(struct event, 1);

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        /* keep an unamended copy of the kernel event for failure forwarding */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (udev_list_node_is_empty(&manager->events)) {
                /* tells udevadm settle that work is pending */
                r = touch("/run/udev/queue");
                /* NOTE(review): presumably gated on r < 0 in a dropped line */
                log_warning_errno(r, "could not touch /run/udev/queue: %m");

        udev_list_node_append(&event->node, &manager->events);
/* Send SIGTERM to every worker not already marked killed; on_sigchld reaps
 * them.  NOTE(review): the Iterator 'i' declaration and the continue after
 * the WORKER_KILLED check are missing from this excerpt. */
static void manager_kill_workers(Manager *manager) {
        struct worker *worker;

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
/* lookup event for identical, parent, child device */
/* Returns whether 'event' must wait for an earlier event in the queue:
 * same device (major/minor or ifindex), a rename source, or a parent/child
 * devpath relationship.  delaying_seqnum caches the blocking event so
 * earlier queue entries are skipped on re-checks.
 * NOTE(review): the 'common' declaration and the continue/return-true/
 * return-false statements inside the branches were dropped by the
 * extraction. */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct udev_list_node *loop;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &manager->events) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event, earlier can not block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                        event->delaying_seqnum = loop_event->seqnum;

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;

        /* no matching device */
/* Fired when workers do not finish within the shutdown grace period armed
 * in manager_exit(): abandon them and leave the event loop with -ETIMEDOUT.
 * NOTE(review): assert and return statement missing from this excerpt. */
static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);
/* Begin orderly shutdown: close all event sources so no new work arrives,
 * drop queued events, ask workers to terminate, and arm a 30s timeout after
 * which on_exit_timeout() forces the loop to exit.
 * NOTE(review): the sd_notify() call around the STATUS string and the
 * 'r'/'usec' declarations are missing from this excerpt. */
static void manager_exit(Manager *manager) {
        manager->exit = true;

        "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl = udev_ctrl_unref(manager->ctrl);
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);

        manager->fd_inotify = safe_close(manager->fd_inotify);
        manager->inotify_event = sd_event_source_unref(manager->inotify_event);

        manager->monitor = udev_monitor_unref(manager->monitor);
        manager->uevent_event = sd_event_source_unref(manager->uevent_event);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        r = sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec);

        r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
/* reload requested, HUP signal received, rules changed, builtin changed */
/* Drop compiled rules and builtin state; both are lazily re-created by
 * event_queue_start().  Workers are killed so they pick up the new rules.
 * NOTE(review): the sd_notify() calls around the STATUS strings were
 * dropped by the extraction. */
static void manager_reload(Manager *manager) {
        "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        "STATUS=Processing...");
/* Walk the queue and dispatch every EVENT_QUEUED entry whose devpath
 * dependencies are satisfied.  Re-checks rule/builtin timestamps at most
 * every 3 seconds and lazily (re)loads the rules.
 * NOTE(review): 'r'/'usec' declarations, early returns and the continue
 * statements inside the loop are missing from this excerpt. */
static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;

        if (udev_list_node_is_empty(&manager->events) ||
            manager->exit || manager->stop_exec_queue)

        r = sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);

        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))

                event_run(manager, event);
/* Free all queued events whose state matches 'match_type'; EVENT_UNDEF
 * matches everything.  Uses the _safe foreach since event_free() unlinks.
 * NOTE(review): the continue and event_free(event) lines in the loop body
 * were dropped by the extraction. */
static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &manager->events) {
                struct event *event = node_to_event(loop);

                if (match_type != EVENT_UNDEF && match_type != event->state)
/* Handle a completion message from a worker on the worker-watch socket.
 * The payload is empty; the worker is identified by the SCM_CREDENTIALS
 * pid.  Marks the worker idle, frees its event and re-runs the scheduler.
 * NOTE(review): the outer receive loop, the 'control' union declaration,
 * several msghdr members, 'size' declaration and the continue/return
 * statements are missing from this excerpt. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        struct worker_message msg;
        struct iovec iovec = {
                .iov_len = sizeof(msg),
        struct cmsghdr cmsghdr;
        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
        struct msghdr msghdr = {
                .msg_control = &control,
                .msg_controllen = sizeof(control),
        struct cmsghdr *cmsg;
        struct ucred *ucred = NULL;
        struct worker *worker;

        size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
        else if (errno == EAGAIN)
                /* nothing more to read */

        return log_error_errno(errno, "failed to receive message: %m");
        } else if (size != sizeof(struct worker_message)) {
                log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);

        /* find the pid the kernel attached to the message */
        for (cmsg = CMSG_FIRSTHDR(&msghdr); cmsg; cmsg = CMSG_NXTHDR(&msghdr, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_CREDENTIALS &&
                    cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                        ucred = (struct ucred *) CMSG_DATA(cmsg);

        if (!ucred || ucred->pid <= 0) {
                log_warning_errno(EIO, "ignoring worker message without valid PID");

        /* lookup worker who sent the signal */
        worker = hashmap_get(manager->workers, UINT_TO_PTR(ucred->pid));
        /* NOTE(review): presumably gated on worker == NULL in a dropped line */
        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);

        if (worker->state != WORKER_KILLED)
                worker->state = WORKER_IDLE;

        /* worker returned */
        event_free(worker->event);

        /* we have free workers, try to schedule events */
        event_queue_start(manager);
/* Receive one kernel uevent from the netlink monitor, queue it and run the
 * scheduler.  Also invoked directly by on_inotify() with a dummy fd to
 * flush self-synthesized events.
 * NOTE(review): the NULL check on 'dev', 'r' declaration, the error branch
 * after event_queue_insert() and the return are missing from this excerpt. */
static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;

        dev = udev_monitor_receive_device(manager->monitor);
        udev_device_ensure_usec_initialized(dev, NULL);
        r = event_queue_insert(manager, dev);
        /* NOTE(review): the unref presumably sits in the insert-failure branch */
        udev_device_unref(dev);

        /* we have fresh events, try to schedule them */
        event_queue_start(manager);
/* receive the udevd message from userspace */
/* Dispatch one control message from udevadm: log level, queue start/stop,
 * reload, global ENV properties, children-max, ping and exit.
 * NOTE(review): several guards (NULL checks on ctrl_conn/ctrl_msg/val),
 * the 'i'/'str'/'val' declarations, strdup of the key and the return are
 * missing from this excerpt. */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
        log_set_max_level(i);
        /* workers inherit the log level at fork, so recycle them */
        manager_kill_workers(manager);

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);

        str = udev_ctrl_get_set_env(ctrl_msg);
        _cleanup_free_ char *key = NULL;

        val = strchr(key, '=');
        /* empty value unsets the property, otherwise it is set */
        if (val[0] == '\0') {
                log_debug("udevd message (ENV) received, unset '%s'", key);
                udev_list_entry_add(&manager->properties, key, NULL);
        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
        udev_list_entry_add(&manager->properties, key, val);
        log_error("wrong key format '%s'", key);
        manager_kill_workers(manager);

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
        arg_children_max = i;

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
/* After a device node was closed for writing, synthesize "change" uevents
 * via sysfs so listeners re-probe it.  For whole disks (excluding dm-*),
 * first try BLKRRPART under an exclusive flock; if the disk has partitions
 * and the re-read succeeded, the kernel already emitted the needed events.
 * NOTE(review): 'fd'/'r' declarations, error-check branches, 'continue'
 * statements and the return are missing from this excerpt. */
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.  (comment truncated in this excerpt)
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                r = flock(fd, LOCK_EX|LOCK_NB);
                r = ioctl(fd, BLKRRPART, 0);
                part_table_read = true;

                /* search for partitions */
                e = udev_enumerate_new(udev);

                r = udev_enumerate_add_match_parent(e, dev);

                r = udev_enumerate_add_match_subsystem(e, "block");

                r = udev_enumerate_scan_devices(e);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))

                        has_partitions = true;

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.  (comment truncated in this excerpt)
                 */
                if (part_table_read && has_partitions)

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change");

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change");

        /* non-disk devices: just poke the device itself */
        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change");
/* Handle inotify events on watched device nodes: IN_CLOSE_WRITE triggers
 * a synthesized "change" uevent (and an immediate on_uevent() call so the
 * queue state is up to date for settle); IN_IGNORED ends the watch.
 * NOTE(review): 'l' declaration, the l < 0 guard around the errno checks,
 * the continue for unknown watch descriptors and the return are missing
 * from this excerpt. */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;

        l = read(fd, &buffer, sizeof(buffer));
        if (errno == EAGAIN || errno == EINTR)

        return log_error_errno(errno, "Failed to read inotify fd: %m");

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                dev = udev_watch_lookup(manager->udev, e->wd);

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that. */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
/* SIGINT/SIGTERM handler: begin orderly shutdown.
 * NOTE(review): asserts and the return are missing from this excerpt. */
static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        manager_exit(manager);
/* SIGHUP handler: reload rules and builtins.
 * NOTE(review): asserts and the return are missing from this excerpt. */
static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        manager_reload(manager);
/* SIGCHLD handler: reap all exited workers.  For a worker that failed while
 * handling an event, delete the event's on-disk state and forward the
 * original kernel uevent unamended so listeners still see it.  Finally
 * re-run the scheduler since worker slots freed up.
 * NOTE(review): the reaping for(;;) loop header, 'pid'/'status'
 * declarations, loop-exit conditions and the return are missing from this
 * excerpt. */
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        struct worker *worker;

        pid = waitpid(-1, &status, WNOHANG);

        worker = hashmap_get(manager->workers, UINT_TO_PTR(pid));
        /* NOTE(review): presumably gated on worker == NULL in a dropped line */
        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);

        if (WIFEXITED(status)) {
                if (WEXITSTATUS(status) == 0)
                        log_debug("worker ["PID_FMT"] exited", pid);
                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
        } else if (WIFSIGNALED(status)) {
                log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
        } else if (WIFSTOPPED(status)) {
                log_info("worker ["PID_FMT"] stopped", pid);
        } else if (WIFCONTINUED(status)) {
                log_info("worker ["PID_FMT"] continued", pid);
        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

        if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                if (worker->event) {
                        log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                        /* delete state from disk */
                        udev_device_delete_db(worker->event->dev);
                        udev_device_tag_index(worker->event->dev, NULL, false);
                        /* forward kernel event without amending it */
                        udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);

        worker_free(worker);

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);
/* Post-iteration hook of the event loop: when the queue is empty, recycle
 * idle workers, finish a pending exit, or sweep leftover processes in our
 * cgroup.  NOTE(review): 'r' declaration, error handling after
 * sd_event_exit() and the return are missing from this excerpt. */
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        if (udev_list_node_is_empty(&manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);

                if (manager->exit) {
                        r = sd_event_exit(manager->event, 0);
                } else if (manager->cgroup)
                        /* cleanup possible left-over processes in our cgroup */
                        cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
/* Obtain the control and netlink sockets: prefer fds passed in by systemd
 * socket activation (sd_listen_fds), otherwise create them via libudev and
 * keep dup'd copies (F_DUPFD_CLOEXEC) that outlive the temporary libudev
 * objects.  NOTE(review): 'n'/'fd'/'r' declarations, the ctrl_fd branch of
 * the activation loop, the udev_new() call, the negative-result guards
 * around the error returns, '*rctrl' assignment and the final return are
 * missing from this excerpt. */
static int listen_fds(int *rctrl, int *rnetlink) {
        _cleanup_udev_unref_ struct udev *udev = NULL;
        int ctrl_fd = -1, netlink_fd = -1;

        n = sd_listen_fds(true);

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)

        _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;

        ctrl = udev_ctrl_new(udev);
        return log_error_errno(EINVAL, "error initializing udev control socket");

        r = udev_ctrl_enable_receiving(ctrl);
        return log_error_errno(EINVAL, "error binding udev control socket");

        fd = udev_ctrl_get_fd(ctrl);
        return log_error_errno(EIO, "could not get ctrl fd");

        ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
        return log_error_errno(errno, "could not dup ctrl fd: %m");

        if (netlink_fd < 0) {
                _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;

                monitor = udev_monitor_new_from_netlink(udev, "kernel");
                return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                return log_error_errno(netlink_fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                return log_error_errno(errno, "could not dup netlink fd: %m");

        *rnetlink = netlink_fd;
1353 * read the kernel command line, in case we need to get into debug mode
1354 * udev.log-priority=<level> syslog priority
1355 * udev.children-max=<number of workers> events are fully serialized if set to 1
1356 * udev.exec-delay=<number of seconds> delay execution of every executed program
1357 * udev.event-timeout=<number of seconds> seconds to wait before terminating an event
1359 static int parse_proc_cmdline_item(const char *key
, const char *value
) {
1367 if (startswith(key
, "rd."))
1368 key
+= strlen("rd.");
1370 if (startswith(key
, "udev."))
1371 key
+= strlen("udev.");
1375 if (streq(key
, "log-priority")) {
1378 prio
= util_log_priority(value
);
1379 log_set_max_level(prio
);
1380 } else if (streq(key
, "children-max")) {
1381 r
= safe_atou(value
, &arg_children_max
);
1383 log_warning("invalid udev.children-max ignored: %s", value
);
1384 } else if (streq(key
, "exec-delay")) {
1385 r
= safe_atoi(value
, &arg_exec_delay
);
1387 log_warning("invalid udev.exec-delay ignored: %s", value
);
1388 } else if (streq(key
, "event-timeout")) {
1389 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1391 log_warning("invalid udev.event-timeout ignored: %s", value
);
1393 arg_event_timeout_usec
*= USEC_PER_SEC
;
1394 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1401 static void help(void) {
1402 printf("%s [OPTIONS...]\n\n"
1403 "Manages devices.\n\n"
1404 " -h --help Print this message\n"
1405 " --version Print version of the program\n"
1406 " --daemon Detach and run in the background\n"
1407 " --debug Enable debug output\n"
1408 " --children-max=INT Set maximum number of workers\n"
1409 " --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1410 " --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1411 " --resolve-names=early|late|never\n"
1412 " When to resolve users and groups\n"
1413 , program_invocation_short_name
);
1416 static int parse_argv(int argc
, char *argv
[]) {
1417 static const struct option options
[] = {
1418 { "daemon", no_argument
, NULL
, 'd' },
1419 { "debug", no_argument
, NULL
, 'D' },
1420 { "children-max", required_argument
, NULL
, 'c' },
1421 { "exec-delay", required_argument
, NULL
, 'e' },
1422 { "event-timeout", required_argument
, NULL
, 't' },
1423 { "resolve-names", required_argument
, NULL
, 'N' },
1424 { "help", no_argument
, NULL
, 'h' },
1425 { "version", no_argument
, NULL
, 'V' },
1434 while ((c
= getopt_long(argc
, argv
, "c:de:DtN:hV", options
, NULL
)) >= 0) {
1440 arg_daemonize
= true;
1443 r
= safe_atou(optarg
, &arg_children_max
);
1445 log_warning("Invalid --children-max ignored: %s", optarg
);
1448 r
= safe_atoi(optarg
, &arg_exec_delay
);
1450 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1453 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1455 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1457 arg_event_timeout_usec
*= USEC_PER_SEC
;
1458 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1465 if (streq(optarg
, "early")) {
1466 arg_resolve_names
= 1;
1467 } else if (streq(optarg
, "late")) {
1468 arg_resolve_names
= 0;
1469 } else if (streq(optarg
, "never")) {
1470 arg_resolve_names
= -1;
1472 log_error("resolve-names must be early, late or never");
1480 printf("%s\n", VERSION
);
1485 assert_not_reached("Unhandled option");
1493 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1494 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1498 manager
= new0(Manager
, 1);
1502 manager
->fd_inotify
= -1;
1503 manager
->worker_watch
[WRITE_END
] = -1;
1504 manager
->worker_watch
[READ_END
] = -1;
1506 manager
->udev
= udev_new();
1508 return log_error_errno(errno
, "could not allocate udev context: %m");
1510 udev_builtin_init(manager
->udev
);
1512 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1513 if (!manager
->rules
)
1514 return log_error_errno(ENOMEM
, "error reading rules");
1516 udev_list_node_init(&manager
->events
);
1517 udev_list_init(manager
->udev
, &manager
->properties
, true);
1519 manager
->cgroup
= cgroup
;
1521 manager
->ctrl
= udev_ctrl_new_from_fd(manager
->udev
, fd_ctrl
);
1523 return log_error_errno(EINVAL
, "error taking over udev control socket");
1525 manager
->monitor
= udev_monitor_new_from_netlink_fd(manager
->udev
, "kernel", fd_uevent
);
1526 if (!manager
->monitor
)
1527 return log_error_errno(EINVAL
, "error taking over netlink socket");
1535 static int manager_listen(Manager
*manager
) {
1536 int r
, fd_worker
, one
= 1;
1540 /* unnamed socket from workers to the main daemon */
1541 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1543 return log_error_errno(errno
, "error creating socketpair: %m");
1545 fd_worker
= manager
->worker_watch
[READ_END
];
1547 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1549 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1551 manager
->fd_inotify
= udev_watch_init(manager
->udev
);
1552 if (manager
->fd_inotify
< 0)
1553 return log_error_errno(ENOMEM
, "error initializing inotify");
1555 udev_watch_restore(manager
->udev
);
1557 /* block and listen to all signals on signalfd */
1558 assert_se(sigprocmask_many(SIG_BLOCK
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) == 0);
1560 r
= sd_event_default(&manager
->event
);
1562 return log_error_errno(errno
, "could not allocate event loop: %m");
1564 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1566 return log_error_errno(r
, "error creating sigint event source: %m");
1568 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1570 return log_error_errno(r
, "error creating sigterm event source: %m");
1572 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1574 return log_error_errno(r
, "error creating sighup event source: %m");
1576 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1578 return log_error_errno(r
, "error creating sigchld event source: %m");
1580 r
= sd_event_set_watchdog(manager
->event
, true);
1582 return log_error_errno(r
, "error creating watchdog event source: %m");
1584 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, udev_ctrl_get_fd(manager
->ctrl
), EPOLLIN
, on_ctrl_msg
, manager
);
1586 return log_error_errno(r
, "error creating ctrl event source: %m");
1588 /* This needs to be after the inotify and uevent handling, to make sure
1589 * that the ping is send back after fully processing the pending uevents
1590 * (including the synthetic ones we may create due to inotify events).
1592 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1594 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1596 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1598 return log_error_errno(r
, "error creating inotify event source: %m");
1600 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, udev_monitor_get_fd(manager
->monitor
), EPOLLIN
, on_uevent
, manager
);
1602 return log_error_errno(r
, "error creating uevent event source: %m");
1604 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1606 return log_error_errno(r
, "error creating worker event source: %m");
1608 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1610 return log_error_errno(r
, "error creating post event source: %m");
1615 int main(int argc
, char *argv
[]) {
1616 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1617 _cleanup_free_
char *cgroup
= NULL
;
1618 int r
, fd_ctrl
, fd_uevent
;
1620 log_set_target(LOG_TARGET_AUTO
);
1621 log_parse_environment();
1624 r
= parse_argv(argc
, argv
);
1628 r
= parse_proc_cmdline(parse_proc_cmdline_item
);
1630 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1633 log_set_max_level(LOG_DEBUG
);
1635 if (getuid() != 0) {
1636 r
= log_error_errno(EPERM
, "root privileges required");
1640 if (arg_children_max
== 0) {
1643 arg_children_max
= 8;
1645 if (sched_getaffinity(0, sizeof (cpu_set
), &cpu_set
) == 0) {
1646 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1649 log_debug("set children_max to %u", arg_children_max
);
1652 /* set umask before creating any file/directory */
1655 r
= log_error_errno(errno
, "could not change dir to /: %m");
1661 r
= mac_selinux_init("/dev");
1663 log_error_errno(r
, "could not initialize labelling: %m");
1667 r
= mkdir("/run/udev", 0755);
1668 if (r
< 0 && errno
!= EEXIST
) {
1669 r
= log_error_errno(errno
, "could not create /run/udev: %m");
1673 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1675 if (getppid() == 1) {
1676 /* get our own cgroup, we regularly kill everything udev has left behind
1677 we only do this on systemd systems, and only if we are directly spawned
1678 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1679 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1681 log_warning_errno(r
, "failed to get cgroup: %m");
1684 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1686 r
= log_error_errno(r
, "could not listen on fds: %m");
1690 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1694 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1696 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1698 if (arg_daemonize
) {
1701 log_info("starting version " VERSION
);
1708 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1711 mac_selinux_finish();
1713 _exit(EXIT_SUCCESS
);
1718 write_string_file("/proc/self/oom_score_adj", "-1000");
1721 r
= manager_listen(manager
);
1723 return log_error_errno(r
, "failed to set up fds and listen for events: %m");
1725 (void) sd_notify(false,
1727 "STATUS=Processing...");
1729 r
= sd_event_loop(manager
->event
);
1731 log_error_errno(r
, "event loop failed: %m");
1735 sd_event_get_exit_code(manager
->event
, &r
);
1740 "STATUS=Shutting down...");
1743 udev_ctrl_cleanup(manager
->ctrl
);
1744 mac_selinux_finish();
1746 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;