2 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
30 #include <sys/epoll.h>
32 #include <sys/inotify.h>
33 #include <sys/ioctl.h>
34 #include <sys/mount.h>
35 #include <sys/prctl.h>
36 #include <sys/signalfd.h>
37 #include <sys/socket.h>
43 #include "sd-daemon.h"
46 #include "cgroup-util.h"
47 #include "cpu-set-util.h"
48 #include "dev-setup.h"
49 #include "event-util.h"
52 #include "formats-util.h"
55 #include "netlink-util.h"
56 #include "process-util.h"
57 #include "selinux-util.h"
58 #include "signal-util.h"
59 #include "string-util.h"
60 #include "terminal-util.h"
61 #include "udev-util.h"
64 static bool arg_debug
= false;
65 static int arg_daemonize
= false;
66 static int arg_resolve_names
= 1;
67 static unsigned arg_children_max
;
68 static int arg_exec_delay
;
69 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
70 static usec_t arg_event_timeout_warn_usec
= 180 * USEC_PER_SEC
/ 3;
72 typedef struct Manager
{
76 struct udev_list_node events
;
78 pid_t pid
; /* the process that originally allocated the manager object */
80 struct udev_rules
*rules
;
81 struct udev_list properties
;
83 struct udev_monitor
*monitor
;
84 struct udev_ctrl
*ctrl
;
85 struct udev_ctrl_connection
*ctrl_conn_blocking
;
89 sd_event_source
*ctrl_event
;
90 sd_event_source
*uevent_event
;
91 sd_event_source
*inotify_event
;
95 bool stop_exec_queue
:1;
106 struct udev_list_node node
;
109 struct udev_device
*dev
;
110 struct udev_device
*dev_kernel
;
111 struct worker
*worker
;
112 enum event_state state
;
113 unsigned long long int delaying_seqnum
;
114 unsigned long long int seqnum
;
117 const char *devpath_old
;
121 sd_event_source
*timeout_warning
;
122 sd_event_source
*timeout
;
125 static inline struct event
*node_to_event(struct udev_list_node
*node
) {
126 return container_of(node
, struct event
, node
);
129 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
140 struct udev_list_node node
;
143 struct udev_monitor
*monitor
;
144 enum worker_state state
;
/* passed from worker to main process; carries no payload, the sender's
 * identity arrives via SCM_CREDENTIALS on the socketpair */
struct worker_message {
};
152 static void event_free(struct event
*event
) {
158 udev_list_node_remove(&event
->node
);
159 udev_device_unref(event
->dev
);
160 udev_device_unref(event
->dev_kernel
);
162 sd_event_source_unref(event
->timeout_warning
);
163 sd_event_source_unref(event
->timeout
);
166 event
->worker
->event
= NULL
;
168 assert(event
->manager
);
170 if (udev_list_node_is_empty(&event
->manager
->events
)) {
171 /* only clean up the queue from the process that created it */
172 if (event
->manager
->pid
== getpid()) {
173 r
= unlink("/run/udev/queue");
175 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
182 static void worker_free(struct worker
*worker
) {
186 assert(worker
->manager
);
188 hashmap_remove(worker
->manager
->workers
, UINT_TO_PTR(worker
->pid
));
189 udev_monitor_unref(worker
->monitor
);
190 event_free(worker
->event
);
195 static void manager_workers_free(Manager
*manager
) {
196 struct worker
*worker
;
201 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
204 manager
->workers
= hashmap_free(manager
->workers
);
207 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
208 _cleanup_free_
struct worker
*worker
= NULL
;
213 assert(worker_monitor
);
216 worker
= new0(struct worker
, 1);
220 worker
->refcount
= 1;
221 worker
->manager
= manager
;
222 /* close monitor, but keep address around */
223 udev_monitor_disconnect(worker_monitor
);
224 worker
->monitor
= udev_monitor_ref(worker_monitor
);
227 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
231 r
= hashmap_put(manager
->workers
, UINT_TO_PTR(pid
), worker
);
241 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
242 struct event
*event
= userdata
;
245 assert(event
->worker
);
247 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
248 event
->worker
->state
= WORKER_KILLED
;
250 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
255 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
256 struct event
*event
= userdata
;
260 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
265 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
270 assert(worker
->manager
);
272 assert(!event
->worker
);
273 assert(!worker
->event
);
275 worker
->state
= WORKER_RUNNING
;
276 worker
->event
= event
;
277 event
->state
= EVENT_RUNNING
;
278 event
->worker
= worker
;
280 e
= worker
->manager
->event
;
282 assert_se(sd_event_now(e
, clock_boottime_or_monotonic(), &usec
) >= 0);
284 (void) sd_event_add_time(e
, &event
->timeout_warning
, clock_boottime_or_monotonic(),
285 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
287 (void) sd_event_add_time(e
, &event
->timeout
, clock_boottime_or_monotonic(),
288 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
291 static void manager_free(Manager
*manager
) {
295 udev_builtin_exit(manager
->udev
);
297 sd_event_source_unref(manager
->ctrl_event
);
298 sd_event_source_unref(manager
->uevent_event
);
299 sd_event_source_unref(manager
->inotify_event
);
301 udev_unref(manager
->udev
);
302 sd_event_unref(manager
->event
);
303 manager_workers_free(manager
);
304 event_queue_cleanup(manager
, EVENT_UNDEF
);
306 udev_monitor_unref(manager
->monitor
);
307 udev_ctrl_unref(manager
->ctrl
);
308 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
310 udev_list_cleanup(&manager
->properties
);
311 udev_rules_unref(manager
->rules
);
313 safe_close(manager
->fd_inotify
);
314 safe_close_pair(manager
->worker_watch
);
319 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
321 static int worker_send_message(int fd
) {
322 struct worker_message message
= {};
324 return loop_write(fd
, &message
, sizeof(message
), false);
327 static void worker_spawn(Manager
*manager
, struct event
*event
) {
328 struct udev
*udev
= event
->udev
;
329 _cleanup_udev_monitor_unref_
struct udev_monitor
*worker_monitor
= NULL
;
333 /* listen for new events */
334 worker_monitor
= udev_monitor_new_from_netlink(udev
, NULL
);
335 if (worker_monitor
== NULL
)
337 /* allow the main daemon netlink address to send devices to the worker */
338 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
339 r
= udev_monitor_enable_receiving(worker_monitor
);
341 log_error_errno(r
, "worker: could not enable receiving of device: %m");
346 struct udev_device
*dev
= NULL
;
347 _cleanup_netlink_unref_ sd_netlink
*rtnl
= NULL
;
349 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
350 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
351 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
354 /* take initial device from queue */
358 unsetenv("NOTIFY_SOCKET");
360 manager_workers_free(manager
);
361 event_queue_cleanup(manager
, EVENT_UNDEF
);
363 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
364 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
365 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
366 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
367 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
369 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
370 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
371 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
373 manager
->event
= sd_event_unref(manager
->event
);
376 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
378 r
= log_error_errno(errno
, "error creating signalfd %m");
381 ep_signal
.data
.fd
= fd_signal
;
383 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
384 ep_monitor
.data
.fd
= fd_monitor
;
386 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
388 r
= log_error_errno(errno
, "error creating epoll fd: %m");
392 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
393 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
394 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
398 /* request TERM signal if parent exits */
399 prctl(PR_SET_PDEATHSIG
, SIGTERM
);
401 /* reset OOM score, we only protect the main daemon */
402 write_string_file("/proc/self/oom_score_adj", "0", 0);
405 struct udev_event
*udev_event
;
410 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
411 udev_event
= udev_event_new(dev
);
412 if (udev_event
== NULL
) {
417 if (arg_exec_delay
> 0)
418 udev_event
->exec_delay
= arg_exec_delay
;
421 * Take a shared lock on the device node; this establishes
422 * a concept of device "ownership" to serialize device
423 * access. External processes holding an exclusive lock will
424 * cause udev to skip the event handling; in the case udev
425 * acquired the lock, the external process can block until
426 * udev has finished its event handling.
428 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
429 streq_ptr("block", udev_device_get_subsystem(dev
)) &&
430 !startswith(udev_device_get_sysname(dev
), "dm-") &&
431 !startswith(udev_device_get_sysname(dev
), "md")) {
432 struct udev_device
*d
= dev
;
434 if (streq_ptr("partition", udev_device_get_devtype(d
)))
435 d
= udev_device_get_parent(d
);
438 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
439 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
440 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
441 fd_lock
= safe_close(fd_lock
);
447 /* needed for renaming netifs */
448 udev_event
->rtnl
= rtnl
;
450 /* apply rules, create node, symlinks */
451 udev_event_execute_rules(udev_event
,
452 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
453 &manager
->properties
,
456 udev_event_execute_run(udev_event
,
457 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
459 if (udev_event
->rtnl
)
460 /* in case rtnl was initialized */
461 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
463 /* apply/restore inotify watch */
464 if (udev_event
->inotify_watch
) {
465 udev_watch_begin(udev
, dev
);
466 udev_device_update_db(dev
);
471 /* send processed event back to libudev listeners */
472 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
475 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
477 /* send udevd the result of the event execution */
478 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
480 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
481 udev_device_get_seqnum(dev
));
483 udev_device_unref(dev
);
486 udev_event_unref(udev_event
);
488 /* wait for more device messages from main udevd, or term signal */
489 while (dev
== NULL
) {
490 struct epoll_event ev
[4];
494 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
498 r
= log_error_errno(errno
, "failed to poll: %m");
502 for (i
= 0; i
< fdcount
; i
++) {
503 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
504 dev
= udev_monitor_receive_device(worker_monitor
);
506 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
507 struct signalfd_siginfo fdsi
;
510 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
511 if (size
!= sizeof(struct signalfd_siginfo
))
513 switch (fdsi
.ssi_signo
) {
522 udev_device_unref(dev
);
523 manager_free(manager
);
525 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
528 event
->state
= EVENT_QUEUED
;
529 log_error_errno(errno
, "fork of child failed: %m");
533 struct worker
*worker
;
535 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
539 worker_attach_event(worker
, event
);
541 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
547 static void event_run(Manager
*manager
, struct event
*event
) {
548 struct worker
*worker
;
554 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
557 if (worker
->state
!= WORKER_IDLE
)
560 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
562 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
564 kill(worker
->pid
, SIGKILL
);
565 worker
->state
= WORKER_KILLED
;
568 worker_attach_event(worker
, event
);
572 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
573 if (arg_children_max
> 1)
574 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
578 /* start new worker and pass initial device */
579 worker_spawn(manager
, event
);
582 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
589 /* only one process can add events to the queue */
590 if (manager
->pid
== 0)
591 manager
->pid
= getpid();
593 assert(manager
->pid
== getpid());
595 event
= new0(struct event
, 1);
599 event
->udev
= udev_device_get_udev(dev
);
600 event
->manager
= manager
;
602 event
->dev_kernel
= udev_device_shallow_clone(dev
);
603 udev_device_copy_properties(event
->dev_kernel
, dev
);
604 event
->seqnum
= udev_device_get_seqnum(dev
);
605 event
->devpath
= udev_device_get_devpath(dev
);
606 event
->devpath_len
= strlen(event
->devpath
);
607 event
->devpath_old
= udev_device_get_devpath_old(dev
);
608 event
->devnum
= udev_device_get_devnum(dev
);
609 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
610 event
->ifindex
= udev_device_get_ifindex(dev
);
612 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
613 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
615 event
->state
= EVENT_QUEUED
;
617 if (udev_list_node_is_empty(&manager
->events
)) {
618 r
= touch("/run/udev/queue");
620 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
623 udev_list_node_append(&event
->node
, &manager
->events
);
628 static void manager_kill_workers(Manager
*manager
) {
629 struct worker
*worker
;
634 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
635 if (worker
->state
== WORKER_KILLED
)
638 worker
->state
= WORKER_KILLED
;
639 kill(worker
->pid
, SIGTERM
);
643 /* lookup event for identical, parent, child device */
644 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
645 struct udev_list_node
*loop
;
648 /* check if queue contains events we depend on */
649 udev_list_node_foreach(loop
, &manager
->events
) {
650 struct event
*loop_event
= node_to_event(loop
);
652 /* we already found a later event, earlier can not block us, no need to check again */
653 if (loop_event
->seqnum
< event
->delaying_seqnum
)
656 /* event we checked earlier still exists, no need to check again */
657 if (loop_event
->seqnum
== event
->delaying_seqnum
)
660 /* found ourself, no later event can block us */
661 if (loop_event
->seqnum
>= event
->seqnum
)
664 /* check major/minor */
665 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
668 /* check network device ifindex */
669 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
672 /* check our old name */
673 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
674 event
->delaying_seqnum
= loop_event
->seqnum
;
678 /* compare devpath */
679 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
681 /* one devpath is contained in the other? */
682 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
685 /* identical device event found */
686 if (loop_event
->devpath_len
== event
->devpath_len
) {
687 /* devices names might have changed/swapped in the meantime */
688 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
690 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
692 event
->delaying_seqnum
= loop_event
->seqnum
;
696 /* parent device event found */
697 if (event
->devpath
[common
] == '/') {
698 event
->delaying_seqnum
= loop_event
->seqnum
;
702 /* child device event found */
703 if (loop_event
->devpath
[common
] == '/') {
704 event
->delaying_seqnum
= loop_event
->seqnum
;
708 /* no matching device */
715 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
716 Manager
*manager
= userdata
;
720 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
722 sd_event_exit(manager
->event
, -ETIMEDOUT
);
727 static void manager_exit(Manager
*manager
) {
733 manager
->exit
= true;
737 "STATUS=Starting shutdown...");
739 /* close sources of new events and discard buffered events */
740 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
741 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
743 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
744 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
746 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
747 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
749 /* discard queued events and kill workers */
750 event_queue_cleanup(manager
, EVENT_QUEUED
);
751 manager_kill_workers(manager
);
753 assert_se(sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
) >= 0);
755 r
= sd_event_add_time(manager
->event
, NULL
, clock_boottime_or_monotonic(),
756 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
761 /* reload requested, HUP signal received, rules changed, builtin changed */
762 static void manager_reload(Manager
*manager
) {
768 "STATUS=Flushing configuration...");
770 manager_kill_workers(manager
);
771 manager
->rules
= udev_rules_unref(manager
->rules
);
772 udev_builtin_exit(manager
->udev
);
776 "STATUS=Processing...");
779 static void event_queue_start(Manager
*manager
) {
780 struct udev_list_node
*loop
;
785 if (udev_list_node_is_empty(&manager
->events
) ||
786 manager
->exit
|| manager
->stop_exec_queue
)
789 assert_se(sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
) >= 0);
790 /* check for changed config, every 3 seconds at most */
791 if (manager
->last_usec
== 0 ||
792 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
793 if (udev_rules_check_timestamp(manager
->rules
) ||
794 udev_builtin_validate(manager
->udev
))
795 manager_reload(manager
);
797 manager
->last_usec
= usec
;
800 udev_builtin_init(manager
->udev
);
802 if (!manager
->rules
) {
803 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
808 udev_list_node_foreach(loop
, &manager
->events
) {
809 struct event
*event
= node_to_event(loop
);
811 if (event
->state
!= EVENT_QUEUED
)
814 /* do not start event if parent or child event is still running */
815 if (is_devpath_busy(manager
, event
))
818 event_run(manager
, event
);
822 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
823 struct udev_list_node
*loop
, *tmp
;
825 udev_list_node_foreach_safe(loop
, tmp
, &manager
->events
) {
826 struct event
*event
= node_to_event(loop
);
828 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
835 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
836 Manager
*manager
= userdata
;
841 struct worker_message msg
;
842 struct iovec iovec
= {
844 .iov_len
= sizeof(msg
),
847 struct cmsghdr cmsghdr
;
848 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
850 struct msghdr msghdr
= {
853 .msg_control
= &control
,
854 .msg_controllen
= sizeof(control
),
856 struct cmsghdr
*cmsg
;
858 struct ucred
*ucred
= NULL
;
859 struct worker
*worker
;
861 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
865 else if (errno
== EAGAIN
)
866 /* nothing more to read */
869 return log_error_errno(errno
, "failed to receive message: %m");
870 } else if (size
!= sizeof(struct worker_message
)) {
871 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
875 CMSG_FOREACH(cmsg
, &msghdr
) {
876 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
877 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
878 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
879 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
882 if (!ucred
|| ucred
->pid
<= 0) {
883 log_warning_errno(EIO
, "ignoring worker message without valid PID");
887 /* lookup worker who sent the signal */
888 worker
= hashmap_get(manager
->workers
, UINT_TO_PTR(ucred
->pid
));
890 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
894 if (worker
->state
!= WORKER_KILLED
)
895 worker
->state
= WORKER_IDLE
;
897 /* worker returned */
898 event_free(worker
->event
);
901 /* we have free workers, try to schedule events */
902 event_queue_start(manager
);
907 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
908 Manager
*manager
= userdata
;
909 struct udev_device
*dev
;
914 dev
= udev_monitor_receive_device(manager
->monitor
);
916 udev_device_ensure_usec_initialized(dev
, NULL
);
917 r
= event_queue_insert(manager
, dev
);
919 udev_device_unref(dev
);
921 /* we have fresh events, try to schedule them */
922 event_queue_start(manager
);
928 /* receive the udevd message from userspace */
929 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
930 Manager
*manager
= userdata
;
931 _cleanup_udev_ctrl_connection_unref_
struct udev_ctrl_connection
*ctrl_conn
= NULL
;
932 _cleanup_udev_ctrl_msg_unref_
struct udev_ctrl_msg
*ctrl_msg
= NULL
;
938 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
942 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
946 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
948 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
949 log_set_max_level(i
);
950 manager_kill_workers(manager
);
953 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
954 log_debug("udevd message (STOP_EXEC_QUEUE) received");
955 manager
->stop_exec_queue
= true;
958 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
959 log_debug("udevd message (START_EXEC_QUEUE) received");
960 manager
->stop_exec_queue
= false;
961 event_queue_start(manager
);
964 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
965 log_debug("udevd message (RELOAD) received");
966 manager_reload(manager
);
969 str
= udev_ctrl_get_set_env(ctrl_msg
);
971 _cleanup_free_
char *key
= NULL
;
977 val
= strchr(key
, '=');
981 if (val
[0] == '\0') {
982 log_debug("udevd message (ENV) received, unset '%s'", key
);
983 udev_list_entry_add(&manager
->properties
, key
, NULL
);
985 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
986 udev_list_entry_add(&manager
->properties
, key
, val
);
989 log_error("wrong key format '%s'", key
);
991 manager_kill_workers(manager
);
994 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
996 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
997 arg_children_max
= i
;
1000 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
1001 log_debug("udevd message (SYNC) received");
1003 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
1004 log_debug("udevd message (EXIT) received");
1005 manager_exit(manager
);
1006 /* keep reference to block the client until we exit
1007 TODO: deal with several blocking exit requests */
1008 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
1014 static int synthesize_change(struct udev_device
*dev
) {
1015 char filename
[UTIL_PATH_SIZE
];
1018 if (streq_ptr("block", udev_device_get_subsystem(dev
)) &&
1019 streq_ptr("disk", udev_device_get_devtype(dev
)) &&
1020 !startswith(udev_device_get_sysname(dev
), "dm-")) {
1021 bool part_table_read
= false;
1022 bool has_partitions
= false;
1024 struct udev
*udev
= udev_device_get_udev(dev
);
1025 _cleanup_udev_enumerate_unref_
struct udev_enumerate
*e
= NULL
;
1026 struct udev_list_entry
*item
;
1029 * Try to re-read the partition table. This only succeeds if
1030 * none of the devices is busy. The kernel returns 0 if no
1031 * partition table is found, and we will not get an event for
1034 fd
= open(udev_device_get_devnode(dev
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1036 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1038 r
= ioctl(fd
, BLKRRPART
, 0);
1042 part_table_read
= true;
1045 /* search for partitions */
1046 e
= udev_enumerate_new(udev
);
1050 r
= udev_enumerate_add_match_parent(e
, dev
);
1054 r
= udev_enumerate_add_match_subsystem(e
, "block");
1058 r
= udev_enumerate_scan_devices(e
);
1062 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1063 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1065 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1069 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1072 has_partitions
= true;
1077 * We have partitions and re-read the table, the kernel already sent
1078 * out a "change" event for the disk, and "remove/add" for all
1081 if (part_table_read
&& has_partitions
)
1085 * We have partitions but re-reading the partition table did not
1086 * work, synthesize "change" for the disk and all partitions.
1088 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1089 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1090 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1092 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1093 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1095 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1099 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1102 log_debug("device %s closed, synthesising partition '%s' 'change'",
1103 udev_device_get_devnode(dev
), udev_device_get_devnode(d
));
1104 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(d
), "/uevent", NULL
);
1105 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1111 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1112 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1113 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1118 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1119 Manager
*manager
= userdata
;
1120 union inotify_event_buffer buffer
;
1121 struct inotify_event
*e
;
1126 l
= read(fd
, &buffer
, sizeof(buffer
));
1128 if (errno
== EAGAIN
|| errno
== EINTR
)
1131 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1134 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1135 _cleanup_udev_device_unref_
struct udev_device
*dev
= NULL
;
1137 dev
= udev_watch_lookup(manager
->udev
, e
->wd
);
1141 log_debug("inotify event: %x for %s", e
->mask
, udev_device_get_devnode(dev
));
1142 if (e
->mask
& IN_CLOSE_WRITE
) {
1143 synthesize_change(dev
);
1145 /* settle might be waiting on us to determine the queue
1146 * state. If we just handled an inotify event, we might have
1147 * generated a "change" event, but we won't have queued up
1148 * the resultant uevent yet. Do that.
1150 on_uevent(NULL
, -1, 0, manager
);
1151 } else if (e
->mask
& IN_IGNORED
)
1152 udev_watch_end(manager
->udev
, dev
);
1158 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1159 Manager
*manager
= userdata
;
1163 manager_exit(manager
);
1168 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1169 Manager
*manager
= userdata
;
1173 manager_reload(manager
);
1178 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1179 Manager
*manager
= userdata
;
1186 struct worker
*worker
;
1188 pid
= waitpid(-1, &status
, WNOHANG
);
1192 worker
= hashmap_get(manager
->workers
, UINT_TO_PTR(pid
));
1194 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1198 if (WIFEXITED(status
)) {
1199 if (WEXITSTATUS(status
) == 0)
1200 log_debug("worker ["PID_FMT
"] exited", pid
);
1202 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1203 } else if (WIFSIGNALED(status
)) {
1204 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), strsignal(WTERMSIG(status
)));
1205 } else if (WIFSTOPPED(status
)) {
1206 log_info("worker ["PID_FMT
"] stopped", pid
);
1208 } else if (WIFCONTINUED(status
)) {
1209 log_info("worker ["PID_FMT
"] continued", pid
);
1212 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1214 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1215 if (worker
->event
) {
1216 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1217 /* delete state from disk */
1218 udev_device_delete_db(worker
->event
->dev
);
1219 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1220 /* forward kernel event without amending it */
1221 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1225 worker_free(worker
);
1228 /* we can start new workers, try to schedule events */
1229 event_queue_start(manager
);
1234 static int on_post(sd_event_source
*s
, void *userdata
) {
1235 Manager
*manager
= userdata
;
1240 if (udev_list_node_is_empty(&manager
->events
)) {
1241 /* no pending events */
1242 if (!hashmap_isempty(manager
->workers
)) {
1243 /* there are idle workers */
1244 log_debug("cleanup idle workers");
1245 manager_kill_workers(manager
);
1248 if (manager
->exit
) {
1249 r
= sd_event_exit(manager
->event
, 0);
1252 } else if (manager
->cgroup
)
1253 /* cleanup possible left-over processes in our cgroup */
1254 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, false, true, NULL
);
1261 static int listen_fds(int *rctrl
, int *rnetlink
) {
1262 _cleanup_udev_unref_
struct udev
*udev
= NULL
;
1263 int ctrl_fd
= -1, netlink_fd
= -1;
1269 n
= sd_listen_fds(true);
1273 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1274 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1281 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1282 if (netlink_fd
>= 0)
1292 _cleanup_udev_ctrl_unref_
struct udev_ctrl
*ctrl
= NULL
;
1298 ctrl
= udev_ctrl_new(udev
);
1300 return log_error_errno(EINVAL
, "error initializing udev control socket");
1302 r
= udev_ctrl_enable_receiving(ctrl
);
1304 return log_error_errno(EINVAL
, "error binding udev control socket");
1306 fd
= udev_ctrl_get_fd(ctrl
);
1308 return log_error_errno(EIO
, "could not get ctrl fd");
1310 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1312 return log_error_errno(errno
, "could not dup ctrl fd: %m");
1315 if (netlink_fd
< 0) {
1316 _cleanup_udev_monitor_unref_
struct udev_monitor
*monitor
= NULL
;
1324 monitor
= udev_monitor_new_from_netlink(udev
, "kernel");
1326 return log_error_errno(EINVAL
, "error initializing netlink socket");
1328 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1330 r
= udev_monitor_enable_receiving(monitor
);
1332 return log_error_errno(EINVAL
, "error binding netlink socket");
1334 fd
= udev_monitor_get_fd(monitor
);
1336 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1338 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1340 return log_error_errno(errno
, "could not dup netlink fd: %m");
1344 *rnetlink
= netlink_fd
;
1350 * read the kernel command line, in case we need to get into debug mode
1351 * udev.log-priority=<level> syslog priority
1352 * udev.children-max=<number of workers> events are fully serialized if set to 1
1353 * udev.exec-delay=<number of seconds> delay execution of every executed program
1354 * udev.event-timeout=<number of seconds> seconds to wait before terminating an event
1356 static int parse_proc_cmdline_item(const char *key
, const char *value
) {
1357 const char *full_key
= key
;
1365 if (startswith(key
, "rd."))
1366 key
+= strlen("rd.");
1368 if (startswith(key
, "udev."))
1369 key
+= strlen("udev.");
1373 if (streq(key
, "log-priority")) {
1376 prio
= util_log_priority(value
);
1379 log_set_max_level(prio
);
1380 } else if (streq(key
, "children-max")) {
1381 r
= safe_atou(value
, &arg_children_max
);
1384 } else if (streq(key
, "exec-delay")) {
1385 r
= safe_atoi(value
, &arg_exec_delay
);
1388 } else if (streq(key
, "event-timeout")) {
1389 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1392 arg_event_timeout_usec
*= USEC_PER_SEC
;
1393 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1398 log_warning("invalid %s ignored: %s", full_key
, value
);
1402 static void help(void) {
1403 printf("%s [OPTIONS...]\n\n"
1404 "Manages devices.\n\n"
1405 " -h --help Print this message\n"
1406 " --version Print version of the program\n"
1407 " --daemon Detach and run in the background\n"
1408 " --debug Enable debug output\n"
1409 " --children-max=INT Set maximum number of workers\n"
1410 " --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1411 " --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1412 " --resolve-names=early|late|never\n"
1413 " When to resolve users and groups\n"
1414 , program_invocation_short_name
);
1417 static int parse_argv(int argc
, char *argv
[]) {
1418 static const struct option options
[] = {
1419 { "daemon", no_argument
, NULL
, 'd' },
1420 { "debug", no_argument
, NULL
, 'D' },
1421 { "children-max", required_argument
, NULL
, 'c' },
1422 { "exec-delay", required_argument
, NULL
, 'e' },
1423 { "event-timeout", required_argument
, NULL
, 't' },
1424 { "resolve-names", required_argument
, NULL
, 'N' },
1425 { "help", no_argument
, NULL
, 'h' },
1426 { "version", no_argument
, NULL
, 'V' },
1435 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
1441 arg_daemonize
= true;
1444 r
= safe_atou(optarg
, &arg_children_max
);
1446 log_warning("Invalid --children-max ignored: %s", optarg
);
1449 r
= safe_atoi(optarg
, &arg_exec_delay
);
1451 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1454 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1456 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1458 arg_event_timeout_usec
*= USEC_PER_SEC
;
1459 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1466 if (streq(optarg
, "early")) {
1467 arg_resolve_names
= 1;
1468 } else if (streq(optarg
, "late")) {
1469 arg_resolve_names
= 0;
1470 } else if (streq(optarg
, "never")) {
1471 arg_resolve_names
= -1;
1473 log_error("resolve-names must be early, late or never");
1481 printf("%s\n", VERSION
);
1486 assert_not_reached("Unhandled option");
1494 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1495 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1496 int r
, fd_worker
, one
= 1;
1499 assert(fd_ctrl
>= 0);
1500 assert(fd_uevent
>= 0);
1502 manager
= new0(Manager
, 1);
1506 manager
->fd_inotify
= -1;
1507 manager
->worker_watch
[WRITE_END
] = -1;
1508 manager
->worker_watch
[READ_END
] = -1;
1510 manager
->udev
= udev_new();
1512 return log_error_errno(errno
, "could not allocate udev context: %m");
1514 udev_builtin_init(manager
->udev
);
1516 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1517 if (!manager
->rules
)
1518 return log_error_errno(ENOMEM
, "error reading rules");
1520 udev_list_node_init(&manager
->events
);
1521 udev_list_init(manager
->udev
, &manager
->properties
, true);
1523 manager
->cgroup
= cgroup
;
1525 manager
->ctrl
= udev_ctrl_new_from_fd(manager
->udev
, fd_ctrl
);
1527 return log_error_errno(EINVAL
, "error taking over udev control socket");
1529 manager
->monitor
= udev_monitor_new_from_netlink_fd(manager
->udev
, "kernel", fd_uevent
);
1530 if (!manager
->monitor
)
1531 return log_error_errno(EINVAL
, "error taking over netlink socket");
1533 /* unnamed socket from workers to the main daemon */
1534 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1536 return log_error_errno(errno
, "error creating socketpair: %m");
1538 fd_worker
= manager
->worker_watch
[READ_END
];
1540 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1542 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1544 manager
->fd_inotify
= udev_watch_init(manager
->udev
);
1545 if (manager
->fd_inotify
< 0)
1546 return log_error_errno(ENOMEM
, "error initializing inotify");
1548 udev_watch_restore(manager
->udev
);
1550 /* block and listen to all signals on signalfd */
1551 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1553 r
= sd_event_default(&manager
->event
);
1555 return log_error_errno(errno
, "could not allocate event loop: %m");
1557 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1559 return log_error_errno(r
, "error creating sigint event source: %m");
1561 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1563 return log_error_errno(r
, "error creating sigterm event source: %m");
1565 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1567 return log_error_errno(r
, "error creating sighup event source: %m");
1569 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1571 return log_error_errno(r
, "error creating sigchld event source: %m");
1573 r
= sd_event_set_watchdog(manager
->event
, true);
1575 return log_error_errno(r
, "error creating watchdog event source: %m");
1577 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1579 return log_error_errno(r
, "error creating ctrl event source: %m");
1581 /* This needs to be after the inotify and uevent handling, to make sure
1582 * that the ping is send back after fully processing the pending uevents
1583 * (including the synthetic ones we may create due to inotify events).
1585 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1587 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1589 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1591 return log_error_errno(r
, "error creating inotify event source: %m");
1593 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1595 return log_error_errno(r
, "error creating uevent event source: %m");
1597 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1599 return log_error_errno(r
, "error creating worker event source: %m");
1601 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1603 return log_error_errno(r
, "error creating post event source: %m");
1611 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1612 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1615 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1617 r
= log_error_errno(r
, "failed to allocate manager object: %m");
1621 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1623 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1625 (void) sd_notify(false,
1627 "STATUS=Processing...");
1629 r
= sd_event_loop(manager
->event
);
1631 log_error_errno(r
, "event loop failed: %m");
1635 sd_event_get_exit_code(manager
->event
, &r
);
1640 "STATUS=Shutting down...");
1642 udev_ctrl_cleanup(manager
->ctrl
);
1646 int main(int argc
, char *argv
[]) {
1647 _cleanup_free_
char *cgroup
= NULL
;
1648 int r
, fd_ctrl
, fd_uevent
;
1650 log_set_target(LOG_TARGET_AUTO
);
1651 log_parse_environment();
1654 r
= parse_argv(argc
, argv
);
1658 r
= parse_proc_cmdline(parse_proc_cmdline_item
);
1660 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1663 log_set_target(LOG_TARGET_CONSOLE
);
1664 log_set_max_level(LOG_DEBUG
);
1667 if (getuid() != 0) {
1668 r
= log_error_errno(EPERM
, "root privileges required");
1672 if (arg_children_max
== 0) {
1675 arg_children_max
= 8;
1677 if (sched_getaffinity(0, sizeof(cpu_set
), &cpu_set
) == 0)
1678 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1680 log_debug("set children_max to %u", arg_children_max
);
1683 /* set umask before creating any file/directory */
1686 r
= log_error_errno(errno
, "could not change dir to /: %m");
1692 r
= mac_selinux_init("/dev");
1694 log_error_errno(r
, "could not initialize labelling: %m");
1698 r
= mkdir("/run/udev", 0755);
1699 if (r
< 0 && errno
!= EEXIST
) {
1700 r
= log_error_errno(errno
, "could not create /run/udev: %m");
1704 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1706 if (getppid() == 1) {
1707 /* get our own cgroup, we regularly kill everything udev has left behind
1708 we only do this on systemd systems, and only if we are directly spawned
1709 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1710 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1712 if (r
== -ENOENT
|| r
== -ENOEXEC
)
1713 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1715 log_warning_errno(r
, "failed to get cgroup: %m");
1719 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1721 r
= log_error_errno(r
, "could not listen on fds: %m");
1725 if (arg_daemonize
) {
1728 log_info("starting version " VERSION
);
1730 /* connect /dev/null to stdin, stdout, stderr */
1731 if (log_get_max_level() < LOG_DEBUG
)
1732 (void) make_null_stdio();
1739 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1742 mac_selinux_finish();
1744 _exit(EXIT_SUCCESS
);
1749 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
1752 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1755 mac_selinux_finish();
1757 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;