1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
17 #include <sys/epoll.h>
19 #include <sys/inotify.h>
20 #include <sys/ioctl.h>
21 #include <sys/mount.h>
22 #include <sys/prctl.h>
23 #include <sys/signalfd.h>
24 #include <sys/socket.h>
30 #include "sd-daemon.h"
33 #include "alloc-util.h"
34 #include "cgroup-util.h"
35 #include "cpu-set-util.h"
36 #include "dev-setup.h"
37 #include "device-util.h"
40 #include "format-util.h"
44 #include "libudev-device-internal.h"
46 #include "netlink-util.h"
47 #include "parse-util.h"
48 #include "proc-cmdline.h"
49 #include "process-util.h"
50 #include "selinux-util.h"
51 #include "signal-util.h"
52 #include "socket-util.h"
53 #include "string-util.h"
54 #include "terminal-util.h"
55 #include "udev-builtin.h"
56 #include "udev-ctrl.h"
57 #include "udev-util.h"
58 #include "udev-watch.h"
60 #include "user-util.h"
62 static bool arg_debug
= false;
63 static int arg_daemonize
= false;
64 static int arg_resolve_names
= 1;
65 static unsigned arg_children_max
;
66 static int arg_exec_delay
;
67 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
68 static usec_t arg_event_timeout_warn_usec
= 180 * USEC_PER_SEC
/ 3;
70 typedef struct Manager
{
73 LIST_HEAD(struct event
, events
);
75 pid_t pid
; /* the process that originally allocated the manager object */
77 struct udev_rules
*rules
;
78 struct udev_list properties
;
80 struct udev_monitor
*monitor
;
81 struct udev_ctrl
*ctrl
;
82 struct udev_ctrl_connection
*ctrl_conn_blocking
;
86 sd_event_source
*ctrl_event
;
87 sd_event_source
*uevent_event
;
88 sd_event_source
*inotify_event
;
92 bool stop_exec_queue
:1;
103 LIST_FIELDS(struct event
, event
);
105 struct udev_device
*dev
;
106 struct udev_device
*dev_kernel
;
107 struct worker
*worker
;
108 enum event_state state
;
109 unsigned long long int delaying_seqnum
;
110 unsigned long long int seqnum
;
113 const char *devpath_old
;
117 sd_event_source
*timeout_warning
;
118 sd_event_source
*timeout
;
121 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
133 struct udev_monitor
*monitor
;
134 enum worker_state state
;
138 /* passed from worker to main process */
139 struct worker_message
{
142 static void event_free(struct event
*event
) {
147 assert(event
->manager
);
149 LIST_REMOVE(event
, event
->manager
->events
, event
);
150 udev_device_unref(event
->dev
);
151 udev_device_unref(event
->dev_kernel
);
153 sd_event_source_unref(event
->timeout_warning
);
154 sd_event_source_unref(event
->timeout
);
157 event
->worker
->event
= NULL
;
159 if (LIST_IS_EMPTY(event
->manager
->events
)) {
160 /* only clean up the queue from the process that created it */
161 if (event
->manager
->pid
== getpid_cached()) {
162 r
= unlink("/run/udev/queue");
164 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
171 static void worker_free(struct worker
*worker
) {
175 assert(worker
->manager
);
177 hashmap_remove(worker
->manager
->workers
, PID_TO_PTR(worker
->pid
));
178 udev_monitor_unref(worker
->monitor
);
179 event_free(worker
->event
);
184 static void manager_workers_free(Manager
*manager
) {
185 struct worker
*worker
;
190 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
193 manager
->workers
= hashmap_free(manager
->workers
);
196 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
197 _cleanup_free_
struct worker
*worker
= NULL
;
202 assert(worker_monitor
);
205 worker
= new0(struct worker
, 1);
209 worker
->manager
= manager
;
210 /* close monitor, but keep address around */
211 udev_monitor_disconnect(worker_monitor
);
212 worker
->monitor
= udev_monitor_ref(worker_monitor
);
215 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
219 r
= hashmap_put(manager
->workers
, PID_TO_PTR(pid
), worker
);
223 *ret
= TAKE_PTR(worker
);
228 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
229 struct event
*event
= userdata
;
232 assert(event
->worker
);
234 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
235 event
->worker
->state
= WORKER_KILLED
;
237 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
242 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
243 struct event
*event
= userdata
;
247 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
252 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
257 assert(worker
->manager
);
259 assert(!event
->worker
);
260 assert(!worker
->event
);
262 worker
->state
= WORKER_RUNNING
;
263 worker
->event
= event
;
264 event
->state
= EVENT_RUNNING
;
265 event
->worker
= worker
;
267 e
= worker
->manager
->event
;
269 assert_se(sd_event_now(e
, CLOCK_MONOTONIC
, &usec
) >= 0);
271 (void) sd_event_add_time(e
, &event
->timeout_warning
, CLOCK_MONOTONIC
,
272 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
274 (void) sd_event_add_time(e
, &event
->timeout
, CLOCK_MONOTONIC
,
275 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
278 static void manager_free(Manager
*manager
) {
284 sd_event_source_unref(manager
->ctrl_event
);
285 sd_event_source_unref(manager
->uevent_event
);
286 sd_event_source_unref(manager
->inotify_event
);
288 sd_event_unref(manager
->event
);
289 manager_workers_free(manager
);
290 event_queue_cleanup(manager
, EVENT_UNDEF
);
292 udev_monitor_unref(manager
->monitor
);
293 udev_ctrl_unref(manager
->ctrl
);
294 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
296 udev_list_cleanup(&manager
->properties
);
297 udev_rules_unref(manager
->rules
);
299 safe_close(manager
->fd_inotify
);
300 safe_close_pair(manager
->worker_watch
);
305 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
307 static int worker_send_message(int fd
) {
308 struct worker_message message
= {};
310 return loop_write(fd
, &message
, sizeof(message
), false);
/* Decide whether event handling should take a shared flock() on the device
 * node. Only block devices are locked; device-mapper ("dm-"), MD ("md") and
 * DRBD ("drbd") nodes are excluded. */
static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);

        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}
325 static void worker_spawn(Manager
*manager
, struct event
*event
) {
326 _cleanup_(udev_monitor_unrefp
) struct udev_monitor
*worker_monitor
= NULL
;
330 /* listen for new events */
331 worker_monitor
= udev_monitor_new_from_netlink(NULL
, NULL
);
332 if (worker_monitor
== NULL
)
334 /* allow the main daemon netlink address to send devices to the worker */
335 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
336 r
= udev_monitor_enable_receiving(worker_monitor
);
338 log_error_errno(r
, "worker: could not enable receiving of device: %m");
343 struct udev_device
*dev
= NULL
;
344 _cleanup_(sd_netlink_unrefp
) sd_netlink
*rtnl
= NULL
;
346 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
347 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
348 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
351 /* take initial device from queue */
352 dev
= TAKE_PTR(event
->dev
);
354 unsetenv("NOTIFY_SOCKET");
356 manager_workers_free(manager
);
357 event_queue_cleanup(manager
, EVENT_UNDEF
);
359 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
360 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
361 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
362 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
364 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
365 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
366 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
368 manager
->event
= sd_event_unref(manager
->event
);
371 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
373 r
= log_error_errno(errno
, "error creating signalfd %m");
376 ep_signal
.data
.fd
= fd_signal
;
378 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
379 ep_monitor
.data
.fd
= fd_monitor
;
381 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
383 r
= log_error_errno(errno
, "error creating epoll fd: %m");
387 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
388 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
389 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
393 /* Request TERM signal if parent exits.
394 Ignore error, not much we can do in that case. */
395 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
397 /* Reset OOM score, we only protect the main daemon. */
398 write_string_file("/proc/self/oom_score_adj", "0", 0);
401 struct udev_event
*udev_event
;
406 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
407 udev_event
= udev_event_new(dev
);
408 if (udev_event
== NULL
) {
413 if (arg_exec_delay
> 0)
414 udev_event
->exec_delay
= arg_exec_delay
;
417 * Take a shared lock on the device node; this establishes
418 * a concept of device "ownership" to serialize device
419 * access. External processes holding an exclusive lock will
420 * cause udev to skip the event handling; in the case udev
421 * acquired the lock, the external process can block until
422 * udev has finished its event handling.
424 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
425 shall_lock_device(dev
)) {
426 struct udev_device
*d
= dev
;
428 if (streq_ptr("partition", udev_device_get_devtype(d
)))
429 d
= udev_device_get_parent(d
);
432 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
433 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
434 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
435 fd_lock
= safe_close(fd_lock
);
441 /* needed for renaming netifs */
442 udev_event
->rtnl
= rtnl
;
444 /* apply rules, create node, symlinks */
445 udev_event_execute_rules(udev_event
,
446 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
447 &manager
->properties
,
450 udev_event_execute_run(udev_event
,
451 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
453 if (udev_event
->rtnl
)
454 /* in case rtnl was initialized */
455 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
457 /* apply/restore inotify watch */
458 if (udev_event
->inotify_watch
) {
459 udev_watch_begin(dev
->device
);
460 udev_device_update_db(dev
);
465 /* send processed event back to libudev listeners */
466 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
469 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
471 /* send udevd the result of the event execution */
472 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
474 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
475 udev_device_get_seqnum(dev
));
477 udev_device_unref(dev
);
480 udev_event_unref(udev_event
);
482 /* wait for more device messages from main udevd, or term signal */
483 while (dev
== NULL
) {
484 struct epoll_event ev
[4];
488 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
492 r
= log_error_errno(errno
, "failed to poll: %m");
496 for (i
= 0; i
< fdcount
; i
++) {
497 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
498 dev
= udev_monitor_receive_device(worker_monitor
);
500 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
501 struct signalfd_siginfo fdsi
;
504 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
505 if (size
!= sizeof(struct signalfd_siginfo
))
507 switch (fdsi
.ssi_signo
) {
516 udev_device_unref(dev
);
517 manager_free(manager
);
519 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
522 event
->state
= EVENT_QUEUED
;
523 log_error_errno(errno
, "fork of child failed: %m");
527 struct worker
*worker
;
529 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
533 worker_attach_event(worker
, event
);
535 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
541 static void event_run(Manager
*manager
, struct event
*event
) {
542 struct worker
*worker
;
548 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
551 if (worker
->state
!= WORKER_IDLE
)
554 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
556 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
558 kill(worker
->pid
, SIGKILL
);
559 worker
->state
= WORKER_KILLED
;
562 worker_attach_event(worker
, event
);
566 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
567 if (arg_children_max
> 1)
568 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
572 /* start new worker and pass initial device */
573 worker_spawn(manager
, event
);
576 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
583 /* only one process can add events to the queue */
584 if (manager
->pid
== 0)
585 manager
->pid
= getpid_cached();
587 assert(manager
->pid
== getpid_cached());
589 event
= new0(struct event
, 1);
593 event
->manager
= manager
;
595 event
->dev_kernel
= udev_device_shallow_clone(dev
);
596 udev_device_copy_properties(event
->dev_kernel
, dev
);
597 event
->seqnum
= udev_device_get_seqnum(dev
);
598 event
->devpath
= udev_device_get_devpath(dev
);
599 event
->devpath_len
= strlen(event
->devpath
);
600 event
->devpath_old
= udev_device_get_devpath_old(dev
);
601 event
->devnum
= udev_device_get_devnum(dev
);
602 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
603 event
->ifindex
= udev_device_get_ifindex(dev
);
605 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
606 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
608 event
->state
= EVENT_QUEUED
;
610 if (LIST_IS_EMPTY(manager
->events
)) {
611 r
= touch("/run/udev/queue");
613 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
616 LIST_APPEND(event
, manager
->events
, event
);
621 static void manager_kill_workers(Manager
*manager
) {
622 struct worker
*worker
;
627 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
628 if (worker
->state
== WORKER_KILLED
)
631 worker
->state
= WORKER_KILLED
;
632 kill(worker
->pid
, SIGTERM
);
636 /* lookup event for identical, parent, child device */
637 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
638 struct event
*loop_event
;
641 /* check if queue contains events we depend on */
642 LIST_FOREACH(event
, loop_event
, manager
->events
) {
643 /* we already found a later event, earlier cannot block us, no need to check again */
644 if (loop_event
->seqnum
< event
->delaying_seqnum
)
647 /* event we checked earlier still exists, no need to check again */
648 if (loop_event
->seqnum
== event
->delaying_seqnum
)
651 /* found ourself, no later event can block us */
652 if (loop_event
->seqnum
>= event
->seqnum
)
655 /* check major/minor */
656 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
659 /* check network device ifindex */
660 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
663 /* check our old name */
664 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
665 event
->delaying_seqnum
= loop_event
->seqnum
;
669 /* compare devpath */
670 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
672 /* one devpath is contained in the other? */
673 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
676 /* identical device event found */
677 if (loop_event
->devpath_len
== event
->devpath_len
) {
678 /* devices names might have changed/swapped in the meantime */
679 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
681 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
683 event
->delaying_seqnum
= loop_event
->seqnum
;
687 /* parent device event found */
688 if (event
->devpath
[common
] == '/') {
689 event
->delaying_seqnum
= loop_event
->seqnum
;
693 /* child device event found */
694 if (loop_event
->devpath
[common
] == '/') {
695 event
->delaying_seqnum
= loop_event
->seqnum
;
699 /* no matching device */
706 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
707 Manager
*manager
= userdata
;
711 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
713 sd_event_exit(manager
->event
, -ETIMEDOUT
);
718 static void manager_exit(Manager
*manager
) {
724 manager
->exit
= true;
728 "STATUS=Starting shutdown...");
730 /* close sources of new events and discard buffered events */
731 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
732 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
734 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
735 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
737 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
738 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
740 /* discard queued events and kill workers */
741 event_queue_cleanup(manager
, EVENT_QUEUED
);
742 manager_kill_workers(manager
);
744 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
746 r
= sd_event_add_time(manager
->event
, NULL
, CLOCK_MONOTONIC
,
747 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
752 /* reload requested, HUP signal received, rules changed, builtin changed */
753 static void manager_reload(Manager
*manager
) {
759 "STATUS=Flushing configuration...");
761 manager_kill_workers(manager
);
762 manager
->rules
= udev_rules_unref(manager
->rules
);
767 "STATUS=Processing with %u children at max", arg_children_max
);
770 static void event_queue_start(Manager
*manager
) {
776 if (LIST_IS_EMPTY(manager
->events
) ||
777 manager
->exit
|| manager
->stop_exec_queue
)
780 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
781 /* check for changed config, every 3 seconds at most */
782 if (manager
->last_usec
== 0 ||
783 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
784 if (udev_rules_check_timestamp(manager
->rules
) ||
785 udev_builtin_validate())
786 manager_reload(manager
);
788 manager
->last_usec
= usec
;
793 if (!manager
->rules
) {
794 manager
->rules
= udev_rules_new(arg_resolve_names
);
799 LIST_FOREACH(event
,event
,manager
->events
) {
800 if (event
->state
!= EVENT_QUEUED
)
803 /* do not start event if parent or child event is still running */
804 if (is_devpath_busy(manager
, event
))
807 event_run(manager
, event
);
811 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
812 struct event
*event
, *tmp
;
814 LIST_FOREACH_SAFE(event
, event
, tmp
, manager
->events
) {
815 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
822 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
823 Manager
*manager
= userdata
;
828 struct worker_message msg
;
829 struct iovec iovec
= {
831 .iov_len
= sizeof(msg
),
834 struct cmsghdr cmsghdr
;
835 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
837 struct msghdr msghdr
= {
840 .msg_control
= &control
,
841 .msg_controllen
= sizeof(control
),
843 struct cmsghdr
*cmsg
;
845 struct ucred
*ucred
= NULL
;
846 struct worker
*worker
;
848 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
852 else if (errno
== EAGAIN
)
853 /* nothing more to read */
856 return log_error_errno(errno
, "failed to receive message: %m");
857 } else if (size
!= sizeof(struct worker_message
)) {
858 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
862 CMSG_FOREACH(cmsg
, &msghdr
) {
863 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
864 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
865 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
866 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
869 if (!ucred
|| ucred
->pid
<= 0) {
870 log_warning_errno(EIO
, "ignoring worker message without valid PID");
874 /* lookup worker who sent the signal */
875 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(ucred
->pid
));
877 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
881 if (worker
->state
!= WORKER_KILLED
)
882 worker
->state
= WORKER_IDLE
;
884 /* worker returned */
885 event_free(worker
->event
);
888 /* we have free workers, try to schedule events */
889 event_queue_start(manager
);
894 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
895 Manager
*manager
= userdata
;
896 struct udev_device
*dev
;
901 dev
= udev_monitor_receive_device(manager
->monitor
);
903 udev_device_ensure_usec_initialized(dev
, NULL
);
904 r
= event_queue_insert(manager
, dev
);
906 udev_device_unref(dev
);
908 /* we have fresh events, try to schedule them */
909 event_queue_start(manager
);
915 /* receive the udevd message from userspace */
916 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
917 Manager
*manager
= userdata
;
918 _cleanup_(udev_ctrl_connection_unrefp
) struct udev_ctrl_connection
*ctrl_conn
= NULL
;
919 _cleanup_(udev_ctrl_msg_unrefp
) struct udev_ctrl_msg
*ctrl_msg
= NULL
;
925 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
929 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
933 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
935 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
936 log_set_max_level(i
);
937 manager_kill_workers(manager
);
940 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
941 log_debug("udevd message (STOP_EXEC_QUEUE) received");
942 manager
->stop_exec_queue
= true;
945 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
946 log_debug("udevd message (START_EXEC_QUEUE) received");
947 manager
->stop_exec_queue
= false;
948 event_queue_start(manager
);
951 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
952 log_debug("udevd message (RELOAD) received");
953 manager_reload(manager
);
956 str
= udev_ctrl_get_set_env(ctrl_msg
);
958 _cleanup_free_
char *key
= NULL
;
964 val
= strchr(key
, '=');
968 if (val
[0] == '\0') {
969 log_debug("udevd message (ENV) received, unset '%s'", key
);
970 udev_list_entry_add(&manager
->properties
, key
, NULL
);
972 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
973 udev_list_entry_add(&manager
->properties
, key
, val
);
976 log_error("wrong key format '%s'", key
);
978 manager_kill_workers(manager
);
981 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
983 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
984 arg_children_max
= i
;
986 (void) sd_notifyf(false,
988 "STATUS=Processing with %u children at max", arg_children_max
);
991 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
992 log_debug("udevd message (SYNC) received");
994 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
995 log_debug("udevd message (EXIT) received");
996 manager_exit(manager
);
997 /* keep reference to block the client until we exit
998 TODO: deal with several blocking exit requests */
999 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
1005 static int synthesize_change(sd_device
*dev
) {
1006 const char *subsystem
, *sysname
, *devname
, *syspath
, *devtype
;
1007 char filename
[PATH_MAX
];
1010 r
= sd_device_get_subsystem(dev
, &subsystem
);
1014 r
= sd_device_get_sysname(dev
, &sysname
);
1018 r
= sd_device_get_devname(dev
, &devname
);
1022 r
= sd_device_get_syspath(dev
, &syspath
);
1026 r
= sd_device_get_devtype(dev
, &devtype
);
1030 if (streq_ptr("block", subsystem
) &&
1031 streq_ptr("disk", devtype
) &&
1032 !startswith(sysname
, "dm-")) {
1033 _cleanup_(sd_device_enumerator_unrefp
) sd_device_enumerator
*e
= NULL
;
1034 bool part_table_read
= false, has_partitions
= false;
1039 * Try to re-read the partition table. This only succeeds if
1040 * none of the devices is busy. The kernel returns 0 if no
1041 * partition table is found, and we will not get an event for
1044 fd
= open(devname
, O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1046 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1048 r
= ioctl(fd
, BLKRRPART
, 0);
1052 part_table_read
= true;
1055 /* search for partitions */
1056 r
= sd_device_enumerator_new(&e
);
1060 r
= sd_device_enumerator_allow_uninitialized(e
);
1064 r
= sd_device_enumerator_add_match_parent(e
, dev
);
1068 r
= sd_device_enumerator_add_match_subsystem(e
, "block", true);
1072 FOREACH_DEVICE(e
, d
) {
1075 if (sd_device_get_devtype(d
, &t
) < 0 ||
1076 !streq("partition", t
))
1079 has_partitions
= true;
1084 * We have partitions and re-read the table, the kernel already sent
1085 * out a "change" event for the disk, and "remove/add" for all
1088 if (part_table_read
&& has_partitions
)
1092 * We have partitions but re-reading the partition table did not
1093 * work, synthesize "change" for the disk and all partitions.
1095 log_debug("Device '%s' is closed, synthesising 'change'", devname
);
1096 strscpyl(filename
, sizeof(filename
), syspath
, "/uevent", NULL
);
1097 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1099 FOREACH_DEVICE(e
, d
) {
1100 const char *t
, *n
, *s
;
1102 if (sd_device_get_devtype(d
, &t
) < 0 ||
1103 !streq("partition", t
))
1106 if (sd_device_get_devname(d
, &n
) < 0 ||
1107 sd_device_get_syspath(d
, &s
) < 0)
1110 log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname
, n
);
1111 strscpyl(filename
, sizeof(filename
), s
, "/uevent", NULL
);
1112 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1118 log_debug("Device %s is closed, synthesising 'change'", devname
);
1119 strscpyl(filename
, sizeof(filename
), syspath
, "/uevent", NULL
);
1120 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1125 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1126 Manager
*manager
= userdata
;
1127 union inotify_event_buffer buffer
;
1128 struct inotify_event
*e
;
1133 l
= read(fd
, &buffer
, sizeof(buffer
));
1135 if (IN_SET(errno
, EAGAIN
, EINTR
))
1138 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1141 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1142 _cleanup_(sd_device_unrefp
) sd_device
*dev
= NULL
;
1143 const char *devnode
;
1145 if (udev_watch_lookup(e
->wd
, &dev
) < 0)
1148 if (sd_device_get_devname(dev
, &devnode
) < 0)
1151 log_debug("inotify event: %x for %s", e
->mask
, devnode
);
1152 if (e
->mask
& IN_CLOSE_WRITE
) {
1153 synthesize_change(dev
);
1155 /* settle might be waiting on us to determine the queue
1156 * state. If we just handled an inotify event, we might have
1157 * generated a "change" event, but we won't have queued up
1158 * the resultant uevent yet. Do that.
1160 on_uevent(NULL
, -1, 0, manager
);
1161 } else if (e
->mask
& IN_IGNORED
)
1162 udev_watch_end(dev
);
1168 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1169 Manager
*manager
= userdata
;
1173 manager_exit(manager
);
1178 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1179 Manager
*manager
= userdata
;
1183 manager_reload(manager
);
1188 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1189 Manager
*manager
= userdata
;
1196 struct worker
*worker
;
1198 pid
= waitpid(-1, &status
, WNOHANG
);
1202 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(pid
));
1204 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1208 if (WIFEXITED(status
)) {
1209 if (WEXITSTATUS(status
) == 0)
1210 log_debug("worker ["PID_FMT
"] exited", pid
);
1212 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1213 } else if (WIFSIGNALED(status
)) {
1214 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), signal_to_string(WTERMSIG(status
)));
1215 } else if (WIFSTOPPED(status
)) {
1216 log_info("worker ["PID_FMT
"] stopped", pid
);
1218 } else if (WIFCONTINUED(status
)) {
1219 log_info("worker ["PID_FMT
"] continued", pid
);
1222 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1224 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1225 if (worker
->event
) {
1226 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1227 /* delete state from disk */
1228 udev_device_delete_db(worker
->event
->dev
);
1229 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1230 /* forward kernel event without amending it */
1231 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1235 worker_free(worker
);
1238 /* we can start new workers, try to schedule events */
1239 event_queue_start(manager
);
1244 static int on_post(sd_event_source
*s
, void *userdata
) {
1245 Manager
*manager
= userdata
;
1250 if (LIST_IS_EMPTY(manager
->events
)) {
1251 /* no pending events */
1252 if (!hashmap_isempty(manager
->workers
)) {
1253 /* there are idle workers */
1254 log_debug("cleanup idle workers");
1255 manager_kill_workers(manager
);
1258 if (manager
->exit
) {
1259 r
= sd_event_exit(manager
->event
, 0);
1262 } else if (manager
->cgroup
)
1263 /* cleanup possible left-over processes in our cgroup */
1264 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, CGROUP_IGNORE_SELF
, NULL
, NULL
, NULL
);
1271 static int listen_fds(int *rctrl
, int *rnetlink
) {
1272 int ctrl_fd
= -1, netlink_fd
= -1;
1278 n
= sd_listen_fds(true);
1282 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1283 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1290 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1291 if (netlink_fd
>= 0)
1301 _cleanup_(udev_ctrl_unrefp
) struct udev_ctrl
*ctrl
= NULL
;
1303 ctrl
= udev_ctrl_new();
1305 return log_error_errno(EINVAL
, "error initializing udev control socket");
1307 r
= udev_ctrl_enable_receiving(ctrl
);
1309 return log_error_errno(EINVAL
, "error binding udev control socket");
1311 fd
= udev_ctrl_get_fd(ctrl
);
1313 return log_error_errno(EIO
, "could not get ctrl fd");
1315 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1317 return log_error_errno(errno
, "could not dup ctrl fd: %m");
1320 if (netlink_fd
< 0) {
1321 _cleanup_(udev_monitor_unrefp
) struct udev_monitor
*monitor
= NULL
;
1323 monitor
= udev_monitor_new_from_netlink(NULL
, "kernel");
1325 return log_error_errno(EINVAL
, "error initializing netlink socket");
1327 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1329 r
= udev_monitor_enable_receiving(monitor
);
1331 return log_error_errno(EINVAL
, "error binding netlink socket");
1333 fd
= udev_monitor_get_fd(monitor
);
1335 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1337 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1339 return log_error_errno(errno
, "could not dup netlink fd: %m");
1343 *rnetlink
= netlink_fd
;
1349 * read the kernel command line, in case we need to get into debug mode
1350 * udev.log_priority=<level> syslog priority
1351 * udev.children_max=<number of workers> events are fully serialized if set to 1
1352 * udev.exec_delay=<number of seconds> delay execution of every executed program
1353 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
1355 static int parse_proc_cmdline_item(const char *key
, const char *value
, void *data
) {
1363 if (proc_cmdline_key_streq(key
, "udev.log_priority")) {
1365 if (proc_cmdline_value_missing(key
, value
))
1368 r
= util_log_priority(value
);
1370 log_set_max_level(r
);
1372 } else if (proc_cmdline_key_streq(key
, "udev.event_timeout")) {
1374 if (proc_cmdline_value_missing(key
, value
))
1377 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1379 arg_event_timeout_usec
*= USEC_PER_SEC
;
1380 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1383 } else if (proc_cmdline_key_streq(key
, "udev.children_max")) {
1385 if (proc_cmdline_value_missing(key
, value
))
1388 r
= safe_atou(value
, &arg_children_max
);
1390 } else if (proc_cmdline_key_streq(key
, "udev.exec_delay")) {
1392 if (proc_cmdline_value_missing(key
, value
))
1395 r
= safe_atoi(value
, &arg_exec_delay
);
1397 } else if (startswith(key
, "udev."))
1398 log_warning("Unknown udev kernel command line option \"%s\"", key
);
1401 log_warning_errno(r
, "Failed to parse \"%s=%s\", ignoring: %m", key
, value
);
1406 static int help(void) {
1407 _cleanup_free_
char *link
= NULL
;
1410 r
= terminal_urlify_man("systemd-udevd.service", "8", &link
);
1414 printf("%s [OPTIONS...]\n\n"
1415 "Manages devices.\n\n"
1416 " -h --help Print this message\n"
1417 " -V --version Print version of the program\n"
1418 " -d --daemon Detach and run in the background\n"
1419 " -D --debug Enable debug output\n"
1420 " -c --children-max=INT Set maximum number of workers\n"
1421 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1422 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1423 " -N --resolve-names=early|late|never\n"
1424 " When to resolve users and groups\n"
1425 "\nSee the %s for details.\n"
1426 , program_invocation_short_name
1433 static int parse_argv(int argc
, char *argv
[]) {
1434 static const struct option options
[] = {
1435 { "daemon", no_argument
, NULL
, 'd' },
1436 { "debug", no_argument
, NULL
, 'D' },
1437 { "children-max", required_argument
, NULL
, 'c' },
1438 { "exec-delay", required_argument
, NULL
, 'e' },
1439 { "event-timeout", required_argument
, NULL
, 't' },
1440 { "resolve-names", required_argument
, NULL
, 'N' },
1441 { "help", no_argument
, NULL
, 'h' },
1442 { "version", no_argument
, NULL
, 'V' },
1451 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
1457 arg_daemonize
= true;
1460 r
= safe_atou(optarg
, &arg_children_max
);
1462 log_warning("Invalid --children-max ignored: %s", optarg
);
1465 r
= safe_atoi(optarg
, &arg_exec_delay
);
1467 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1470 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1472 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1474 arg_event_timeout_usec
*= USEC_PER_SEC
;
1475 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1482 if (streq(optarg
, "early")) {
1483 arg_resolve_names
= 1;
1484 } else if (streq(optarg
, "late")) {
1485 arg_resolve_names
= 0;
1486 } else if (streq(optarg
, "never")) {
1487 arg_resolve_names
= -1;
1489 log_error("resolve-names must be early, late or never");
1496 printf("%s\n", PACKAGE_VERSION
);
1501 assert_not_reached("Unhandled option");
1509 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1510 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1511 int r
, fd_worker
, one
= 1;
1514 assert(fd_ctrl
>= 0);
1515 assert(fd_uevent
>= 0);
1517 manager
= new0(Manager
, 1);
1521 manager
->fd_inotify
= -1;
1522 manager
->worker_watch
[WRITE_END
] = -1;
1523 manager
->worker_watch
[READ_END
] = -1;
1525 udev_builtin_init();
1527 manager
->rules
= udev_rules_new(arg_resolve_names
);
1528 if (!manager
->rules
)
1529 return log_error_errno(ENOMEM
, "error reading rules");
1531 LIST_HEAD_INIT(manager
->events
);
1532 udev_list_init(NULL
, &manager
->properties
, true);
1534 manager
->cgroup
= cgroup
;
1536 manager
->ctrl
= udev_ctrl_new_from_fd(fd_ctrl
);
1538 return log_error_errno(EINVAL
, "error taking over udev control socket");
1540 manager
->monitor
= udev_monitor_new_from_netlink_fd(NULL
, "kernel", fd_uevent
);
1541 if (!manager
->monitor
)
1542 return log_error_errno(EINVAL
, "error taking over netlink socket");
1544 /* unnamed socket from workers to the main daemon */
1545 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1547 return log_error_errno(errno
, "error creating socketpair: %m");
1549 fd_worker
= manager
->worker_watch
[READ_END
];
1551 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1553 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1555 manager
->fd_inotify
= udev_watch_init();
1556 if (manager
->fd_inotify
< 0)
1557 return log_error_errno(ENOMEM
, "error initializing inotify");
1559 udev_watch_restore();
1561 /* block and listen to all signals on signalfd */
1562 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1564 r
= sd_event_default(&manager
->event
);
1566 return log_error_errno(r
, "could not allocate event loop: %m");
1568 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1570 return log_error_errno(r
, "error creating sigint event source: %m");
1572 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1574 return log_error_errno(r
, "error creating sigterm event source: %m");
1576 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1578 return log_error_errno(r
, "error creating sighup event source: %m");
1580 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1582 return log_error_errno(r
, "error creating sigchld event source: %m");
1584 r
= sd_event_set_watchdog(manager
->event
, true);
1586 return log_error_errno(r
, "error creating watchdog event source: %m");
1588 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1590 return log_error_errno(r
, "error creating ctrl event source: %m");
1592 /* This needs to be after the inotify and uevent handling, to make sure
1593 * that the ping is send back after fully processing the pending uevents
1594 * (including the synthetic ones we may create due to inotify events).
1596 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1598 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1600 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1602 return log_error_errno(r
, "error creating inotify event source: %m");
1604 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1606 return log_error_errno(r
, "error creating uevent event source: %m");
1608 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1610 return log_error_errno(r
, "error creating worker event source: %m");
1612 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1614 return log_error_errno(r
, "error creating post event source: %m");
1616 *ret
= TAKE_PTR(manager
);
1621 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1622 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1625 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1627 r
= log_error_errno(r
, "failed to allocate manager object: %m");
1631 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1633 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1635 (void) sd_notifyf(false,
1637 "STATUS=Processing with %u children at max", arg_children_max
);
1639 r
= sd_event_loop(manager
->event
);
1641 log_error_errno(r
, "event loop failed: %m");
1645 sd_event_get_exit_code(manager
->event
, &r
);
1650 "STATUS=Shutting down...");
1652 udev_ctrl_cleanup(manager
->ctrl
);
1656 int main(int argc
, char *argv
[]) {
1657 _cleanup_free_
char *cgroup
= NULL
;
1658 int fd_ctrl
= -1, fd_uevent
= -1;
1661 log_set_target(LOG_TARGET_AUTO
);
1662 udev_parse_config();
1663 log_parse_environment();
1666 r
= parse_argv(argc
, argv
);
1670 r
= proc_cmdline_parse(parse_proc_cmdline_item
, NULL
, PROC_CMDLINE_STRIP_RD_PREFIX
);
1672 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1675 log_set_target(LOG_TARGET_CONSOLE
);
1676 log_set_max_level(LOG_DEBUG
);
1683 if (arg_children_max
== 0) {
1685 unsigned long mem_limit
;
1687 arg_children_max
= 8;
1689 if (sched_getaffinity(0, sizeof(cpu_set
), &cpu_set
) == 0)
1690 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1692 mem_limit
= physical_memory() / (128LU*1024*1024);
1693 arg_children_max
= MAX(10U, MIN(arg_children_max
, mem_limit
));
1695 log_debug("set children_max to %u", arg_children_max
);
1698 /* set umask before creating any file/directory */
1701 r
= log_error_errno(errno
, "could not change dir to /: %m");
1707 r
= mac_selinux_init();
1709 log_error_errno(r
, "could not initialize labelling: %m");
1713 r
= mkdir_errno_wrapper("/run/udev", 0755);
1714 if (r
< 0 && r
!= -EEXIST
) {
1715 log_error_errno(r
, "could not create /run/udev: %m");
1719 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1721 if (getppid() == 1) {
1722 /* get our own cgroup, we regularly kill everything udev has left behind
1723 we only do this on systemd systems, and only if we are directly spawned
1724 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1725 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1727 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
1728 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1730 log_warning_errno(r
, "failed to get cgroup: %m");
1734 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1736 r
= log_error_errno(r
, "could not listen on fds: %m");
1740 if (arg_daemonize
) {
1743 log_info("starting version " PACKAGE_VERSION
);
1745 /* connect /dev/null to stdin, stdout, stderr */
1746 if (log_get_max_level() < LOG_DEBUG
) {
1747 r
= make_null_stdio();
1749 log_warning_errno(r
, "Failed to redirect standard streams to /dev/null: %m");
1757 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1760 mac_selinux_finish();
1762 _exit(EXIT_SUCCESS
);
1767 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
1770 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1773 mac_selinux_finish();
1775 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;