1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
17 #include <sys/epoll.h>
19 #include <sys/inotify.h>
20 #include <sys/ioctl.h>
21 #include <sys/mount.h>
22 #include <sys/prctl.h>
23 #include <sys/signalfd.h>
24 #include <sys/socket.h>
30 #include "sd-daemon.h"
33 #include "alloc-util.h"
34 #include "cgroup-util.h"
35 #include "cpu-set-util.h"
36 #include "dev-setup.h"
39 #include "format-util.h"
44 #include "netlink-util.h"
45 #include "parse-util.h"
46 #include "proc-cmdline.h"
47 #include "process-util.h"
48 #include "selinux-util.h"
49 #include "signal-util.h"
50 #include "socket-util.h"
51 #include "string-util.h"
52 #include "terminal-util.h"
53 #include "udev-util.h"
55 #include "user-util.h"
/* Command-line / kernel-command-line configurable daemon settings. */
static bool arg_debug = false;                  /* -D/--debug: verbose debug logging */
static int arg_daemonize = false;               /* -d/--daemon: detach into background */
static int arg_resolve_names = 1;               /* 1=early, 0=late, -1=never (see --resolve-names) */
static unsigned arg_children_max;               /* max number of worker processes; 0 = not set yet */
static int arg_exec_delay;                      /* seconds to wait before executing RUN= programs */
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;          /* kill a worker after this long on one event */
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3; /* warn after one third of the timeout */
/* Global daemon state: event queue, worker table, sockets and event sources.
 * NOTE(review): additional fields of this struct are elided in this excerpt
 * (e.g. the sd_event loop, worker hashmap, fd_inotify, worker_watch pipe,
 * which are referenced elsewhere in the file). */
typedef struct Manager {
        LIST_HEAD(struct event, events);        /* queue of pending/running uevents */

        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;               /* compiled udev rules */
        struct udev_list properties;            /* global properties set via the control socket (ENV) */

        struct udev_monitor *monitor;           /* kernel uevent netlink monitor */
        struct udev_ctrl *ctrl;                 /* udevadm control socket */
        struct udev_ctrl_connection *ctrl_conn_blocking; /* connection kept open to block a client on EXIT */

        sd_event_source *ctrl_event;            /* event source for the control socket */
        sd_event_source *uevent_event;          /* event source for the uevent monitor */
        sd_event_source *inotify_event;         /* event source for device-node inotify watches */

        bool stop_exec_queue:1;                 /* true while STOP_EXEC_QUEUE is in effect */
        /* Fields of struct event (the struct's opening and some fields are
         * elided in this excerpt). One queued kernel uevent. */
        LIST_FIELDS(struct event, event);       /* linkage in Manager.events */

        struct udev_device *dev;                /* the device the event is about */
        struct udev_device *dev_kernel;         /* clone of the original kernel message, for re-sending on failure */
        struct worker *worker;                  /* worker currently processing this event, if any */
        enum event_state state;                 /* EVENT_QUEUED / EVENT_RUNNING / ... */

        unsigned long long int delaying_seqnum; /* seqnum of an earlier event we are waiting on */
        unsigned long long int seqnum;          /* kernel-assigned event sequence number */
        const char *devpath_old;                /* previous devpath on rename, or NULL */

        sd_event_source *timeout_warning;       /* fires when the event takes suspiciously long */
        sd_event_source *timeout;               /* fires when the event must be killed */
/* Forward declaration: drop queued events matching 'type' (EVENT_UNDEF = all). */
static void event_queue_cleanup(Manager *manager, enum event_state type);
        /* Fields of struct worker (opening and other fields elided in this excerpt). */
        struct udev_monitor *monitor;           /* private monitor used to hand devices to this worker */
        enum worker_state state;                /* WORKER_IDLE / WORKER_RUNNING / WORKER_KILLED / ... */

/* passed from worker to main process */
struct worker_message {
/* Release one queued event: unlink it from the manager's queue, drop its
 * device references and timers, detach it from its worker, and remove the
 * /run/udev/queue flag file when the queue becomes empty.
 * NOTE(review): several guard branches (NULL checks, returns) are elided
 * in this excerpt. */
static void event_free(struct event *event) {
        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        /* detach from the worker (presumably guarded by a NULL check in the
         * full source — TODO confirm) */
        event->worker->event = NULL;

        if (LIST_IS_EMPTY(event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid_cached()) {
                        r = unlink("/run/udev/queue");
                        log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
/* Release a worker: remove it from the manager's pid-keyed hashmap, drop its
 * private monitor and free any event it was still processing. */
static void worker_free(struct worker *worker) {
        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);
/* Free every tracked worker and then the workers hashmap itself.
 * NOTE(review): the HASHMAP_FOREACH body (presumably worker_free(worker);)
 * is elided in this excerpt. */
static void manager_workers_free(Manager *manager) {
        struct worker *worker;

        HASHMAP_FOREACH(worker, manager->workers, i)

        manager->workers = hashmap_free(manager->workers);
/* Allocate a worker record for a forked child 'pid' and register it in the
 * manager's workers hashmap, keyed by pid. Ownership of the record moves to
 * the hashmap; *ret receives the pointer on success.
 * NOTE(review): error-handling branches after the allocating calls are
 * elided in this excerpt. */
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;

        assert(worker_monitor);

        worker = new0(struct worker, 1);
        worker->manager = manager;

        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);

        /* hashmap now owns the record; hand the caller the raw pointer */
        *ret = TAKE_PTR(worker);
/* sd-event timer callback: the event exceeded arg_event_timeout_usec.
 * Kill the worker handling it (SIGKILL, waking it first) and mark the
 * worker as killed. */
static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
/* sd-event timer callback: the event exceeded arg_event_timeout_warn_usec.
 * Only log a warning; the hard timeout handler does the killing. */
static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
/* Bind a queued event to an idle worker: mark both as running, link them to
 * each other, and arm the per-event warning and kill timers relative to the
 * current CLOCK_MONOTONIC time of the manager's event loop. */
static void worker_attach_event(struct worker *worker, struct event *event) {
        assert(worker->manager);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);

        /* both timers are best-effort; failures are deliberately ignored */
        (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
/* Tear down the whole manager: event sources and loop, workers, queued
 * events, sockets, rules, properties, and owned file descriptors.
 * NOTE(review): the initial NULL check and the final free() are elided in
 * this excerpt. */
static void manager_free(Manager *manager) {
        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);
/* Generates manager_freep() for use with _cleanup_() auto-cleanup. */
DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
302 static int worker_send_message(int fd
) {
303 struct worker_message message
= {};
305 return loop_write(fd
, &message
, sizeof(message
), false);
/* Decide whether the worker should take a shared flock() on the device node
 * before processing an event. Only block devices are locked, and virtual
 * block devices that never have an exclusive userspace opener (device-mapper,
 * md raid, drbd) are excluded.
 *
 * Fix: as excerpted, the subsystem guard had no early return and 'sysname'
 * was never declared, so non-"block" devices fell through to the prefix
 * checks and the function did not compile; restore the guard-clause form. */
static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);

        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}
/* Fork a new worker process for 'event'. The child drops the manager's
 * listening sockets and event sources, processes the initial device, then
 * loops waiting on its private monitor for further devices or a signal.
 * The parent registers the child in the workers hashmap and attaches the
 * event to it.
 * NOTE(review): many lines are elided in this excerpt — the fork() itself,
 * signal-mask setup, loop braces and most error-handling branches — so the
 * control flow below is not contiguous. */
static void worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
        if (worker_monitor == NULL)

        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        log_error_errno(r, "worker: could not enable receiving of device: %m");

        /* ---- child side (inside the fork, elided above) ---- */
        struct udev_device *dev = NULL;
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
        _cleanup_close_ int fd_signal = -1, fd_ep = -1;
        struct epoll_event ep_signal = { .events = EPOLLIN };
        struct epoll_event ep_monitor = { .events = EPOLLIN };

        /* take initial device from queue */
        dev = TAKE_PTR(event->dev);

        unsetenv("NOTIFY_SOCKET");

        /* the child does not inherit the manager's responsibilities: drop
         * worker records, queued events, listening sockets and the parent's
         * event loop */
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        manager->monitor = udev_monitor_unref(manager->monitor);
        manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);
        manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->inotify_event = sd_event_source_unref(manager->inotify_event);

        manager->event = sd_event_unref(manager->event);

        /* the worker polls a signalfd and its monitor fd via epoll */
        fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        r = log_error_errno(errno, "error creating signalfd %m");

        ep_signal.data.fd = fd_signal;

        fd_monitor = udev_monitor_get_fd(worker_monitor);
        ep_monitor.data.fd = fd_monitor;

        fd_ep = epoll_create1(EPOLL_CLOEXEC);
        r = log_error_errno(errno, "error creating epoll fd: %m");

        if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
            epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                r = log_error_errno(errno, "fail to add fds to epoll: %m");

        /* Request TERM signal if parent exits.
           Ignore error, not much we can do in that case. */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        /* Reset OOM score, we only protect the main daemon. */
        write_string_file("/proc/self/oom_score_adj", "0", 0);

        /* per-device processing loop (loop construct elided in this excerpt) */
        struct udev_event *udev_event;

        log_debug("seq %llu running", udev_device_get_seqnum(dev));
        udev_event = udev_event_new(dev);
        if (udev_event == NULL) {

        if (arg_exec_delay > 0)
                udev_event->exec_delay = arg_exec_delay;

        /*
         * Take a shared lock on the device node; this establishes
         * a concept of device "ownership" to serialize device
         * access. External processes holding an exclusive lock will
         * cause udev to skip the event handling; in the case udev
         * acquired the lock, the external process can block until
         * udev has finished its event handling.
         */
        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
            shall_lock_device(dev)) {
                struct udev_device *d = dev;

                /* lock the whole disk, not an individual partition */
                if (streq_ptr("partition", udev_device_get_devtype(d)))
                        d = udev_device_get_parent(d);

                fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                        log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                        fd_lock = safe_close(fd_lock);

        /* needed for renaming netifs */
        udev_event->rtnl = rtnl;

        /* apply rules, create node, symlinks */
        udev_event_execute_rules(udev_event,
                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                 &manager->properties,

        udev_event_execute_run(udev_event,
                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

        if (udev_event->rtnl)
                /* in case rtnl was initialized */
                rtnl = sd_netlink_ref(udev_event->rtnl);

        /* apply/restore inotify watch */
        if (udev_event->inotify_watch) {
                udev_watch_begin(dev);
                udev_device_update_db(dev);

        /* send processed event back to libudev listeners */
        udev_monitor_send_device(worker_monitor, NULL, dev);

        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

        /* send udevd the result of the event execution */
        r = worker_send_message(manager->worker_watch[WRITE_END]);
        log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                        udev_device_get_seqnum(dev));

        udev_device_unref(dev);

        udev_event_unref(udev_event);

        /* wait for more device messages from main udevd, or term signal */
        while (dev == NULL) {
                struct epoll_event ev[4];

                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                r = log_error_errno(errno, "failed to poll: %m");

                for (i = 0; i < fdcount; i++) {
                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                dev = udev_monitor_receive_device(worker_monitor);
                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                struct signalfd_siginfo fdsi;

                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                if (size != sizeof(struct signalfd_siginfo))

                                switch (fdsi.ssi_signo) {

        /* child exit path */
        udev_device_unref(dev);
        manager_free(manager);
        _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);

        /* ---- parent side: fork failed, re-queue the event ---- */
        event->state = EVENT_QUEUED;
        log_error_errno(errno, "fork of child failed: %m");

        /* ---- parent side: fork succeeded, track the child ---- */
        struct worker *worker;

        r = worker_new(&worker, manager, worker_monitor, pid);

        worker_attach_event(worker, event);
        log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
/* Dispatch one queued event: hand it to an idle worker if one exists
 * (killing a worker whose monitor socket refuses the message), otherwise
 * fork a new worker unless the arg_children_max limit is reached.
 * NOTE(review): 'continue'/'return' statements inside the loop and the
 * failure branch of udev_monitor_send_device() are elided in this excerpt. */
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state != WORKER_IDLE)

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                kill(worker->pid, SIGKILL);
                worker->state = WORKER_KILLED;

                worker_attach_event(worker, event);

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
/* Allocate a queue entry for a freshly received kernel device event, copy
 * the identifying attributes used for dependency ordering (seqnum, devpath,
 * devnum, ifindex), and append it to the manager's event list. Creates the
 * /run/udev/queue flag file when the queue transitions from empty.
 * NOTE(review): allocation-failure branches and the return are elided in
 * this excerpt. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);

        event->manager = manager;
        /* keep a pristine clone of the kernel message for re-sending on worker failure */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;
        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                log_warning_errno(r, "could not touch /run/udev/queue: %m");

        LIST_APPEND(event, manager->events, event);
/* Send SIGTERM to every worker not already marked killed, and mark it so
 * that it is not signalled twice. */
static void manager_kill_workers(Manager *manager) {
        struct worker *worker;

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
/* lookup event for identical, parent, child device */
/* Returns whether 'event' must wait: scans earlier queue entries for one it
 * depends on — same devnum, same ifindex, its old devpath, or a devpath that
 * is a prefix/extension of its own (parent or child device). Records the
 * blocking entry's seqnum in event->delaying_seqnum so later scans can skip
 * ahead.
 * NOTE(review): the 'continue'/'break'/'return' statements after each check
 * are elided in this excerpt. */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                        event->delaying_seqnum = loop_event->seqnum;

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;

        /* no matching device */
/* sd-event timer callback armed by manager_exit(): workers did not finish
 * within the grace period, so force the event loop to exit with -ETIMEDOUT. */
static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);
/* Begin orderly shutdown: stop accepting control messages and uevents,
 * drop queued events, SIGTERM all workers, and arm a 30s deadline after
 * which on_exit_timeout() aborts the wait.
 * NOTE(review): the sd_notify() call whose "STATUS=Starting shutdown..."
 * argument appears below is partially elided in this excerpt. */
static void manager_exit(Manager *manager) {
        manager->exit = true;

        "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
/* reload requested, HUP signal received, rules changed, builtin changed */
/* Drop the compiled rules (lazily re-read by event_queue_start()) and
 * terminate the workers so they pick up the new configuration.
 * NOTE(review): the surrounding sd_notify()/sd_notifyf() calls whose
 * "STATUS=..." arguments appear below are partially elided in this excerpt. */
static void manager_reload(Manager *manager) {
        "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);

        "STATUS=Processing with %u children at max", arg_children_max);
/* Walk the queue and dispatch every EVENT_QUEUED entry whose dependencies
 * (see is_devpath_busy()) are satisfied. Rate-limits the check for changed
 * rules/builtins to once per ~3 seconds and lazily (re)compiles the rules.
 * Does nothing while the queue is empty, the daemon is exiting, or the
 * queue is stopped via control message. */
static void event_queue_start(Manager *manager) {
        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;

        /* rules were dropped by manager_reload(); recompile on demand */
        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_names);

        LIST_FOREACH(event,event,manager->events) {
                if (event->state != EVENT_QUEUED)

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))

                event_run(manager, event);
/* Free every queued event whose state matches 'match_type'; EVENT_UNDEF
 * matches all states. Uses the _SAFE list iterator because event_free()
 * unlinks the current entry. */
static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct event *event, *tmp;

        LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
                if (match_type != EVENT_UNDEF && match_type != event->state)
/* sd-event I/O callback on the worker_watch pipe: drain completion messages
 * from workers. Each message carries SCM_CREDENTIALS; the sender pid is used
 * to look up the worker, flip it back to WORKER_IDLE (unless already killed),
 * free its finished event, and try to schedule more events.
 * NOTE(review): the receive loop construct, several initializer fields and
 * 'continue' statements are elided in this excerpt. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        struct worker_message msg;
        struct iovec iovec = {
                .iov_len = sizeof(msg),
        struct cmsghdr cmsghdr;
        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
        struct msghdr msghdr = {
                .msg_control = &control,
                .msg_controllen = sizeof(control),
        struct cmsghdr *cmsg;
        struct ucred *ucred = NULL;
        struct worker *worker;

        size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
        else if (errno == EAGAIN)
                /* nothing more to read */

        return log_error_errno(errno, "failed to receive message: %m");
        } else if (size != sizeof(struct worker_message)) {
                log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);

        /* pick the sender's credentials out of the ancillary data */
        CMSG_FOREACH(cmsg, &msghdr) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_CREDENTIALS &&
                    cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                        ucred = (struct ucred *) CMSG_DATA(cmsg);

        if (!ucred || ucred->pid <= 0) {
                log_warning_errno(EIO, "ignoring worker message without valid PID");

        /* lookup worker who sent the signal */
        worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);

        if (worker->state != WORKER_KILLED)
                worker->state = WORKER_IDLE;

        /* worker returned */
        event_free(worker->event);

        /* we have free workers, try to schedule events */
        event_queue_start(manager);
/* sd-event I/O callback on the kernel uevent monitor: receive one device,
 * stamp its usec-initialized time, queue it, and kick the scheduler.
 * NOTE(review): the NULL-device check and failure branch of
 * event_queue_insert() are elided in this excerpt. */
static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;

        dev = udev_monitor_receive_device(manager->monitor);
        udev_device_ensure_usec_initialized(dev, NULL);
        r = event_queue_insert(manager, dev);
        udev_device_unref(dev);

        /* we have fresh events, try to schedule them */
        event_queue_start(manager);
/* receive the udevd message from userspace */
/* sd-event I/O callback on the control socket: handle one udevadm control
 * request — SET_LOG_LEVEL, STOP/START_EXEC_QUEUE, RELOAD, ENV (set/unset a
 * global property), SET_MAX_CHILDREN, PING and EXIT. Workers are killed
 * after log-level and property changes so they restart with the new state.
 * NOTE(review): connection/message NULL checks, several value-range guards
 * and the 'str' handling block structure are elided in this excerpt. */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
        log_set_max_level(i);
        manager_kill_workers(manager);

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);

        str = udev_ctrl_get_set_env(ctrl_msg);
        _cleanup_free_ char *key = NULL;

        /* "KEY=value" sets a property, "KEY=" unsets it */
        val = strchr(key, '=');
        if (val[0] == '\0') {
                log_debug("udevd message (ENV) received, unset '%s'", key);
                udev_list_entry_add(&manager->properties, key, NULL);
        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
        udev_list_entry_add(&manager->properties, key, val);
        log_error("wrong key format '%s'", key);
        manager_kill_workers(manager);

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
        arg_children_max = i;

        (void) sd_notifyf(false,
        "STATUS=Processing with %u children at max", arg_children_max);

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
/* After a device node was closed for writing, synthesize "change" uevents so
 * listeners re-probe it. For whole disks (excluding device-mapper), first try
 * BLKRRPART under an exclusive flock: if the kernel re-read a partition table
 * it already emitted the needed events; otherwise write "change" into the
 * uevent file of the disk and of each enumerated partition. For any other
 * device, just write "change" to its own uevent file.
 * NOTE(review): error-handling branches, fd close and several returns are
 * elided in this excerpt. */
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                _cleanup_(udev_enumerate_unrefp) struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event
                 * in that case.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                r = flock(fd, LOCK_EX|LOCK_NB);
                r = ioctl(fd, BLKRRPART, 0);
                part_table_read = true;

                /* search for partitions */
                e = udev_enumerate_new(NULL);

                r = udev_enumerate_add_match_parent(e, dev);

                r = udev_enumerate_add_match_subsystem(e, "block");

                r = udev_enumerate_scan_devices(e);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_(udev_device_unrefp) struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(NULL, udev_list_entry_get_name(item));

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))

                        has_partitions = true;

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions — nothing left to do.
                 */
                if (part_table_read && has_partitions)

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_(udev_device_unrefp) struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(NULL, udev_list_entry_get_name(item));

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        /* non-disk (or device-mapper) devices: plain "change" on the device itself */
        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
/* sd-event I/O callback on the inotify fd watching device nodes: on
 * IN_CLOSE_WRITE synthesize "change" events (and immediately drain the
 * resulting uevent so `udevadm settle` observes a consistent queue); on
 * IN_IGNORED drop the watch.
 * NOTE(review): the NULL-lookup 'continue' and the read-length checks are
 * elided in this excerpt. */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;

        l = read(fd, &buffer, sizeof(buffer));
        if (IN_SET(errno, EAGAIN, EINTR))

        return log_error_errno(errno, "Failed to read inotify fd: %m");

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(udev_device_unrefp) struct udev_device *dev = NULL;

                dev = udev_watch_lookup(e->wd);

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(dev);
/* SIGTERM/SIGINT handler: begin orderly shutdown of the daemon. */
static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        manager_exit(manager);
/* SIGHUP handler: flush configuration and restart workers. */
static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        manager_reload(manager);
/* SIGCHLD handler: reap all exited workers (WNOHANG loop), log how each one
 * ended, and for a worker that died abnormally while holding an event, drop
 * that event's on-disk db state and re-send the pristine kernel message so
 * listeners still see it. Finally frees the worker and reschedules.
 * NOTE(review): the reap loop construct, 'continue' statements and some
 * braces are elided in this excerpt. */
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        struct worker *worker;

        pid = waitpid(-1, &status, WNOHANG);

        worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);

        if (WIFEXITED(status)) {
                if (WEXITSTATUS(status) == 0)
                        log_debug("worker ["PID_FMT"] exited", pid);
                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
        } else if (WIFSIGNALED(status)) {
                log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
        } else if (WIFSTOPPED(status)) {
                log_info("worker ["PID_FMT"] stopped", pid);
        } else if (WIFCONTINUED(status)) {
                log_info("worker ["PID_FMT"] continued", pid);
        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

        if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                if (worker->event) {
                        log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                        /* delete state from disk */
                        udev_device_delete_db(worker->event->dev);
                        udev_device_tag_index(worker->event->dev, NULL, false);
                        /* forward kernel event without amending it */
                        udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);

        worker_free(worker);

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);
/* sd-event post callback, run after every loop iteration: once the queue is
 * empty, reap idle workers, exit the loop if shutdown was requested, and
 * otherwise sweep stray processes left in our cgroup. */
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        if (LIST_IS_EMPTY(manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);

                if (manager->exit) {
                        r = sd_event_exit(manager->event, 0);

                } else if (manager->cgroup)
                        /* cleanup possible left-over processes in our cgroup */
                        cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
/* Obtain the control and netlink sockets: prefer fds passed in by systemd
 * socket activation (sd_listen_fds), and create/bind whichever of the two
 * was not passed. The returned fds are CLOEXEC duplicates (>= 3).
 * NOTE(review): error checks between the calls, the *rctrl assignment and
 * the return are elided in this excerpt. */
static int listen_fds(int *rctrl, int *rnetlink) {
        int ctrl_fd = -1, netlink_fd = -1;

        n = sd_listen_fds(true);

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)

        /* no activated control socket: create and bind one ourselves */
        _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;

        ctrl = udev_ctrl_new();
        return log_error_errno(EINVAL, "error initializing udev control socket");

        r = udev_ctrl_enable_receiving(ctrl);
        return log_error_errno(EINVAL, "error binding udev control socket");

        fd = udev_ctrl_get_fd(ctrl);
        return log_error_errno(EIO, "could not get ctrl fd");

        ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
        return log_error_errno(errno, "could not dup ctrl fd: %m");

        if (netlink_fd < 0) {
                /* no activated netlink socket: open the kernel uevent monitor */
                _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;

                monitor = udev_monitor_new_from_netlink(NULL, "kernel");
                return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                return log_error_errno(netlink_fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                return log_error_errno(errno, "could not dup netlink fd: %m");

        *rnetlink = netlink_fd;
/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_priority=<level>              syslog priority
 *   udev.children_max=<number of workers>  events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>    delay execution of every executed program
 *   udev.event_timeout=<number of seconds> seconds to wait before terminating an event
 */
/* proc_cmdline() callback: parse one udev.* kernel command line switch into
 * the corresponding arg_* global. Unknown udev.* keys are warned about;
 * parse failures are logged and ignored.
 * NOTE(review): early returns after the value-missing checks and the
 * r < 0 guards are elided in this excerpt. */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        if (proc_cmdline_key_streq(key, "udev.log_priority")) {

                if (proc_cmdline_value_missing(key, value))

                r = util_log_priority(value);
                log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))

                r = safe_atou64(value, &arg_event_timeout_usec);
                arg_event_timeout_usec *= USEC_PER_SEC;
                /* keep the warning threshold at a third of the timeout, minimum 1 */
                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))

                r = safe_atoi(value, &arg_exec_delay);

        } else if (startswith(key, "udev."))
                log_warning("Unknown udev kernel command line option \"%s\"", key);

        log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
/* Print command line usage, with a clickable man-page reference where the
 * terminal supports it. */
static int help(void) {
        _cleanup_free_ char *link = NULL;

        r = terminal_urlify_man("systemd-udevd.service", "8", &link);

        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               " -h --help Print this message\n"
               " -V --version Print version of the program\n"
               " -d --daemon Detach and run in the background\n"
               " -D --debug Enable debug output\n"
               " -c --children-max=INT Set maximum number of workers\n"
               " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
               " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
               " -N --resolve-names=early|late|never\n"
               " When to resolve users and groups\n"
               "\nSee the %s for details.\n"
               , program_invocation_short_name
1408 static int parse_argv(int argc
, char *argv
[]) {
1409 static const struct option options
[] = {
1410 { "daemon", no_argument
, NULL
, 'd' },
1411 { "debug", no_argument
, NULL
, 'D' },
1412 { "children-max", required_argument
, NULL
, 'c' },
1413 { "exec-delay", required_argument
, NULL
, 'e' },
1414 { "event-timeout", required_argument
, NULL
, 't' },
1415 { "resolve-names", required_argument
, NULL
, 'N' },
1416 { "help", no_argument
, NULL
, 'h' },
1417 { "version", no_argument
, NULL
, 'V' },
1426 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
1432 arg_daemonize
= true;
1435 r
= safe_atou(optarg
, &arg_children_max
);
1437 log_warning("Invalid --children-max ignored: %s", optarg
);
1440 r
= safe_atoi(optarg
, &arg_exec_delay
);
1442 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1445 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1447 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1449 arg_event_timeout_usec
*= USEC_PER_SEC
;
1450 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1457 if (streq(optarg
, "early")) {
1458 arg_resolve_names
= 1;
1459 } else if (streq(optarg
, "late")) {
1460 arg_resolve_names
= 0;
1461 } else if (streq(optarg
, "never")) {
1462 arg_resolve_names
= -1;
1464 log_error("resolve-names must be early, late or never");
1471 printf("%s\n", PACKAGE_VERSION
);
1476 assert_not_reached("Unhandled option");
1484 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1485 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1486 int r
, fd_worker
, one
= 1;
1489 assert(fd_ctrl
>= 0);
1490 assert(fd_uevent
>= 0);
1492 manager
= new0(Manager
, 1);
1496 manager
->fd_inotify
= -1;
1497 manager
->worker_watch
[WRITE_END
] = -1;
1498 manager
->worker_watch
[READ_END
] = -1;
1500 udev_builtin_init();
1502 manager
->rules
= udev_rules_new(arg_resolve_names
);
1503 if (!manager
->rules
)
1504 return log_error_errno(ENOMEM
, "error reading rules");
1506 LIST_HEAD_INIT(manager
->events
);
1507 udev_list_init(NULL
, &manager
->properties
, true);
1509 manager
->cgroup
= cgroup
;
1511 manager
->ctrl
= udev_ctrl_new_from_fd(fd_ctrl
);
1513 return log_error_errno(EINVAL
, "error taking over udev control socket");
1515 manager
->monitor
= udev_monitor_new_from_netlink_fd(NULL
, "kernel", fd_uevent
);
1516 if (!manager
->monitor
)
1517 return log_error_errno(EINVAL
, "error taking over netlink socket");
1519 /* unnamed socket from workers to the main daemon */
1520 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1522 return log_error_errno(errno
, "error creating socketpair: %m");
1524 fd_worker
= manager
->worker_watch
[READ_END
];
1526 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1528 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1530 manager
->fd_inotify
= udev_watch_init();
1531 if (manager
->fd_inotify
< 0)
1532 return log_error_errno(ENOMEM
, "error initializing inotify");
1534 udev_watch_restore();
1536 /* block and listen to all signals on signalfd */
1537 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1539 r
= sd_event_default(&manager
->event
);
1541 return log_error_errno(r
, "could not allocate event loop: %m");
1543 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1545 return log_error_errno(r
, "error creating sigint event source: %m");
1547 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1549 return log_error_errno(r
, "error creating sigterm event source: %m");
1551 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1553 return log_error_errno(r
, "error creating sighup event source: %m");
1555 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1557 return log_error_errno(r
, "error creating sigchld event source: %m");
1559 r
= sd_event_set_watchdog(manager
->event
, true);
1561 return log_error_errno(r
, "error creating watchdog event source: %m");
1563 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1565 return log_error_errno(r
, "error creating ctrl event source: %m");
1567 /* This needs to be after the inotify and uevent handling, to make sure
1568 * that the ping is send back after fully processing the pending uevents
1569 * (including the synthetic ones we may create due to inotify events).
1571 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1573 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1575 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1577 return log_error_errno(r
, "error creating inotify event source: %m");
1579 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1581 return log_error_errno(r
, "error creating uevent event source: %m");
1583 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1585 return log_error_errno(r
, "error creating worker event source: %m");
1587 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1589 return log_error_errno(r
, "error creating post event source: %m");
1591 *ret
= TAKE_PTR(manager
);
1596 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1597 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1600 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1602 r
= log_error_errno(r
, "failed to allocate manager object: %m");
1606 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1608 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1610 (void) sd_notifyf(false,
1612 "STATUS=Processing with %u children at max", arg_children_max
);
1614 r
= sd_event_loop(manager
->event
);
1616 log_error_errno(r
, "event loop failed: %m");
1620 sd_event_get_exit_code(manager
->event
, &r
);
1625 "STATUS=Shutting down...");
1627 udev_ctrl_cleanup(manager
->ctrl
);
1631 int main(int argc
, char *argv
[]) {
1632 _cleanup_free_
char *cgroup
= NULL
;
1633 int fd_ctrl
= -1, fd_uevent
= -1;
1636 log_set_target(LOG_TARGET_AUTO
);
1637 udev_parse_config();
1638 log_parse_environment();
1641 r
= parse_argv(argc
, argv
);
1645 r
= proc_cmdline_parse(parse_proc_cmdline_item
, NULL
, PROC_CMDLINE_STRIP_RD_PREFIX
);
1647 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1650 log_set_target(LOG_TARGET_CONSOLE
);
1651 log_set_max_level(LOG_DEBUG
);
1658 if (arg_children_max
== 0) {
1660 unsigned long mem_limit
;
1662 arg_children_max
= 8;
1664 if (sched_getaffinity(0, sizeof(cpu_set
), &cpu_set
) == 0)
1665 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1667 mem_limit
= physical_memory() / (128LU*1024*1024);
1668 arg_children_max
= MAX(10U, MIN(arg_children_max
, mem_limit
));
1670 log_debug("set children_max to %u", arg_children_max
);
1673 /* set umask before creating any file/directory */
1676 r
= log_error_errno(errno
, "could not change dir to /: %m");
1682 r
= mac_selinux_init();
1684 log_error_errno(r
, "could not initialize labelling: %m");
1688 r
= mkdir_errno_wrapper("/run/udev", 0755);
1689 if (r
< 0 && r
!= -EEXIST
) {
1690 log_error_errno(r
, "could not create /run/udev: %m");
1694 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1696 if (getppid() == 1) {
1697 /* get our own cgroup, we regularly kill everything udev has left behind
1698 we only do this on systemd systems, and only if we are directly spawned
1699 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1700 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1702 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
1703 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1705 log_warning_errno(r
, "failed to get cgroup: %m");
1709 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1711 r
= log_error_errno(r
, "could not listen on fds: %m");
1715 if (arg_daemonize
) {
1718 log_info("starting version " PACKAGE_VERSION
);
1720 /* connect /dev/null to stdin, stdout, stderr */
1721 if (log_get_max_level() < LOG_DEBUG
) {
1722 r
= make_null_stdio();
1724 log_warning_errno(r
, "Failed to redirect standard streams to /dev/null: %m");
1732 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1735 mac_selinux_finish();
1737 _exit(EXIT_SUCCESS
);
1742 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
1745 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1748 mac_selinux_finish();
1750 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;