1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
17 #include <sys/epoll.h>
19 #include <sys/inotify.h>
20 #include <sys/ioctl.h>
21 #include <sys/mount.h>
22 #include <sys/prctl.h>
23 #include <sys/signalfd.h>
24 #include <sys/socket.h>
30 #include "sd-daemon.h"
33 #include "alloc-util.h"
34 #include "cgroup-util.h"
35 #include "cpu-set-util.h"
36 #include "dev-setup.h"
37 #include "device-util.h"
40 #include "format-util.h"
44 #include "libudev-device-internal.h"
46 #include "netlink-util.h"
47 #include "parse-util.h"
48 #include "proc-cmdline.h"
49 #include "process-util.h"
50 #include "selinux-util.h"
51 #include "signal-util.h"
52 #include "socket-util.h"
53 #include "string-util.h"
54 #include "terminal-util.h"
55 #include "udev-builtin.h"
56 #include "udev-ctrl.h"
57 #include "udev-util.h"
58 #include "udev-watch.h"
60 #include "user-util.h"
/* Daemon-wide tunables, set from argv (parse_argv) and the kernel command
 * line (parse_proc_cmdline_item). */
static bool arg_debug = false;                /* -D/--debug: enable debug output */
static int arg_daemonize = false;             /* -d/--daemon: detach and run in the background */
static int arg_resolve_names = 1;             /* --resolve-names: 1=early, 0=late, -1=never */
static unsigned arg_children_max;             /* -c/--children-max: max concurrent workers; 0 = unset */
static int arg_exec_delay;                    /* -e/--exec-delay: seconds to wait before RUN= programs */
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;           /* kill a worker stuck on one event this long */
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;  /* warn after a third of the kill timeout */
70 typedef struct Manager
{
73 LIST_HEAD(struct event
, events
);
75 pid_t pid
; /* the process that originally allocated the manager object */
77 struct udev_rules
*rules
;
80 struct udev_monitor
*monitor
;
81 struct udev_ctrl
*ctrl
;
82 struct udev_ctrl_connection
*ctrl_conn_blocking
;
86 sd_event_source
*ctrl_event
;
87 sd_event_source
*uevent_event
;
88 sd_event_source
*inotify_event
;
92 bool stop_exec_queue
:1;
103 LIST_FIELDS(struct event
, event
);
105 struct udev_device
*dev
;
106 struct udev_device
*dev_kernel
;
107 struct worker
*worker
;
108 enum event_state state
;
109 unsigned long long int delaying_seqnum
;
110 unsigned long long int seqnum
;
113 const char *devpath_old
;
117 sd_event_source
*timeout_warning
;
118 sd_event_source
*timeout
;
121 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
133 struct udev_monitor
*monitor
;
134 enum worker_state state
;
138 /* passed from worker to main process */
139 struct worker_message
{
142 static void event_free(struct event
*event
) {
147 assert(event
->manager
);
149 LIST_REMOVE(event
, event
->manager
->events
, event
);
150 udev_device_unref(event
->dev
);
151 udev_device_unref(event
->dev_kernel
);
153 sd_event_source_unref(event
->timeout_warning
);
154 sd_event_source_unref(event
->timeout
);
157 event
->worker
->event
= NULL
;
159 if (LIST_IS_EMPTY(event
->manager
->events
)) {
160 /* only clean up the queue from the process that created it */
161 if (event
->manager
->pid
== getpid_cached()) {
162 r
= unlink("/run/udev/queue");
164 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
171 static void worker_free(struct worker
*worker
) {
175 assert(worker
->manager
);
177 hashmap_remove(worker
->manager
->workers
, PID_TO_PTR(worker
->pid
));
178 udev_monitor_unref(worker
->monitor
);
179 event_free(worker
->event
);
184 static void manager_workers_free(Manager
*manager
) {
185 struct worker
*worker
;
190 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
193 manager
->workers
= hashmap_free(manager
->workers
);
196 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
197 _cleanup_free_
struct worker
*worker
= NULL
;
202 assert(worker_monitor
);
205 worker
= new0(struct worker
, 1);
209 worker
->manager
= manager
;
210 /* close monitor, but keep address around */
211 udev_monitor_disconnect(worker_monitor
);
212 worker
->monitor
= udev_monitor_ref(worker_monitor
);
215 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
219 r
= hashmap_put(manager
->workers
, PID_TO_PTR(pid
), worker
);
223 *ret
= TAKE_PTR(worker
);
228 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
229 struct event
*event
= userdata
;
232 assert(event
->worker
);
234 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
235 event
->worker
->state
= WORKER_KILLED
;
237 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
242 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
243 struct event
*event
= userdata
;
247 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
252 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
257 assert(worker
->manager
);
259 assert(!event
->worker
);
260 assert(!worker
->event
);
262 worker
->state
= WORKER_RUNNING
;
263 worker
->event
= event
;
264 event
->state
= EVENT_RUNNING
;
265 event
->worker
= worker
;
267 e
= worker
->manager
->event
;
269 assert_se(sd_event_now(e
, CLOCK_MONOTONIC
, &usec
) >= 0);
271 (void) sd_event_add_time(e
, &event
->timeout_warning
, CLOCK_MONOTONIC
,
272 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
274 (void) sd_event_add_time(e
, &event
->timeout
, CLOCK_MONOTONIC
,
275 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
278 static void manager_free(Manager
*manager
) {
284 sd_event_source_unref(manager
->ctrl_event
);
285 sd_event_source_unref(manager
->uevent_event
);
286 sd_event_source_unref(manager
->inotify_event
);
288 sd_event_unref(manager
->event
);
289 manager_workers_free(manager
);
290 event_queue_cleanup(manager
, EVENT_UNDEF
);
292 udev_monitor_unref(manager
->monitor
);
293 udev_ctrl_unref(manager
->ctrl
);
294 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
296 hashmap_free_free_free(manager
->properties
);
297 udev_rules_unref(manager
->rules
);
299 safe_close(manager
->fd_inotify
);
300 safe_close_pair(manager
->worker_watch
);
305 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
307 static int worker_send_message(int fd
) {
308 struct worker_message message
= {};
310 return loop_write(fd
, &message
, sizeof(message
), false);
/* Whether event handling should take a shared BSD flock on the device node:
 * only block devices, and not device-mapper, MD or DRBD nodes (their
 * virtual/stacked nature makes the lock protocol inapplicable). */
static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);
        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}
325 static void worker_spawn(Manager
*manager
, struct event
*event
) {
326 _cleanup_(udev_monitor_unrefp
) struct udev_monitor
*worker_monitor
= NULL
;
330 /* listen for new events */
331 worker_monitor
= udev_monitor_new_from_netlink(NULL
, NULL
);
332 if (worker_monitor
== NULL
)
334 /* allow the main daemon netlink address to send devices to the worker */
335 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
336 r
= udev_monitor_enable_receiving(worker_monitor
);
338 log_error_errno(r
, "worker: could not enable receiving of device: %m");
343 struct udev_device
*dev
= NULL
;
344 _cleanup_(sd_netlink_unrefp
) sd_netlink
*rtnl
= NULL
;
346 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
347 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
348 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
351 /* take initial device from queue */
352 dev
= TAKE_PTR(event
->dev
);
354 unsetenv("NOTIFY_SOCKET");
356 manager_workers_free(manager
);
357 event_queue_cleanup(manager
, EVENT_UNDEF
);
359 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
360 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
361 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
362 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
364 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
365 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
366 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
368 manager
->event
= sd_event_unref(manager
->event
);
371 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
373 r
= log_error_errno(errno
, "error creating signalfd %m");
376 ep_signal
.data
.fd
= fd_signal
;
378 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
379 ep_monitor
.data
.fd
= fd_monitor
;
381 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
383 r
= log_error_errno(errno
, "error creating epoll fd: %m");
387 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
388 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
389 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
393 /* Request TERM signal if parent exits.
394 Ignore error, not much we can do in that case. */
395 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
397 /* Reset OOM score, we only protect the main daemon. */
398 write_string_file("/proc/self/oom_score_adj", "0", 0);
401 _cleanup_(udev_event_freep
) struct udev_event
*udev_event
= NULL
;
406 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
407 udev_event
= udev_event_new(dev
);
408 if (udev_event
== NULL
) {
413 if (arg_exec_delay
> 0)
414 udev_event
->exec_delay
= arg_exec_delay
;
417 * Take a shared lock on the device node; this establishes
418 * a concept of device "ownership" to serialize device
419 * access. External processes holding an exclusive lock will
420 * cause udev to skip the event handling; in the case udev
421 * acquired the lock, the external process can block until
422 * udev has finished its event handling.
424 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
425 shall_lock_device(dev
)) {
426 struct udev_device
*d
= dev
;
428 if (streq_ptr("partition", udev_device_get_devtype(d
)))
429 d
= udev_device_get_parent(d
);
432 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
433 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
434 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
435 fd_lock
= safe_close(fd_lock
);
441 /* needed for renaming netifs */
442 udev_event
->rtnl
= rtnl
;
444 /* apply rules, create node, symlinks */
445 udev_event_execute_rules(udev_event
,
446 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
450 udev_event_execute_run(udev_event
,
451 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
453 if (udev_event
->rtnl
)
454 /* in case rtnl was initialized */
455 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
457 /* apply/restore inotify watch */
458 if (udev_event
->inotify_watch
) {
459 udev_watch_begin(dev
->device
);
460 udev_device_update_db(dev
);
465 /* send processed event back to libudev listeners */
466 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
469 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
471 /* send udevd the result of the event execution */
472 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
474 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
475 udev_device_get_seqnum(dev
));
477 udev_device_unref(dev
);
480 /* wait for more device messages from main udevd, or term signal */
481 while (dev
== NULL
) {
482 struct epoll_event ev
[4];
486 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
490 r
= log_error_errno(errno
, "failed to poll: %m");
494 for (i
= 0; i
< fdcount
; i
++) {
495 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
496 dev
= udev_monitor_receive_device(worker_monitor
);
498 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
499 struct signalfd_siginfo fdsi
;
502 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
503 if (size
!= sizeof(struct signalfd_siginfo
))
505 switch (fdsi
.ssi_signo
) {
514 udev_device_unref(dev
);
515 manager_free(manager
);
517 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
520 event
->state
= EVENT_QUEUED
;
521 log_error_errno(errno
, "fork of child failed: %m");
525 struct worker
*worker
;
527 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
531 worker_attach_event(worker
, event
);
533 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
539 static void event_run(Manager
*manager
, struct event
*event
) {
540 struct worker
*worker
;
546 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
549 if (worker
->state
!= WORKER_IDLE
)
552 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
554 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
556 (void) kill(worker
->pid
, SIGKILL
);
557 worker
->state
= WORKER_KILLED
;
560 worker_attach_event(worker
, event
);
564 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
565 if (arg_children_max
> 1)
566 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
570 /* start new worker and pass initial device */
571 worker_spawn(manager
, event
);
574 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
581 /* only one process can add events to the queue */
582 if (manager
->pid
== 0)
583 manager
->pid
= getpid_cached();
585 assert(manager
->pid
== getpid_cached());
587 event
= new0(struct event
, 1);
591 event
->manager
= manager
;
593 event
->dev_kernel
= udev_device_shallow_clone(dev
);
594 udev_device_copy_properties(event
->dev_kernel
, dev
);
595 event
->seqnum
= udev_device_get_seqnum(dev
);
596 event
->devpath
= udev_device_get_devpath(dev
);
597 event
->devpath_len
= strlen(event
->devpath
);
598 event
->devpath_old
= udev_device_get_devpath_old(dev
);
599 event
->devnum
= udev_device_get_devnum(dev
);
600 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
601 event
->ifindex
= udev_device_get_ifindex(dev
);
603 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
604 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
606 event
->state
= EVENT_QUEUED
;
608 if (LIST_IS_EMPTY(manager
->events
)) {
609 r
= touch("/run/udev/queue");
611 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
614 LIST_APPEND(event
, manager
->events
, event
);
619 static void manager_kill_workers(Manager
*manager
) {
620 struct worker
*worker
;
625 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
626 if (worker
->state
== WORKER_KILLED
)
629 worker
->state
= WORKER_KILLED
;
630 (void) kill(worker
->pid
, SIGTERM
);
634 /* lookup event for identical, parent, child device */
635 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
636 struct event
*loop_event
;
639 /* check if queue contains events we depend on */
640 LIST_FOREACH(event
, loop_event
, manager
->events
) {
641 /* we already found a later event, earlier cannot block us, no need to check again */
642 if (loop_event
->seqnum
< event
->delaying_seqnum
)
645 /* event we checked earlier still exists, no need to check again */
646 if (loop_event
->seqnum
== event
->delaying_seqnum
)
649 /* found ourself, no later event can block us */
650 if (loop_event
->seqnum
>= event
->seqnum
)
653 /* check major/minor */
654 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
657 /* check network device ifindex */
658 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
661 /* check our old name */
662 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
663 event
->delaying_seqnum
= loop_event
->seqnum
;
667 /* compare devpath */
668 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
670 /* one devpath is contained in the other? */
671 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
674 /* identical device event found */
675 if (loop_event
->devpath_len
== event
->devpath_len
) {
676 /* devices names might have changed/swapped in the meantime */
677 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
679 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
681 event
->delaying_seqnum
= loop_event
->seqnum
;
685 /* parent device event found */
686 if (event
->devpath
[common
] == '/') {
687 event
->delaying_seqnum
= loop_event
->seqnum
;
691 /* child device event found */
692 if (loop_event
->devpath
[common
] == '/') {
693 event
->delaying_seqnum
= loop_event
->seqnum
;
697 /* no matching device */
704 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
705 Manager
*manager
= userdata
;
709 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
711 sd_event_exit(manager
->event
, -ETIMEDOUT
);
716 static void manager_exit(Manager
*manager
) {
722 manager
->exit
= true;
726 "STATUS=Starting shutdown...");
728 /* close sources of new events and discard buffered events */
729 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
730 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
732 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
733 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
735 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
736 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
738 /* discard queued events and kill workers */
739 event_queue_cleanup(manager
, EVENT_QUEUED
);
740 manager_kill_workers(manager
);
742 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
744 r
= sd_event_add_time(manager
->event
, NULL
, CLOCK_MONOTONIC
,
745 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
750 /* reload requested, HUP signal received, rules changed, builtin changed */
751 static void manager_reload(Manager
*manager
) {
757 "STATUS=Flushing configuration...");
759 manager_kill_workers(manager
);
760 manager
->rules
= udev_rules_unref(manager
->rules
);
765 "STATUS=Processing with %u children at max", arg_children_max
);
768 static void event_queue_start(Manager
*manager
) {
774 if (LIST_IS_EMPTY(manager
->events
) ||
775 manager
->exit
|| manager
->stop_exec_queue
)
778 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
779 /* check for changed config, every 3 seconds at most */
780 if (manager
->last_usec
== 0 ||
781 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
782 if (udev_rules_check_timestamp(manager
->rules
) ||
783 udev_builtin_validate())
784 manager_reload(manager
);
786 manager
->last_usec
= usec
;
791 if (!manager
->rules
) {
792 manager
->rules
= udev_rules_new(arg_resolve_names
);
797 LIST_FOREACH(event
,event
,manager
->events
) {
798 if (event
->state
!= EVENT_QUEUED
)
801 /* do not start event if parent or child event is still running */
802 if (is_devpath_busy(manager
, event
))
805 event_run(manager
, event
);
809 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
810 struct event
*event
, *tmp
;
812 LIST_FOREACH_SAFE(event
, event
, tmp
, manager
->events
) {
813 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
820 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
821 Manager
*manager
= userdata
;
826 struct worker_message msg
;
827 struct iovec iovec
= {
829 .iov_len
= sizeof(msg
),
832 struct cmsghdr cmsghdr
;
833 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
835 struct msghdr msghdr
= {
838 .msg_control
= &control
,
839 .msg_controllen
= sizeof(control
),
841 struct cmsghdr
*cmsg
;
843 struct ucred
*ucred
= NULL
;
844 struct worker
*worker
;
846 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
850 else if (errno
== EAGAIN
)
851 /* nothing more to read */
854 return log_error_errno(errno
, "failed to receive message: %m");
855 } else if (size
!= sizeof(struct worker_message
)) {
856 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
860 CMSG_FOREACH(cmsg
, &msghdr
) {
861 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
862 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
863 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
864 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
867 if (!ucred
|| ucred
->pid
<= 0) {
868 log_warning_errno(EIO
, "ignoring worker message without valid PID");
872 /* lookup worker who sent the signal */
873 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(ucred
->pid
));
875 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
879 if (worker
->state
!= WORKER_KILLED
)
880 worker
->state
= WORKER_IDLE
;
882 /* worker returned */
883 event_free(worker
->event
);
886 /* we have free workers, try to schedule events */
887 event_queue_start(manager
);
892 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
893 Manager
*manager
= userdata
;
894 struct udev_device
*dev
;
899 dev
= udev_monitor_receive_device(manager
->monitor
);
901 udev_device_ensure_usec_initialized(dev
, NULL
);
902 r
= event_queue_insert(manager
, dev
);
904 udev_device_unref(dev
);
906 /* we have fresh events, try to schedule them */
907 event_queue_start(manager
);
913 /* receive the udevd message from userspace */
914 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
915 Manager
*manager
= userdata
;
916 _cleanup_(udev_ctrl_connection_unrefp
) struct udev_ctrl_connection
*ctrl_conn
= NULL
;
917 _cleanup_(udev_ctrl_msg_unrefp
) struct udev_ctrl_msg
*ctrl_msg
= NULL
;
923 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
927 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
931 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
933 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
934 log_set_max_level(i
);
935 manager_kill_workers(manager
);
938 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
939 log_debug("udevd message (STOP_EXEC_QUEUE) received");
940 manager
->stop_exec_queue
= true;
943 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
944 log_debug("udevd message (START_EXEC_QUEUE) received");
945 manager
->stop_exec_queue
= false;
946 event_queue_start(manager
);
949 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
950 log_debug("udevd message (RELOAD) received");
951 manager_reload(manager
);
954 str
= udev_ctrl_get_set_env(ctrl_msg
);
956 _cleanup_free_
char *key
= NULL
, *val
= NULL
, *old_key
= NULL
, *old_val
= NULL
;
959 eq
= strchr(str
, '=');
961 log_error("Invalid key format '%s'", str
);
965 key
= strndup(str
, eq
- str
);
971 old_val
= hashmap_remove2(manager
->properties
, key
, (void **) &old_key
);
973 r
= hashmap_ensure_allocated(&manager
->properties
, &string_hash_ops
);
981 log_debug("udevd message (ENV) received, unset '%s'", key
);
983 r
= hashmap_put(manager
->properties
, key
, NULL
);
995 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
997 r
= hashmap_put(manager
->properties
, key
, val
);
1005 manager_kill_workers(manager
);
1008 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
1010 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
1011 arg_children_max
= i
;
1013 (void) sd_notifyf(false,
1015 "STATUS=Processing with %u children at max", arg_children_max
);
1018 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
1019 log_debug("udevd message (SYNC) received");
1021 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
1022 log_debug("udevd message (EXIT) received");
1023 manager_exit(manager
);
1024 /* keep reference to block the client until we exit
1025 TODO: deal with several blocking exit requests */
1026 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
1032 static int synthesize_change(sd_device
*dev
) {
1033 const char *subsystem
, *sysname
, *devname
, *syspath
, *devtype
;
1034 char filename
[PATH_MAX
];
1037 r
= sd_device_get_subsystem(dev
, &subsystem
);
1041 r
= sd_device_get_sysname(dev
, &sysname
);
1045 r
= sd_device_get_devname(dev
, &devname
);
1049 r
= sd_device_get_syspath(dev
, &syspath
);
1053 r
= sd_device_get_devtype(dev
, &devtype
);
1057 if (streq_ptr("block", subsystem
) &&
1058 streq_ptr("disk", devtype
) &&
1059 !startswith(sysname
, "dm-")) {
1060 _cleanup_(sd_device_enumerator_unrefp
) sd_device_enumerator
*e
= NULL
;
1061 bool part_table_read
= false, has_partitions
= false;
1066 * Try to re-read the partition table. This only succeeds if
1067 * none of the devices is busy. The kernel returns 0 if no
1068 * partition table is found, and we will not get an event for
1071 fd
= open(devname
, O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1073 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1075 r
= ioctl(fd
, BLKRRPART
, 0);
1079 part_table_read
= true;
1082 /* search for partitions */
1083 r
= sd_device_enumerator_new(&e
);
1087 r
= sd_device_enumerator_allow_uninitialized(e
);
1091 r
= sd_device_enumerator_add_match_parent(e
, dev
);
1095 r
= sd_device_enumerator_add_match_subsystem(e
, "block", true);
1099 FOREACH_DEVICE(e
, d
) {
1102 if (sd_device_get_devtype(d
, &t
) < 0 ||
1103 !streq("partition", t
))
1106 has_partitions
= true;
1111 * We have partitions and re-read the table, the kernel already sent
1112 * out a "change" event for the disk, and "remove/add" for all
1115 if (part_table_read
&& has_partitions
)
1119 * We have partitions but re-reading the partition table did not
1120 * work, synthesize "change" for the disk and all partitions.
1122 log_debug("Device '%s' is closed, synthesising 'change'", devname
);
1123 strscpyl(filename
, sizeof(filename
), syspath
, "/uevent", NULL
);
1124 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1126 FOREACH_DEVICE(e
, d
) {
1127 const char *t
, *n
, *s
;
1129 if (sd_device_get_devtype(d
, &t
) < 0 ||
1130 !streq("partition", t
))
1133 if (sd_device_get_devname(d
, &n
) < 0 ||
1134 sd_device_get_syspath(d
, &s
) < 0)
1137 log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname
, n
);
1138 strscpyl(filename
, sizeof(filename
), s
, "/uevent", NULL
);
1139 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1145 log_debug("Device %s is closed, synthesising 'change'", devname
);
1146 strscpyl(filename
, sizeof(filename
), syspath
, "/uevent", NULL
);
1147 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1152 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1153 Manager
*manager
= userdata
;
1154 union inotify_event_buffer buffer
;
1155 struct inotify_event
*e
;
1160 l
= read(fd
, &buffer
, sizeof(buffer
));
1162 if (IN_SET(errno
, EAGAIN
, EINTR
))
1165 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1168 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1169 _cleanup_(sd_device_unrefp
) sd_device
*dev
= NULL
;
1170 const char *devnode
;
1172 if (udev_watch_lookup(e
->wd
, &dev
) <= 0)
1175 if (sd_device_get_devname(dev
, &devnode
) < 0)
1178 log_device_debug(dev
, "Inotify event: %x for %s", e
->mask
, devnode
);
1179 if (e
->mask
& IN_CLOSE_WRITE
) {
1180 synthesize_change(dev
);
1182 /* settle might be waiting on us to determine the queue
1183 * state. If we just handled an inotify event, we might have
1184 * generated a "change" event, but we won't have queued up
1185 * the resultant uevent yet. Do that.
1187 on_uevent(NULL
, -1, 0, manager
);
1188 } else if (e
->mask
& IN_IGNORED
)
1189 udev_watch_end(dev
);
1195 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1196 Manager
*manager
= userdata
;
1200 manager_exit(manager
);
1205 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1206 Manager
*manager
= userdata
;
1210 manager_reload(manager
);
1215 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1216 Manager
*manager
= userdata
;
1223 struct worker
*worker
;
1225 pid
= waitpid(-1, &status
, WNOHANG
);
1229 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(pid
));
1231 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1235 if (WIFEXITED(status
)) {
1236 if (WEXITSTATUS(status
) == 0)
1237 log_debug("worker ["PID_FMT
"] exited", pid
);
1239 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1240 } else if (WIFSIGNALED(status
)) {
1241 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), signal_to_string(WTERMSIG(status
)));
1242 } else if (WIFSTOPPED(status
)) {
1243 log_info("worker ["PID_FMT
"] stopped", pid
);
1245 } else if (WIFCONTINUED(status
)) {
1246 log_info("worker ["PID_FMT
"] continued", pid
);
1249 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1251 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1252 if (worker
->event
) {
1253 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1254 /* delete state from disk */
1255 udev_device_delete_db(worker
->event
->dev
);
1256 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1257 /* forward kernel event without amending it */
1258 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1262 worker_free(worker
);
1265 /* we can start new workers, try to schedule events */
1266 event_queue_start(manager
);
1271 static int on_post(sd_event_source
*s
, void *userdata
) {
1272 Manager
*manager
= userdata
;
1277 if (LIST_IS_EMPTY(manager
->events
)) {
1278 /* no pending events */
1279 if (!hashmap_isempty(manager
->workers
)) {
1280 /* there are idle workers */
1281 log_debug("cleanup idle workers");
1282 manager_kill_workers(manager
);
1285 if (manager
->exit
) {
1286 r
= sd_event_exit(manager
->event
, 0);
1289 } else if (manager
->cgroup
)
1290 /* cleanup possible left-over processes in our cgroup */
1291 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, CGROUP_IGNORE_SELF
, NULL
, NULL
, NULL
);
1298 static int listen_fds(int *rctrl
, int *rnetlink
) {
1299 int ctrl_fd
= -1, netlink_fd
= -1;
1305 n
= sd_listen_fds(true);
1309 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1310 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1317 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1318 if (netlink_fd
>= 0)
1328 _cleanup_(udev_ctrl_unrefp
) struct udev_ctrl
*ctrl
= NULL
;
1330 ctrl
= udev_ctrl_new();
1332 return log_error_errno(EINVAL
, "error initializing udev control socket");
1334 r
= udev_ctrl_enable_receiving(ctrl
);
1336 return log_error_errno(EINVAL
, "error binding udev control socket");
1338 fd
= udev_ctrl_get_fd(ctrl
);
1340 return log_error_errno(EIO
, "could not get ctrl fd");
1342 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1344 return log_error_errno(errno
, "could not dup ctrl fd: %m");
1347 if (netlink_fd
< 0) {
1348 _cleanup_(udev_monitor_unrefp
) struct udev_monitor
*monitor
= NULL
;
1350 monitor
= udev_monitor_new_from_netlink(NULL
, "kernel");
1352 return log_error_errno(EINVAL
, "error initializing netlink socket");
1354 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1356 r
= udev_monitor_enable_receiving(monitor
);
1358 return log_error_errno(EINVAL
, "error binding netlink socket");
1360 fd
= udev_monitor_get_fd(monitor
);
1362 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1364 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1366 return log_error_errno(errno
, "could not dup netlink fd: %m");
1370 *rnetlink
= netlink_fd
;
1376 * read the kernel command line, in case we need to get into debug mode
1377 * udev.log_priority=<level> syslog priority
1378 * udev.children_max=<number of workers> events are fully serialized if set to 1
1379 * udev.exec_delay=<number of seconds> delay execution of every executed program
1380 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
1382 static int parse_proc_cmdline_item(const char *key
, const char *value
, void *data
) {
1390 if (proc_cmdline_key_streq(key
, "udev.log_priority")) {
1392 if (proc_cmdline_value_missing(key
, value
))
1395 r
= util_log_priority(value
);
1397 log_set_max_level(r
);
1399 } else if (proc_cmdline_key_streq(key
, "udev.event_timeout")) {
1401 if (proc_cmdline_value_missing(key
, value
))
1404 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1406 arg_event_timeout_usec
*= USEC_PER_SEC
;
1407 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1410 } else if (proc_cmdline_key_streq(key
, "udev.children_max")) {
1412 if (proc_cmdline_value_missing(key
, value
))
1415 r
= safe_atou(value
, &arg_children_max
);
1417 } else if (proc_cmdline_key_streq(key
, "udev.exec_delay")) {
1419 if (proc_cmdline_value_missing(key
, value
))
1422 r
= safe_atoi(value
, &arg_exec_delay
);
1424 } else if (startswith(key
, "udev."))
1425 log_warning("Unknown udev kernel command line option \"%s\"", key
);
1428 log_warning_errno(r
, "Failed to parse \"%s=%s\", ignoring: %m", key
, value
);
1433 static int help(void) {
1434 _cleanup_free_
char *link
= NULL
;
1437 r
= terminal_urlify_man("systemd-udevd.service", "8", &link
);
1441 printf("%s [OPTIONS...]\n\n"
1442 "Manages devices.\n\n"
1443 " -h --help Print this message\n"
1444 " -V --version Print version of the program\n"
1445 " -d --daemon Detach and run in the background\n"
1446 " -D --debug Enable debug output\n"
1447 " -c --children-max=INT Set maximum number of workers\n"
1448 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1449 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1450 " -N --resolve-names=early|late|never\n"
1451 " When to resolve users and groups\n"
1452 "\nSee the %s for details.\n"
1453 , program_invocation_short_name
1460 static int parse_argv(int argc
, char *argv
[]) {
1461 static const struct option options
[] = {
1462 { "daemon", no_argument
, NULL
, 'd' },
1463 { "debug", no_argument
, NULL
, 'D' },
1464 { "children-max", required_argument
, NULL
, 'c' },
1465 { "exec-delay", required_argument
, NULL
, 'e' },
1466 { "event-timeout", required_argument
, NULL
, 't' },
1467 { "resolve-names", required_argument
, NULL
, 'N' },
1468 { "help", no_argument
, NULL
, 'h' },
1469 { "version", no_argument
, NULL
, 'V' },
1478 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
1484 arg_daemonize
= true;
1487 r
= safe_atou(optarg
, &arg_children_max
);
1489 log_warning("Invalid --children-max ignored: %s", optarg
);
1492 r
= safe_atoi(optarg
, &arg_exec_delay
);
1494 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1497 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1499 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1501 arg_event_timeout_usec
*= USEC_PER_SEC
;
1502 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1509 if (streq(optarg
, "early")) {
1510 arg_resolve_names
= 1;
1511 } else if (streq(optarg
, "late")) {
1512 arg_resolve_names
= 0;
1513 } else if (streq(optarg
, "never")) {
1514 arg_resolve_names
= -1;
1516 log_error("resolve-names must be early, late or never");
1523 printf("%s\n", PACKAGE_VERSION
);
1528 assert_not_reached("Unhandled option");
1536 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1537 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1541 assert(fd_ctrl
>= 0);
1542 assert(fd_uevent
>= 0);
1544 manager
= new0(Manager
, 1);
1548 manager
->fd_inotify
= -1;
1549 manager
->worker_watch
[WRITE_END
] = -1;
1550 manager
->worker_watch
[READ_END
] = -1;
1552 udev_builtin_init();
1554 manager
->rules
= udev_rules_new(arg_resolve_names
);
1555 if (!manager
->rules
)
1556 return log_error_errno(ENOMEM
, "error reading rules");
1558 LIST_HEAD_INIT(manager
->events
);
1560 manager
->cgroup
= cgroup
;
1562 manager
->ctrl
= udev_ctrl_new_from_fd(fd_ctrl
);
1564 return log_error_errno(EINVAL
, "error taking over udev control socket");
1566 manager
->monitor
= udev_monitor_new_from_netlink_fd(NULL
, "kernel", fd_uevent
);
1567 if (!manager
->monitor
)
1568 return log_error_errno(EINVAL
, "error taking over netlink socket");
1570 /* unnamed socket from workers to the main daemon */
1571 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1573 return log_error_errno(errno
, "error creating socketpair: %m");
1575 fd_worker
= manager
->worker_watch
[READ_END
];
1577 r
= setsockopt_int(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, true);
1579 return log_error_errno(r
, "could not enable SO_PASSCRED: %m");
1581 r
= udev_watch_init();
1583 return log_error_errno(r
, "Failed to create inotify descriptor: %m");
1584 manager
->fd_inotify
= r
;
1586 udev_watch_restore();
1588 /* block and listen to all signals on signalfd */
1589 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1591 r
= sd_event_default(&manager
->event
);
1593 return log_error_errno(r
, "could not allocate event loop: %m");
1595 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1597 return log_error_errno(r
, "error creating sigint event source: %m");
1599 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1601 return log_error_errno(r
, "error creating sigterm event source: %m");
1603 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1605 return log_error_errno(r
, "error creating sighup event source: %m");
1607 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1609 return log_error_errno(r
, "error creating sigchld event source: %m");
1611 r
= sd_event_set_watchdog(manager
->event
, true);
1613 return log_error_errno(r
, "error creating watchdog event source: %m");
1615 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1617 return log_error_errno(r
, "error creating ctrl event source: %m");
1619 /* This needs to be after the inotify and uevent handling, to make sure
1620 * that the ping is send back after fully processing the pending uevents
1621 * (including the synthetic ones we may create due to inotify events).
1623 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1625 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1627 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1629 return log_error_errno(r
, "error creating inotify event source: %m");
1631 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1633 return log_error_errno(r
, "error creating uevent event source: %m");
1635 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1637 return log_error_errno(r
, "error creating worker event source: %m");
1639 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1641 return log_error_errno(r
, "error creating post event source: %m");
1643 *ret
= TAKE_PTR(manager
);
1648 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1649 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1652 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1654 r
= log_error_errno(r
, "failed to allocate manager object: %m");
1658 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1660 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1662 (void) sd_notifyf(false,
1664 "STATUS=Processing with %u children at max", arg_children_max
);
1666 r
= sd_event_loop(manager
->event
);
1668 log_error_errno(r
, "event loop failed: %m");
1672 sd_event_get_exit_code(manager
->event
, &r
);
1677 "STATUS=Shutting down...");
1679 udev_ctrl_cleanup(manager
->ctrl
);
1683 int main(int argc
, char *argv
[]) {
1684 _cleanup_free_
char *cgroup
= NULL
;
1685 int fd_ctrl
= -1, fd_uevent
= -1;
1688 log_set_target(LOG_TARGET_AUTO
);
1689 udev_parse_config();
1690 log_parse_environment();
1693 r
= parse_argv(argc
, argv
);
1697 r
= proc_cmdline_parse(parse_proc_cmdline_item
, NULL
, PROC_CMDLINE_STRIP_RD_PREFIX
);
1699 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1702 log_set_target(LOG_TARGET_CONSOLE
);
1703 log_set_max_level(LOG_DEBUG
);
1710 if (arg_children_max
== 0) {
1712 unsigned long mem_limit
;
1714 arg_children_max
= 8;
1716 if (sched_getaffinity(0, sizeof(cpu_set
), &cpu_set
) == 0)
1717 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1719 mem_limit
= physical_memory() / (128LU*1024*1024);
1720 arg_children_max
= MAX(10U, MIN(arg_children_max
, mem_limit
));
1722 log_debug("set children_max to %u", arg_children_max
);
1725 /* set umask before creating any file/directory */
1728 r
= log_error_errno(errno
, "could not change dir to /: %m");
1734 r
= mac_selinux_init();
1736 log_error_errno(r
, "could not initialize labelling: %m");
1740 r
= mkdir_errno_wrapper("/run/udev", 0755);
1741 if (r
< 0 && r
!= -EEXIST
) {
1742 log_error_errno(r
, "could not create /run/udev: %m");
1746 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1748 if (getppid() == 1) {
1749 /* get our own cgroup, we regularly kill everything udev has left behind
1750 we only do this on systemd systems, and only if we are directly spawned
1751 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1752 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1754 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
1755 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1757 log_warning_errno(r
, "failed to get cgroup: %m");
1761 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1763 r
= log_error_errno(r
, "could not listen on fds: %m");
1767 if (arg_daemonize
) {
1770 log_info("starting version " PACKAGE_VERSION
);
1772 /* connect /dev/null to stdin, stdout, stderr */
1773 if (log_get_max_level() < LOG_DEBUG
) {
1774 r
= make_null_stdio();
1776 log_warning_errno(r
, "Failed to redirect standard streams to /dev/null: %m");
1784 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1787 mac_selinux_finish();
1789 _exit(EXIT_SUCCESS
);
1794 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
1797 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1800 mac_selinux_finish();
1802 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;