2 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
30 #include <sys/epoll.h>
32 #include <sys/inotify.h>
33 #include <sys/ioctl.h>
34 #include <sys/mount.h>
35 #include <sys/prctl.h>
36 #include <sys/signalfd.h>
37 #include <sys/socket.h>
43 #include "sd-daemon.h"
46 #include "cgroup-util.h"
47 #include "cpu-set-util.h"
48 #include "dev-setup.h"
49 #include "event-util.h"
52 #include "formats-util.h"
56 #include "netlink-util.h"
57 #include "parse-util.h"
58 #include "proc-cmdline.h"
59 #include "process-util.h"
60 #include "selinux-util.h"
61 #include "signal-util.h"
62 #include "string-util.h"
63 #include "terminal-util.h"
64 #include "udev-util.h"
67 static bool arg_debug
= false;
68 static int arg_daemonize
= false;
69 static int arg_resolve_names
= 1;
70 static unsigned arg_children_max
;
71 static int arg_exec_delay
;
72 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
73 static usec_t arg_event_timeout_warn_usec
= 180 * USEC_PER_SEC
/ 3;
75 typedef struct Manager
{
79 struct udev_list_node events
;
81 pid_t pid
; /* the process that originally allocated the manager object */
83 struct udev_rules
*rules
;
84 struct udev_list properties
;
86 struct udev_monitor
*monitor
;
87 struct udev_ctrl
*ctrl
;
88 struct udev_ctrl_connection
*ctrl_conn_blocking
;
92 sd_event_source
*ctrl_event
;
93 sd_event_source
*uevent_event
;
94 sd_event_source
*inotify_event
;
98 bool stop_exec_queue
:1;
109 struct udev_list_node node
;
112 struct udev_device
*dev
;
113 struct udev_device
*dev_kernel
;
114 struct worker
*worker
;
115 enum event_state state
;
116 unsigned long long int delaying_seqnum
;
117 unsigned long long int seqnum
;
120 const char *devpath_old
;
124 sd_event_source
*timeout_warning
;
125 sd_event_source
*timeout
;
128 static inline struct event
*node_to_event(struct udev_list_node
*node
) {
129 return container_of(node
, struct event
, node
);
132 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
143 struct udev_list_node node
;
146 struct udev_monitor
*monitor
;
147 enum worker_state state
;
151 /* passed from worker to main process */
152 struct worker_message
{
155 static void event_free(struct event
*event
) {
161 udev_list_node_remove(&event
->node
);
162 udev_device_unref(event
->dev
);
163 udev_device_unref(event
->dev_kernel
);
165 sd_event_source_unref(event
->timeout_warning
);
166 sd_event_source_unref(event
->timeout
);
169 event
->worker
->event
= NULL
;
171 assert(event
->manager
);
173 if (udev_list_node_is_empty(&event
->manager
->events
)) {
174 /* only clean up the queue from the process that created it */
175 if (event
->manager
->pid
== getpid()) {
176 r
= unlink("/run/udev/queue");
178 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
185 static void worker_free(struct worker
*worker
) {
189 assert(worker
->manager
);
191 hashmap_remove(worker
->manager
->workers
, UINT_TO_PTR(worker
->pid
));
192 udev_monitor_unref(worker
->monitor
);
193 event_free(worker
->event
);
198 static void manager_workers_free(Manager
*manager
) {
199 struct worker
*worker
;
204 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
207 manager
->workers
= hashmap_free(manager
->workers
);
210 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
211 _cleanup_free_
struct worker
*worker
= NULL
;
216 assert(worker_monitor
);
219 worker
= new0(struct worker
, 1);
223 worker
->refcount
= 1;
224 worker
->manager
= manager
;
225 /* close monitor, but keep address around */
226 udev_monitor_disconnect(worker_monitor
);
227 worker
->monitor
= udev_monitor_ref(worker_monitor
);
230 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
234 r
= hashmap_put(manager
->workers
, UINT_TO_PTR(pid
), worker
);
244 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
245 struct event
*event
= userdata
;
248 assert(event
->worker
);
250 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
251 event
->worker
->state
= WORKER_KILLED
;
253 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
258 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
259 struct event
*event
= userdata
;
263 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
268 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
273 assert(worker
->manager
);
275 assert(!event
->worker
);
276 assert(!worker
->event
);
278 worker
->state
= WORKER_RUNNING
;
279 worker
->event
= event
;
280 event
->state
= EVENT_RUNNING
;
281 event
->worker
= worker
;
283 e
= worker
->manager
->event
;
285 assert_se(sd_event_now(e
, clock_boottime_or_monotonic(), &usec
) >= 0);
287 (void) sd_event_add_time(e
, &event
->timeout_warning
, clock_boottime_or_monotonic(),
288 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
290 (void) sd_event_add_time(e
, &event
->timeout
, clock_boottime_or_monotonic(),
291 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
294 static void manager_free(Manager
*manager
) {
298 udev_builtin_exit(manager
->udev
);
300 sd_event_source_unref(manager
->ctrl_event
);
301 sd_event_source_unref(manager
->uevent_event
);
302 sd_event_source_unref(manager
->inotify_event
);
304 udev_unref(manager
->udev
);
305 sd_event_unref(manager
->event
);
306 manager_workers_free(manager
);
307 event_queue_cleanup(manager
, EVENT_UNDEF
);
309 udev_monitor_unref(manager
->monitor
);
310 udev_ctrl_unref(manager
->ctrl
);
311 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
313 udev_list_cleanup(&manager
->properties
);
314 udev_rules_unref(manager
->rules
);
316 safe_close(manager
->fd_inotify
);
317 safe_close_pair(manager
->worker_watch
);
322 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
324 static int worker_send_message(int fd
) {
325 struct worker_message message
= {};
327 return loop_write(fd
, &message
, sizeof(message
), false);
330 static void worker_spawn(Manager
*manager
, struct event
*event
) {
331 struct udev
*udev
= event
->udev
;
332 _cleanup_udev_monitor_unref_
struct udev_monitor
*worker_monitor
= NULL
;
336 /* listen for new events */
337 worker_monitor
= udev_monitor_new_from_netlink(udev
, NULL
);
338 if (worker_monitor
== NULL
)
340 /* allow the main daemon netlink address to send devices to the worker */
341 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
342 r
= udev_monitor_enable_receiving(worker_monitor
);
344 log_error_errno(r
, "worker: could not enable receiving of device: %m");
349 struct udev_device
*dev
= NULL
;
350 _cleanup_netlink_unref_ sd_netlink
*rtnl
= NULL
;
352 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
353 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
354 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
357 /* take initial device from queue */
361 unsetenv("NOTIFY_SOCKET");
363 manager_workers_free(manager
);
364 event_queue_cleanup(manager
, EVENT_UNDEF
);
366 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
367 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
368 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
369 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
370 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
372 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
373 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
374 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
376 manager
->event
= sd_event_unref(manager
->event
);
379 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
381 r
= log_error_errno(errno
, "error creating signalfd %m");
384 ep_signal
.data
.fd
= fd_signal
;
386 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
387 ep_monitor
.data
.fd
= fd_monitor
;
389 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
391 r
= log_error_errno(errno
, "error creating epoll fd: %m");
395 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
396 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
397 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
401 /* request TERM signal if parent exits */
402 prctl(PR_SET_PDEATHSIG
, SIGTERM
);
404 /* reset OOM score, we only protect the main daemon */
405 write_string_file("/proc/self/oom_score_adj", "0", 0);
408 struct udev_event
*udev_event
;
413 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
414 udev_event
= udev_event_new(dev
);
415 if (udev_event
== NULL
) {
420 if (arg_exec_delay
> 0)
421 udev_event
->exec_delay
= arg_exec_delay
;
424 * Take a shared lock on the device node; this establishes
425 * a concept of device "ownership" to serialize device
426 * access. External processes holding an exclusive lock will
427 * cause udev to skip the event handling; in the case udev
428 * acquired the lock, the external process can block until
429 * udev has finished its event handling.
431 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
432 streq_ptr("block", udev_device_get_subsystem(dev
)) &&
433 !startswith(udev_device_get_sysname(dev
), "dm-") &&
434 !startswith(udev_device_get_sysname(dev
), "md")) {
435 struct udev_device
*d
= dev
;
437 if (streq_ptr("partition", udev_device_get_devtype(d
)))
438 d
= udev_device_get_parent(d
);
441 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
442 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
443 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
444 fd_lock
= safe_close(fd_lock
);
450 /* needed for renaming netifs */
451 udev_event
->rtnl
= rtnl
;
453 /* apply rules, create node, symlinks */
454 udev_event_execute_rules(udev_event
,
455 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
456 &manager
->properties
,
459 udev_event_execute_run(udev_event
,
460 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
462 if (udev_event
->rtnl
)
463 /* in case rtnl was initialized */
464 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
466 /* apply/restore inotify watch */
467 if (udev_event
->inotify_watch
) {
468 udev_watch_begin(udev
, dev
);
469 udev_device_update_db(dev
);
474 /* send processed event back to libudev listeners */
475 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
478 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
480 /* send udevd the result of the event execution */
481 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
483 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
484 udev_device_get_seqnum(dev
));
486 udev_device_unref(dev
);
489 udev_event_unref(udev_event
);
491 /* wait for more device messages from main udevd, or term signal */
492 while (dev
== NULL
) {
493 struct epoll_event ev
[4];
497 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
501 r
= log_error_errno(errno
, "failed to poll: %m");
505 for (i
= 0; i
< fdcount
; i
++) {
506 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
507 dev
= udev_monitor_receive_device(worker_monitor
);
509 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
510 struct signalfd_siginfo fdsi
;
513 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
514 if (size
!= sizeof(struct signalfd_siginfo
))
516 switch (fdsi
.ssi_signo
) {
525 udev_device_unref(dev
);
526 manager_free(manager
);
528 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
531 event
->state
= EVENT_QUEUED
;
532 log_error_errno(errno
, "fork of child failed: %m");
536 struct worker
*worker
;
538 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
542 worker_attach_event(worker
, event
);
544 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
550 static void event_run(Manager
*manager
, struct event
*event
) {
551 struct worker
*worker
;
557 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
560 if (worker
->state
!= WORKER_IDLE
)
563 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
565 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
567 kill(worker
->pid
, SIGKILL
);
568 worker
->state
= WORKER_KILLED
;
571 worker_attach_event(worker
, event
);
575 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
576 if (arg_children_max
> 1)
577 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
581 /* start new worker and pass initial device */
582 worker_spawn(manager
, event
);
585 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
592 /* only one process can add events to the queue */
593 if (manager
->pid
== 0)
594 manager
->pid
= getpid();
596 assert(manager
->pid
== getpid());
598 event
= new0(struct event
, 1);
602 event
->udev
= udev_device_get_udev(dev
);
603 event
->manager
= manager
;
605 event
->dev_kernel
= udev_device_shallow_clone(dev
);
606 udev_device_copy_properties(event
->dev_kernel
, dev
);
607 event
->seqnum
= udev_device_get_seqnum(dev
);
608 event
->devpath
= udev_device_get_devpath(dev
);
609 event
->devpath_len
= strlen(event
->devpath
);
610 event
->devpath_old
= udev_device_get_devpath_old(dev
);
611 event
->devnum
= udev_device_get_devnum(dev
);
612 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
613 event
->ifindex
= udev_device_get_ifindex(dev
);
615 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
616 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
618 event
->state
= EVENT_QUEUED
;
620 if (udev_list_node_is_empty(&manager
->events
)) {
621 r
= touch("/run/udev/queue");
623 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
626 udev_list_node_append(&event
->node
, &manager
->events
);
631 static void manager_kill_workers(Manager
*manager
) {
632 struct worker
*worker
;
637 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
638 if (worker
->state
== WORKER_KILLED
)
641 worker
->state
= WORKER_KILLED
;
642 kill(worker
->pid
, SIGTERM
);
646 /* lookup event for identical, parent, child device */
647 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
648 struct udev_list_node
*loop
;
651 /* check if queue contains events we depend on */
652 udev_list_node_foreach(loop
, &manager
->events
) {
653 struct event
*loop_event
= node_to_event(loop
);
655 /* we already found a later event, earlier can not block us, no need to check again */
656 if (loop_event
->seqnum
< event
->delaying_seqnum
)
659 /* event we checked earlier still exists, no need to check again */
660 if (loop_event
->seqnum
== event
->delaying_seqnum
)
663 /* found ourself, no later event can block us */
664 if (loop_event
->seqnum
>= event
->seqnum
)
667 /* check major/minor */
668 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
671 /* check network device ifindex */
672 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
675 /* check our old name */
676 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
677 event
->delaying_seqnum
= loop_event
->seqnum
;
681 /* compare devpath */
682 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
684 /* one devpath is contained in the other? */
685 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
688 /* identical device event found */
689 if (loop_event
->devpath_len
== event
->devpath_len
) {
690 /* devices names might have changed/swapped in the meantime */
691 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
693 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
695 event
->delaying_seqnum
= loop_event
->seqnum
;
699 /* parent device event found */
700 if (event
->devpath
[common
] == '/') {
701 event
->delaying_seqnum
= loop_event
->seqnum
;
705 /* child device event found */
706 if (loop_event
->devpath
[common
] == '/') {
707 event
->delaying_seqnum
= loop_event
->seqnum
;
711 /* no matching device */
718 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
719 Manager
*manager
= userdata
;
723 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
725 sd_event_exit(manager
->event
, -ETIMEDOUT
);
730 static void manager_exit(Manager
*manager
) {
736 manager
->exit
= true;
740 "STATUS=Starting shutdown...");
742 /* close sources of new events and discard buffered events */
743 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
744 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
746 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
747 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
749 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
750 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
752 /* discard queued events and kill workers */
753 event_queue_cleanup(manager
, EVENT_QUEUED
);
754 manager_kill_workers(manager
);
756 assert_se(sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
) >= 0);
758 r
= sd_event_add_time(manager
->event
, NULL
, clock_boottime_or_monotonic(),
759 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
764 /* reload requested, HUP signal received, rules changed, builtin changed */
765 static void manager_reload(Manager
*manager
) {
771 "STATUS=Flushing configuration...");
773 manager_kill_workers(manager
);
774 manager
->rules
= udev_rules_unref(manager
->rules
);
775 udev_builtin_exit(manager
->udev
);
779 "STATUS=Processing...");
782 static void event_queue_start(Manager
*manager
) {
783 struct udev_list_node
*loop
;
788 if (udev_list_node_is_empty(&manager
->events
) ||
789 manager
->exit
|| manager
->stop_exec_queue
)
792 assert_se(sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
) >= 0);
793 /* check for changed config, every 3 seconds at most */
794 if (manager
->last_usec
== 0 ||
795 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
796 if (udev_rules_check_timestamp(manager
->rules
) ||
797 udev_builtin_validate(manager
->udev
))
798 manager_reload(manager
);
800 manager
->last_usec
= usec
;
803 udev_builtin_init(manager
->udev
);
805 if (!manager
->rules
) {
806 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
811 udev_list_node_foreach(loop
, &manager
->events
) {
812 struct event
*event
= node_to_event(loop
);
814 if (event
->state
!= EVENT_QUEUED
)
817 /* do not start event if parent or child event is still running */
818 if (is_devpath_busy(manager
, event
))
821 event_run(manager
, event
);
825 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
826 struct udev_list_node
*loop
, *tmp
;
828 udev_list_node_foreach_safe(loop
, tmp
, &manager
->events
) {
829 struct event
*event
= node_to_event(loop
);
831 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
838 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
839 Manager
*manager
= userdata
;
844 struct worker_message msg
;
845 struct iovec iovec
= {
847 .iov_len
= sizeof(msg
),
850 struct cmsghdr cmsghdr
;
851 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
853 struct msghdr msghdr
= {
856 .msg_control
= &control
,
857 .msg_controllen
= sizeof(control
),
859 struct cmsghdr
*cmsg
;
861 struct ucred
*ucred
= NULL
;
862 struct worker
*worker
;
864 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
868 else if (errno
== EAGAIN
)
869 /* nothing more to read */
872 return log_error_errno(errno
, "failed to receive message: %m");
873 } else if (size
!= sizeof(struct worker_message
)) {
874 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
878 CMSG_FOREACH(cmsg
, &msghdr
) {
879 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
880 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
881 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
882 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
885 if (!ucred
|| ucred
->pid
<= 0) {
886 log_warning_errno(EIO
, "ignoring worker message without valid PID");
890 /* lookup worker who sent the signal */
891 worker
= hashmap_get(manager
->workers
, UINT_TO_PTR(ucred
->pid
));
893 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
897 if (worker
->state
!= WORKER_KILLED
)
898 worker
->state
= WORKER_IDLE
;
900 /* worker returned */
901 event_free(worker
->event
);
904 /* we have free workers, try to schedule events */
905 event_queue_start(manager
);
910 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
911 Manager
*manager
= userdata
;
912 struct udev_device
*dev
;
917 dev
= udev_monitor_receive_device(manager
->monitor
);
919 udev_device_ensure_usec_initialized(dev
, NULL
);
920 r
= event_queue_insert(manager
, dev
);
922 udev_device_unref(dev
);
924 /* we have fresh events, try to schedule them */
925 event_queue_start(manager
);
931 /* receive the udevd message from userspace */
932 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
933 Manager
*manager
= userdata
;
934 _cleanup_udev_ctrl_connection_unref_
struct udev_ctrl_connection
*ctrl_conn
= NULL
;
935 _cleanup_udev_ctrl_msg_unref_
struct udev_ctrl_msg
*ctrl_msg
= NULL
;
941 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
945 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
949 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
951 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
952 log_set_max_level(i
);
953 manager_kill_workers(manager
);
956 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
957 log_debug("udevd message (STOP_EXEC_QUEUE) received");
958 manager
->stop_exec_queue
= true;
961 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
962 log_debug("udevd message (START_EXEC_QUEUE) received");
963 manager
->stop_exec_queue
= false;
964 event_queue_start(manager
);
967 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
968 log_debug("udevd message (RELOAD) received");
969 manager_reload(manager
);
972 str
= udev_ctrl_get_set_env(ctrl_msg
);
974 _cleanup_free_
char *key
= NULL
;
980 val
= strchr(key
, '=');
984 if (val
[0] == '\0') {
985 log_debug("udevd message (ENV) received, unset '%s'", key
);
986 udev_list_entry_add(&manager
->properties
, key
, NULL
);
988 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
989 udev_list_entry_add(&manager
->properties
, key
, val
);
992 log_error("wrong key format '%s'", key
);
994 manager_kill_workers(manager
);
997 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
999 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
1000 arg_children_max
= i
;
1003 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
1004 log_debug("udevd message (SYNC) received");
1006 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
1007 log_debug("udevd message (EXIT) received");
1008 manager_exit(manager
);
1009 /* keep reference to block the client until we exit
1010 TODO: deal with several blocking exit requests */
1011 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
1017 static int synthesize_change(struct udev_device
*dev
) {
1018 char filename
[UTIL_PATH_SIZE
];
1021 if (streq_ptr("block", udev_device_get_subsystem(dev
)) &&
1022 streq_ptr("disk", udev_device_get_devtype(dev
)) &&
1023 !startswith(udev_device_get_sysname(dev
), "dm-")) {
1024 bool part_table_read
= false;
1025 bool has_partitions
= false;
1027 struct udev
*udev
= udev_device_get_udev(dev
);
1028 _cleanup_udev_enumerate_unref_
struct udev_enumerate
*e
= NULL
;
1029 struct udev_list_entry
*item
;
1032 * Try to re-read the partition table. This only succeeds if
1033 * none of the devices is busy. The kernel returns 0 if no
1034 * partition table is found, and we will not get an event for
1037 fd
= open(udev_device_get_devnode(dev
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1039 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1041 r
= ioctl(fd
, BLKRRPART
, 0);
1045 part_table_read
= true;
1048 /* search for partitions */
1049 e
= udev_enumerate_new(udev
);
1053 r
= udev_enumerate_add_match_parent(e
, dev
);
1057 r
= udev_enumerate_add_match_subsystem(e
, "block");
1061 r
= udev_enumerate_scan_devices(e
);
1065 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1066 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1068 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1072 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1075 has_partitions
= true;
1080 * We have partitions and re-read the table, the kernel already sent
1081 * out a "change" event for the disk, and "remove/add" for all
1084 if (part_table_read
&& has_partitions
)
1088 * We have partitions but re-reading the partition table did not
1089 * work, synthesize "change" for the disk and all partitions.
1091 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1092 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1093 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1095 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1096 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1098 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1102 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1105 log_debug("device %s closed, synthesising partition '%s' 'change'",
1106 udev_device_get_devnode(dev
), udev_device_get_devnode(d
));
1107 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(d
), "/uevent", NULL
);
1108 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1114 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1115 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1116 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1121 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1122 Manager
*manager
= userdata
;
1123 union inotify_event_buffer buffer
;
1124 struct inotify_event
*e
;
1129 l
= read(fd
, &buffer
, sizeof(buffer
));
1131 if (errno
== EAGAIN
|| errno
== EINTR
)
1134 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1137 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1138 _cleanup_udev_device_unref_
struct udev_device
*dev
= NULL
;
1140 dev
= udev_watch_lookup(manager
->udev
, e
->wd
);
1144 log_debug("inotify event: %x for %s", e
->mask
, udev_device_get_devnode(dev
));
1145 if (e
->mask
& IN_CLOSE_WRITE
) {
1146 synthesize_change(dev
);
1148 /* settle might be waiting on us to determine the queue
1149 * state. If we just handled an inotify event, we might have
1150 * generated a "change" event, but we won't have queued up
1151 * the resultant uevent yet. Do that.
1153 on_uevent(NULL
, -1, 0, manager
);
1154 } else if (e
->mask
& IN_IGNORED
)
1155 udev_watch_end(manager
->udev
, dev
);
1161 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1162 Manager
*manager
= userdata
;
1166 manager_exit(manager
);
1171 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1172 Manager
*manager
= userdata
;
1176 manager_reload(manager
);
1181 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1182 Manager
*manager
= userdata
;
1189 struct worker
*worker
;
1191 pid
= waitpid(-1, &status
, WNOHANG
);
1195 worker
= hashmap_get(manager
->workers
, UINT_TO_PTR(pid
));
1197 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1201 if (WIFEXITED(status
)) {
1202 if (WEXITSTATUS(status
) == 0)
1203 log_debug("worker ["PID_FMT
"] exited", pid
);
1205 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1206 } else if (WIFSIGNALED(status
)) {
1207 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), strsignal(WTERMSIG(status
)));
1208 } else if (WIFSTOPPED(status
)) {
1209 log_info("worker ["PID_FMT
"] stopped", pid
);
1211 } else if (WIFCONTINUED(status
)) {
1212 log_info("worker ["PID_FMT
"] continued", pid
);
1215 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1217 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1218 if (worker
->event
) {
1219 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1220 /* delete state from disk */
1221 udev_device_delete_db(worker
->event
->dev
);
1222 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1223 /* forward kernel event without amending it */
1224 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1228 worker_free(worker
);
1231 /* we can start new workers, try to schedule events */
1232 event_queue_start(manager
);
1237 static int on_post(sd_event_source
*s
, void *userdata
) {
1238 Manager
*manager
= userdata
;
1243 if (udev_list_node_is_empty(&manager
->events
)) {
1244 /* no pending events */
1245 if (!hashmap_isempty(manager
->workers
)) {
1246 /* there are idle workers */
1247 log_debug("cleanup idle workers");
1248 manager_kill_workers(manager
);
1251 if (manager
->exit
) {
1252 r
= sd_event_exit(manager
->event
, 0);
1255 } else if (manager
->cgroup
)
1256 /* cleanup possible left-over processes in our cgroup */
1257 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, false, true, NULL
);
1264 static int listen_fds(int *rctrl
, int *rnetlink
) {
1265 _cleanup_udev_unref_
struct udev
*udev
= NULL
;
1266 int ctrl_fd
= -1, netlink_fd
= -1;
1272 n
= sd_listen_fds(true);
1276 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1277 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1284 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1285 if (netlink_fd
>= 0)
1295 _cleanup_udev_ctrl_unref_
struct udev_ctrl
*ctrl
= NULL
;
1301 ctrl
= udev_ctrl_new(udev
);
1303 return log_error_errno(EINVAL
, "error initializing udev control socket");
1305 r
= udev_ctrl_enable_receiving(ctrl
);
1307 return log_error_errno(EINVAL
, "error binding udev control socket");
1309 fd
= udev_ctrl_get_fd(ctrl
);
1311 return log_error_errno(EIO
, "could not get ctrl fd");
1313 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1315 return log_error_errno(errno
, "could not dup ctrl fd: %m");
1318 if (netlink_fd
< 0) {
1319 _cleanup_udev_monitor_unref_
struct udev_monitor
*monitor
= NULL
;
1327 monitor
= udev_monitor_new_from_netlink(udev
, "kernel");
1329 return log_error_errno(EINVAL
, "error initializing netlink socket");
1331 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1333 r
= udev_monitor_enable_receiving(monitor
);
1335 return log_error_errno(EINVAL
, "error binding netlink socket");
1337 fd
= udev_monitor_get_fd(monitor
);
1339 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1341 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1343 return log_error_errno(errno
, "could not dup netlink fd: %m");
1347 *rnetlink
= netlink_fd
;
/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log-priority=<level>              syslog priority
 *   udev.children-max=<number of workers>  events are fully serialized if set to 1
 *   udev.exec-delay=<number of seconds>    delay execution of every executed program
 *   udev.event-timeout=<number of seconds> seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value) {
        const char *full_key = key; /* keep the unstripped key for the warning below */
        int r;

        assert(key);

        if (!value)
                return 0;

        /* the same options are also honored with an initrd "rd." prefix */
        if (startswith(key, "rd."))
                key += strlen("rd.");

        if (startswith(key, "udev."))
                key += strlen("udev.");
        else
                return 0;

        if (streq(key, "log-priority")) {
                int prio;

                prio = util_log_priority(value);
                if (prio < 0)
                        goto invalid;
                log_set_max_level(prio);
        } else if (streq(key, "children-max")) {
                r = safe_atou(value, &arg_children_max);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "exec-delay")) {
                r = safe_atoi(value, &arg_exec_delay);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "event-timeout")) {
                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r < 0)
                        goto invalid;
                /* value is given in seconds; warn after a third of the timeout,
                 * never 0 so the warn timer always fires before the kill timer */
                arg_event_timeout_usec *= USEC_PER_SEC;
                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
        }

        return 0;

invalid:
        /* malformed values are ignored rather than fatal */
        log_warning("invalid %s ignored: %s", full_key, value);
        return 0;
}
/* Print the command-line usage summary to stdout.
 * NOTE(review): column alignment of the option descriptions was reconstructed
 * from the surrounding text — confirm spacing against the original output. */
static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "     --version                Print version of the program\n"
               "     --daemon                 Detach and run in the background\n"
               "     --debug                  Enable debug output\n"
               "     --children-max=INT       Set maximum number of workers\n"
               "     --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "     --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "     --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}
/* Parse command-line options into the arg_* globals.
 * Returns 1 to continue startup, 0 for "handled and exit" (--help, --version,
 * bad --resolve-names), or a negative error code on invalid usage. */
static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd' },
                { "debug",              no_argument,            NULL, 'D' },
                { "children-max",       required_argument,      NULL, 'c' },
                { "exec-delay",         required_argument,      NULL, 'e' },
                { "event-timeout",      required_argument,      NULL, 't' },
                { "resolve-names",      required_argument,      NULL, 'N' },
                { "help",               no_argument,            NULL, 'h' },
                { "version",            no_argument,            NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        /* invalid numeric arguments are warned about and ignored,
                         * matching the kernel-command-line parser */
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                /* seconds -> usec; warn after a third of the timeout */
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        help();
                        return 0;
                case 'V':
                        printf("%s\n", VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}
1497 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1498 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1499 int r
, fd_worker
, one
= 1;
1502 assert(fd_ctrl
>= 0);
1503 assert(fd_uevent
>= 0);
1505 manager
= new0(Manager
, 1);
1509 manager
->fd_inotify
= -1;
1510 manager
->worker_watch
[WRITE_END
] = -1;
1511 manager
->worker_watch
[READ_END
] = -1;
1513 manager
->udev
= udev_new();
1515 return log_error_errno(errno
, "could not allocate udev context: %m");
1517 udev_builtin_init(manager
->udev
);
1519 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1520 if (!manager
->rules
)
1521 return log_error_errno(ENOMEM
, "error reading rules");
1523 udev_list_node_init(&manager
->events
);
1524 udev_list_init(manager
->udev
, &manager
->properties
, true);
1526 manager
->cgroup
= cgroup
;
1528 manager
->ctrl
= udev_ctrl_new_from_fd(manager
->udev
, fd_ctrl
);
1530 return log_error_errno(EINVAL
, "error taking over udev control socket");
1532 manager
->monitor
= udev_monitor_new_from_netlink_fd(manager
->udev
, "kernel", fd_uevent
);
1533 if (!manager
->monitor
)
1534 return log_error_errno(EINVAL
, "error taking over netlink socket");
1536 /* unnamed socket from workers to the main daemon */
1537 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1539 return log_error_errno(errno
, "error creating socketpair: %m");
1541 fd_worker
= manager
->worker_watch
[READ_END
];
1543 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1545 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1547 manager
->fd_inotify
= udev_watch_init(manager
->udev
);
1548 if (manager
->fd_inotify
< 0)
1549 return log_error_errno(ENOMEM
, "error initializing inotify");
1551 udev_watch_restore(manager
->udev
);
1553 /* block and listen to all signals on signalfd */
1554 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1556 r
= sd_event_default(&manager
->event
);
1558 return log_error_errno(errno
, "could not allocate event loop: %m");
1560 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1562 return log_error_errno(r
, "error creating sigint event source: %m");
1564 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1566 return log_error_errno(r
, "error creating sigterm event source: %m");
1568 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1570 return log_error_errno(r
, "error creating sighup event source: %m");
1572 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1574 return log_error_errno(r
, "error creating sigchld event source: %m");
1576 r
= sd_event_set_watchdog(manager
->event
, true);
1578 return log_error_errno(r
, "error creating watchdog event source: %m");
1580 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1582 return log_error_errno(r
, "error creating ctrl event source: %m");
1584 /* This needs to be after the inotify and uevent handling, to make sure
1585 * that the ping is send back after fully processing the pending uevents
1586 * (including the synthetic ones we may create due to inotify events).
1588 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1590 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1592 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1594 return log_error_errno(r
, "error creating inotify event source: %m");
1596 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1598 return log_error_errno(r
, "error creating uevent event source: %m");
1600 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1602 return log_error_errno(r
, "error creating worker event source: %m");
1604 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1606 return log_error_errno(r
, "error creating post event source: %m");
/* Construct the manager, announce readiness to the service manager, and run
 * the event loop until exit is requested; then notify shutdown and close the
 * control socket.  Returns the event loop's exit code, or a negative error. */
static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        /* best effort; failure here is logged but not fatal */
        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notify(false,
                         "READY=1\n"
                         "STATUS=Processing...");

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");

        if (manager)
                udev_ctrl_cleanup(manager->ctrl);

        return r;
}
/* udevd entry point: parse configuration (argv, then kernel command line),
 * enforce root, size the worker pool, prepare /run/udev and /dev, take over
 * or create the listening sockets, optionally daemonize, then hand off to
 * run(). */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int r, fd_ctrl, fd_uevent;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        /* kernel command line overrides are parsed after argv; bad values warn only */
        r = parse_proc_cmdline(parse_proc_cmdline_item);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                /* default worker pool: 8 plus two per available CPU */
                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init("/dev");
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT || r == -ENOEXEC)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG)
                        (void) make_null_stdio();

                pid = fork();
                switch (pid) {
                case 0:
                        /* child: continues as the daemon */
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* parent: exits, leaving the daemon running */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* protect the daemon from the OOM killer; best effort */
                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}