 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
30 #include <sys/epoll.h>
32 #include <sys/inotify.h>
33 #include <sys/ioctl.h>
34 #include <sys/mount.h>
35 #include <sys/prctl.h>
36 #include <sys/signalfd.h>
37 #include <sys/socket.h>
43 #include "sd-daemon.h"
46 #include "alloc-util.h"
47 #include "cgroup-util.h"
48 #include "cpu-set-util.h"
49 #include "dev-setup.h"
52 #include "formats-util.h"
56 #include "netlink-util.h"
57 #include "parse-util.h"
58 #include "proc-cmdline.h"
59 #include "process-util.h"
60 #include "selinux-util.h"
61 #include "signal-util.h"
62 #include "socket-util.h"
63 #include "string-util.h"
64 #include "terminal-util.h"
65 #include "udev-util.h"
67 #include "user-util.h"
69 static bool arg_debug
= false;
70 static int arg_daemonize
= false;
71 static int arg_resolve_names
= 1;
72 static unsigned arg_children_max
;
73 static int arg_exec_delay
;
74 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
75 static usec_t arg_event_timeout_warn_usec
= 180 * USEC_PER_SEC
/ 3;
77 typedef struct Manager
{
81 struct udev_list_node events
;
83 pid_t pid
; /* the process that originally allocated the manager object */
85 struct udev_rules
*rules
;
86 struct udev_list properties
;
88 struct udev_monitor
*monitor
;
89 struct udev_ctrl
*ctrl
;
90 struct udev_ctrl_connection
*ctrl_conn_blocking
;
94 sd_event_source
*ctrl_event
;
95 sd_event_source
*uevent_event
;
96 sd_event_source
*inotify_event
;
100 bool stop_exec_queue
:1;
111 struct udev_list_node node
;
114 struct udev_device
*dev
;
115 struct udev_device
*dev_kernel
;
116 struct worker
*worker
;
117 enum event_state state
;
118 unsigned long long int delaying_seqnum
;
119 unsigned long long int seqnum
;
122 const char *devpath_old
;
126 sd_event_source
*timeout_warning
;
127 sd_event_source
*timeout
;
130 static inline struct event
*node_to_event(struct udev_list_node
*node
) {
131 return container_of(node
, struct event
, node
);
134 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
145 struct udev_list_node node
;
148 struct udev_monitor
*monitor
;
149 enum worker_state state
;
153 /* passed from worker to main process */
154 struct worker_message
{
/* Release a queued event: unlink it from the manager's event queue,
 * drop the device references and the two timeout event sources, and
 * detach it from any worker still pointing at it.  When the queue
 * becomes empty, the process that originally created the queue removes
 * /run/udev/queue (so tools like "udevadm settle" can see the daemon
 * is idle).
 * NOTE(review): fragmented extraction — several interior lines of the
 * original (asserts, braces, the final free, return paths) are missing
 * from this view; confirm against the full source. */
157 static void event_free(struct event
*event
) {
163 udev_list_node_remove(&event
->node
);
164 udev_device_unref(event
->dev
);
165 udev_device_unref(event
->dev_kernel
);
/* cancel both the warning and the hard-kill timers for this event */
167 sd_event_source_unref(event
->timeout_warning
);
168 sd_event_source_unref(event
->timeout
);
/* break the back-pointer so the worker no longer references freed memory */
171 event
->worker
->event
= NULL
;
173 assert(event
->manager
);
175 if (udev_list_node_is_empty(&event
->manager
->events
)) {
176 /* only clean up the queue from the process that created it */
177 if (event
->manager
->pid
== getpid()) {
178 r
= unlink("/run/udev/queue");
180 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
/* Dispose of a single worker: remove it from the manager's pid-keyed
 * worker hashmap, release its netlink monitor and free the event it
 * was processing, if any.
 * NOTE(review): fragmented extraction — interior lines (e.g. the final
 * free of the worker struct itself) are not visible here. */
187 static void worker_free(struct worker
*worker
) {
191 assert(worker
->manager
);
/* unregister from manager->workers; the map is keyed by worker pid */
193 hashmap_remove(worker
->manager
->workers
, PID_TO_PTR(worker
->pid
));
194 udev_monitor_unref(worker
->monitor
);
/* also releases the event the worker had been assigned, if any */
195 event_free(worker
->event
);
/* Free every tracked worker, then the workers hashmap itself.
 * NOTE(review): the loop body (per-worker cleanup, original line ~207)
 * is missing from this fragmented view — presumably worker_free();
 * confirm against the full source. */
200 static void manager_workers_free(Manager
*manager
) {
201 struct worker
*worker
;
206 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
/* drop the (now empty) hashmap and clear the pointer */
209 manager
->workers
= hashmap_free(manager
->workers
);
212 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
213 _cleanup_free_
struct worker
*worker
= NULL
;
218 assert(worker_monitor
);
221 worker
= new0(struct worker
, 1);
225 worker
->refcount
= 1;
226 worker
->manager
= manager
;
227 /* close monitor, but keep address around */
228 udev_monitor_disconnect(worker_monitor
);
229 worker
->monitor
= udev_monitor_ref(worker_monitor
);
232 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
236 r
= hashmap_put(manager
->workers
, PID_TO_PTR(pid
), worker
);
/* sd-event timer callback: the hard per-event timeout
 * (arg_event_timeout_usec, armed in worker_attach_event()) expired.
 * Forcibly kill the worker handling this event (SIGKILL preceded by
 * SIGCONT so a stopped process actually dies) and mark it
 * WORKER_KILLED so it is not reused.
 * NOTE(review): fragmented extraction — the asserts and return
 * statement of the original are not visible here. */
246 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
247 struct event
*event
= userdata
;
250 assert(event
->worker
);
252 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
253 event
->worker
->state
= WORKER_KILLED
;
255 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
/* sd-event timer callback: the warning timeout
 * (arg_event_timeout_warn_usec, one third of the hard timeout) expired.
 * Only logs; the event keeps running until on_event_timeout() fires. */
260 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
261 struct event
*event
= userdata
;
265 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
270 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
275 assert(worker
->manager
);
277 assert(!event
->worker
);
278 assert(!worker
->event
);
280 worker
->state
= WORKER_RUNNING
;
281 worker
->event
= event
;
282 event
->state
= EVENT_RUNNING
;
283 event
->worker
= worker
;
285 e
= worker
->manager
->event
;
287 assert_se(sd_event_now(e
, clock_boottime_or_monotonic(), &usec
) >= 0);
289 (void) sd_event_add_time(e
, &event
->timeout_warning
, clock_boottime_or_monotonic(),
290 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
292 (void) sd_event_add_time(e
, &event
->timeout
, clock_boottime_or_monotonic(),
293 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
296 static void manager_free(Manager
*manager
) {
300 udev_builtin_exit(manager
->udev
);
302 sd_event_source_unref(manager
->ctrl_event
);
303 sd_event_source_unref(manager
->uevent_event
);
304 sd_event_source_unref(manager
->inotify_event
);
306 udev_unref(manager
->udev
);
307 sd_event_unref(manager
->event
);
308 manager_workers_free(manager
);
309 event_queue_cleanup(manager
, EVENT_UNDEF
);
311 udev_monitor_unref(manager
->monitor
);
312 udev_ctrl_unref(manager
->ctrl
);
313 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
315 udev_list_cleanup(&manager
->properties
);
316 udev_rules_unref(manager
->rules
);
318 safe_close(manager
->fd_inotify
);
319 safe_close_pair(manager
->worker_watch
);
324 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
326 static int worker_send_message(int fd
) {
327 struct worker_message message
= {};
329 return loop_write(fd
, &message
, sizeof(message
), false);
332 static void worker_spawn(Manager
*manager
, struct event
*event
) {
333 struct udev
*udev
= event
->udev
;
334 _cleanup_udev_monitor_unref_
struct udev_monitor
*worker_monitor
= NULL
;
338 /* listen for new events */
339 worker_monitor
= udev_monitor_new_from_netlink(udev
, NULL
);
340 if (worker_monitor
== NULL
)
342 /* allow the main daemon netlink address to send devices to the worker */
343 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
344 r
= udev_monitor_enable_receiving(worker_monitor
);
346 log_error_errno(r
, "worker: could not enable receiving of device: %m");
351 struct udev_device
*dev
= NULL
;
352 _cleanup_(sd_netlink_unrefp
) sd_netlink
*rtnl
= NULL
;
354 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
355 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
356 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
359 /* take initial device from queue */
363 unsetenv("NOTIFY_SOCKET");
365 manager_workers_free(manager
);
366 event_queue_cleanup(manager
, EVENT_UNDEF
);
368 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
369 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
370 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
371 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
372 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
374 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
375 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
376 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
378 manager
->event
= sd_event_unref(manager
->event
);
381 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
383 r
= log_error_errno(errno
, "error creating signalfd %m");
386 ep_signal
.data
.fd
= fd_signal
;
388 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
389 ep_monitor
.data
.fd
= fd_monitor
;
391 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
393 r
= log_error_errno(errno
, "error creating epoll fd: %m");
397 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
398 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
399 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
403 /* Request TERM signal if parent exits.
404 Ignore error, not much we can do in that case. */
405 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
407 /* Reset OOM score, we only protect the main daemon. */
408 write_string_file("/proc/self/oom_score_adj", "0", 0);
411 struct udev_event
*udev_event
;
416 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
417 udev_event
= udev_event_new(dev
);
418 if (udev_event
== NULL
) {
423 if (arg_exec_delay
> 0)
424 udev_event
->exec_delay
= arg_exec_delay
;
427 * Take a shared lock on the device node; this establishes
428 * a concept of device "ownership" to serialize device
429 * access. External processes holding an exclusive lock will
430 * cause udev to skip the event handling; in the case udev
431 * acquired the lock, the external process can block until
432 * udev has finished its event handling.
434 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
435 streq_ptr("block", udev_device_get_subsystem(dev
)) &&
436 !startswith(udev_device_get_sysname(dev
), "dm-") &&
437 !startswith(udev_device_get_sysname(dev
), "md")) {
438 struct udev_device
*d
= dev
;
440 if (streq_ptr("partition", udev_device_get_devtype(d
)))
441 d
= udev_device_get_parent(d
);
444 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
445 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
446 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
447 fd_lock
= safe_close(fd_lock
);
453 /* needed for renaming netifs */
454 udev_event
->rtnl
= rtnl
;
456 /* apply rules, create node, symlinks */
457 udev_event_execute_rules(udev_event
,
458 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
459 &manager
->properties
,
462 udev_event_execute_run(udev_event
,
463 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
465 if (udev_event
->rtnl
)
466 /* in case rtnl was initialized */
467 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
469 /* apply/restore inotify watch */
470 if (udev_event
->inotify_watch
) {
471 udev_watch_begin(udev
, dev
);
472 udev_device_update_db(dev
);
477 /* send processed event back to libudev listeners */
478 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
481 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
483 /* send udevd the result of the event execution */
484 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
486 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
487 udev_device_get_seqnum(dev
));
489 udev_device_unref(dev
);
492 udev_event_unref(udev_event
);
494 /* wait for more device messages from main udevd, or term signal */
495 while (dev
== NULL
) {
496 struct epoll_event ev
[4];
500 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
504 r
= log_error_errno(errno
, "failed to poll: %m");
508 for (i
= 0; i
< fdcount
; i
++) {
509 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
510 dev
= udev_monitor_receive_device(worker_monitor
);
512 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
513 struct signalfd_siginfo fdsi
;
516 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
517 if (size
!= sizeof(struct signalfd_siginfo
))
519 switch (fdsi
.ssi_signo
) {
528 udev_device_unref(dev
);
529 manager_free(manager
);
531 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
534 event
->state
= EVENT_QUEUED
;
535 log_error_errno(errno
, "fork of child failed: %m");
539 struct worker
*worker
;
541 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
545 worker_attach_event(worker
, event
);
547 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
553 static void event_run(Manager
*manager
, struct event
*event
) {
554 struct worker
*worker
;
560 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
563 if (worker
->state
!= WORKER_IDLE
)
566 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
568 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
570 kill(worker
->pid
, SIGKILL
);
571 worker
->state
= WORKER_KILLED
;
574 worker_attach_event(worker
, event
);
578 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
579 if (arg_children_max
> 1)
580 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
584 /* start new worker and pass initial device */
585 worker_spawn(manager
, event
);
588 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
595 /* only one process can add events to the queue */
596 if (manager
->pid
== 0)
597 manager
->pid
= getpid();
599 assert(manager
->pid
== getpid());
601 event
= new0(struct event
, 1);
605 event
->udev
= udev_device_get_udev(dev
);
606 event
->manager
= manager
;
608 event
->dev_kernel
= udev_device_shallow_clone(dev
);
609 udev_device_copy_properties(event
->dev_kernel
, dev
);
610 event
->seqnum
= udev_device_get_seqnum(dev
);
611 event
->devpath
= udev_device_get_devpath(dev
);
612 event
->devpath_len
= strlen(event
->devpath
);
613 event
->devpath_old
= udev_device_get_devpath_old(dev
);
614 event
->devnum
= udev_device_get_devnum(dev
);
615 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
616 event
->ifindex
= udev_device_get_ifindex(dev
);
618 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
619 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
621 event
->state
= EVENT_QUEUED
;
623 if (udev_list_node_is_empty(&manager
->events
)) {
624 r
= touch("/run/udev/queue");
626 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
629 udev_list_node_append(&event
->node
, &manager
->events
);
/* Ask every live worker to exit: send SIGTERM to each worker not
 * already marked WORKER_KILLED, recording the new state so a worker is
 * signalled at most once.  Reaping happens later in on_sigchld(). */
634 static void manager_kill_workers(Manager
*manager
) {
635 struct worker
*worker
;
640 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
641 if (worker
->state
== WORKER_KILLED
)
/* NOTE(review): the 'continue' for already-killed workers (original
 * line ~642) is missing from this fragmented view. */
644 worker
->state
= WORKER_KILLED
;
645 kill(worker
->pid
, SIGTERM
);
649 /* lookup event for identical, parent, child device */
650 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
651 struct udev_list_node
*loop
;
654 /* check if queue contains events we depend on */
655 udev_list_node_foreach(loop
, &manager
->events
) {
656 struct event
*loop_event
= node_to_event(loop
);
658 /* we already found a later event, earlier can not block us, no need to check again */
659 if (loop_event
->seqnum
< event
->delaying_seqnum
)
662 /* event we checked earlier still exists, no need to check again */
663 if (loop_event
->seqnum
== event
->delaying_seqnum
)
666 /* found ourself, no later event can block us */
667 if (loop_event
->seqnum
>= event
->seqnum
)
670 /* check major/minor */
671 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
674 /* check network device ifindex */
675 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
678 /* check our old name */
679 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
680 event
->delaying_seqnum
= loop_event
->seqnum
;
684 /* compare devpath */
685 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
687 /* one devpath is contained in the other? */
688 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
691 /* identical device event found */
692 if (loop_event
->devpath_len
== event
->devpath_len
) {
693 /* devices names might have changed/swapped in the meantime */
694 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
696 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
698 event
->delaying_seqnum
= loop_event
->seqnum
;
702 /* parent device event found */
703 if (event
->devpath
[common
] == '/') {
704 event
->delaying_seqnum
= loop_event
->seqnum
;
708 /* child device event found */
709 if (loop_event
->devpath
[common
] == '/') {
710 event
->delaying_seqnum
= loop_event
->seqnum
;
714 /* no matching device */
/* sd-event timer callback armed by manager_exit(): if the workers have
 * not finished within the grace period, give up and terminate the main
 * event loop with -ETIMEDOUT. */
721 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
722 Manager
*manager
= userdata
;
726 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
728 sd_event_exit(manager
->event
, -ETIMEDOUT
);
733 static void manager_exit(Manager
*manager
) {
739 manager
->exit
= true;
743 "STATUS=Starting shutdown...");
745 /* close sources of new events and discard buffered events */
746 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
747 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
749 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
750 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
752 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
753 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
755 /* discard queued events and kill workers */
756 event_queue_cleanup(manager
, EVENT_QUEUED
);
757 manager_kill_workers(manager
);
759 assert_se(sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
) >= 0);
761 r
= sd_event_add_time(manager
->event
, NULL
, clock_boottime_or_monotonic(),
762 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
767 /* reload requested, HUP signal received, rules changed, builtin changed */
/* Flush all cached configuration: terminate the workers, drop the
 * compiled rules (they are lazily recompiled in event_queue_start()
 * when manager->rules is NULL) and reset the built-in commands.
 * NOTE(review): the sd_notify() calls around the STATUS= strings are
 * partially elided in this fragmented view. */
768 static void manager_reload(Manager
*manager
) {
774 "STATUS=Flushing configuration...");
776 manager_kill_workers(manager
);
/* rules are re-read on demand the next time the queue is started */
777 manager
->rules
= udev_rules_unref(manager
->rules
);
778 udev_builtin_exit(manager
->udev
);
782 "STATUS=Processing...");
785 static void event_queue_start(Manager
*manager
) {
786 struct udev_list_node
*loop
;
791 if (udev_list_node_is_empty(&manager
->events
) ||
792 manager
->exit
|| manager
->stop_exec_queue
)
795 assert_se(sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
) >= 0);
796 /* check for changed config, every 3 seconds at most */
797 if (manager
->last_usec
== 0 ||
798 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
799 if (udev_rules_check_timestamp(manager
->rules
) ||
800 udev_builtin_validate(manager
->udev
))
801 manager_reload(manager
);
803 manager
->last_usec
= usec
;
806 udev_builtin_init(manager
->udev
);
808 if (!manager
->rules
) {
809 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
814 udev_list_node_foreach(loop
, &manager
->events
) {
815 struct event
*event
= node_to_event(loop
);
817 if (event
->state
!= EVENT_QUEUED
)
820 /* do not start event if parent or child event is still running */
821 if (is_devpath_busy(manager
, event
))
824 event_run(manager
, event
);
/* Drop queued events: every event whose state matches match_type is
 * freed; passing EVENT_UNDEF matches any state and empties the whole
 * queue.  Uses the _safe list iterator because entries are removed
 * while walking.
 * NOTE(review): the loop body's 'continue' and event_free() call are
 * missing from this fragmented view. */
828 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
829 struct udev_list_node
*loop
, *tmp
;
831 udev_list_node_foreach_safe(loop
, tmp
, &manager
->events
) {
832 struct event
*event
= node_to_event(loop
);
/* skip events in a different state unless cleaning everything */
834 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
841 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
842 Manager
*manager
= userdata
;
847 struct worker_message msg
;
848 struct iovec iovec
= {
850 .iov_len
= sizeof(msg
),
853 struct cmsghdr cmsghdr
;
854 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
856 struct msghdr msghdr
= {
859 .msg_control
= &control
,
860 .msg_controllen
= sizeof(control
),
862 struct cmsghdr
*cmsg
;
864 struct ucred
*ucred
= NULL
;
865 struct worker
*worker
;
867 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
871 else if (errno
== EAGAIN
)
872 /* nothing more to read */
875 return log_error_errno(errno
, "failed to receive message: %m");
876 } else if (size
!= sizeof(struct worker_message
)) {
877 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
881 CMSG_FOREACH(cmsg
, &msghdr
) {
882 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
883 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
884 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
885 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
888 if (!ucred
|| ucred
->pid
<= 0) {
889 log_warning_errno(EIO
, "ignoring worker message without valid PID");
893 /* lookup worker who sent the signal */
894 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(ucred
->pid
));
896 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
900 if (worker
->state
!= WORKER_KILLED
)
901 worker
->state
= WORKER_IDLE
;
903 /* worker returned */
904 event_free(worker
->event
);
907 /* we have free workers, try to schedule events */
908 event_queue_start(manager
);
913 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
914 Manager
*manager
= userdata
;
915 struct udev_device
*dev
;
920 dev
= udev_monitor_receive_device(manager
->monitor
);
922 udev_device_ensure_usec_initialized(dev
, NULL
);
923 r
= event_queue_insert(manager
, dev
);
925 udev_device_unref(dev
);
927 /* we have fresh events, try to schedule them */
928 event_queue_start(manager
);
934 /* receive the udevd message from userspace */
935 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
936 Manager
*manager
= userdata
;
937 _cleanup_udev_ctrl_connection_unref_
struct udev_ctrl_connection
*ctrl_conn
= NULL
;
938 _cleanup_udev_ctrl_msg_unref_
struct udev_ctrl_msg
*ctrl_msg
= NULL
;
944 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
948 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
952 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
954 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
955 log_set_max_level(i
);
956 manager_kill_workers(manager
);
959 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
960 log_debug("udevd message (STOP_EXEC_QUEUE) received");
961 manager
->stop_exec_queue
= true;
964 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
965 log_debug("udevd message (START_EXEC_QUEUE) received");
966 manager
->stop_exec_queue
= false;
967 event_queue_start(manager
);
970 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
971 log_debug("udevd message (RELOAD) received");
972 manager_reload(manager
);
975 str
= udev_ctrl_get_set_env(ctrl_msg
);
977 _cleanup_free_
char *key
= NULL
;
983 val
= strchr(key
, '=');
987 if (val
[0] == '\0') {
988 log_debug("udevd message (ENV) received, unset '%s'", key
);
989 udev_list_entry_add(&manager
->properties
, key
, NULL
);
991 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
992 udev_list_entry_add(&manager
->properties
, key
, val
);
995 log_error("wrong key format '%s'", key
);
997 manager_kill_workers(manager
);
1000 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
1002 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
1003 arg_children_max
= i
;
1006 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
1007 log_debug("udevd message (SYNC) received");
1009 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
1010 log_debug("udevd message (EXIT) received");
1011 manager_exit(manager
);
1012 /* keep reference to block the client until we exit
1013 TODO: deal with several blocking exit requests */
1014 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
1020 static int synthesize_change(struct udev_device
*dev
) {
1021 char filename
[UTIL_PATH_SIZE
];
1024 if (streq_ptr("block", udev_device_get_subsystem(dev
)) &&
1025 streq_ptr("disk", udev_device_get_devtype(dev
)) &&
1026 !startswith(udev_device_get_sysname(dev
), "dm-")) {
1027 bool part_table_read
= false;
1028 bool has_partitions
= false;
1030 struct udev
*udev
= udev_device_get_udev(dev
);
1031 _cleanup_udev_enumerate_unref_
struct udev_enumerate
*e
= NULL
;
1032 struct udev_list_entry
*item
;
1035 * Try to re-read the partition table. This only succeeds if
1036 * none of the devices is busy. The kernel returns 0 if no
1037 * partition table is found, and we will not get an event for
1040 fd
= open(udev_device_get_devnode(dev
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1042 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1044 r
= ioctl(fd
, BLKRRPART
, 0);
1048 part_table_read
= true;
1051 /* search for partitions */
1052 e
= udev_enumerate_new(udev
);
1056 r
= udev_enumerate_add_match_parent(e
, dev
);
1060 r
= udev_enumerate_add_match_subsystem(e
, "block");
1064 r
= udev_enumerate_scan_devices(e
);
1068 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1069 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1071 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1075 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1078 has_partitions
= true;
1083 * We have partitions and re-read the table, the kernel already sent
1084 * out a "change" event for the disk, and "remove/add" for all
1087 if (part_table_read
&& has_partitions
)
1091 * We have partitions but re-reading the partition table did not
1092 * work, synthesize "change" for the disk and all partitions.
1094 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1095 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1096 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1098 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1099 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1101 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1105 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1108 log_debug("device %s closed, synthesising partition '%s' 'change'",
1109 udev_device_get_devnode(dev
), udev_device_get_devnode(d
));
1110 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(d
), "/uevent", NULL
);
1111 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1117 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1118 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1119 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1124 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1125 Manager
*manager
= userdata
;
1126 union inotify_event_buffer buffer
;
1127 struct inotify_event
*e
;
1132 l
= read(fd
, &buffer
, sizeof(buffer
));
1134 if (errno
== EAGAIN
|| errno
== EINTR
)
1137 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1140 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1141 _cleanup_udev_device_unref_
struct udev_device
*dev
= NULL
;
1143 dev
= udev_watch_lookup(manager
->udev
, e
->wd
);
1147 log_debug("inotify event: %x for %s", e
->mask
, udev_device_get_devnode(dev
));
1148 if (e
->mask
& IN_CLOSE_WRITE
) {
1149 synthesize_change(dev
);
1151 /* settle might be waiting on us to determine the queue
1152 * state. If we just handled an inotify event, we might have
1153 * generated a "change" event, but we won't have queued up
1154 * the resultant uevent yet. Do that.
1156 on_uevent(NULL
, -1, 0, manager
);
1157 } else if (e
->mask
& IN_IGNORED
)
1158 udev_watch_end(manager
->udev
, dev
);
/* Signal handler (sd-event): initiate an orderly shutdown via
 * manager_exit().  Presumably registered for SIGTERM/SIGINT — the
 * registration is not visible in this chunk; confirm in main(). */
1164 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1165 Manager
*manager
= userdata
;
1169 manager_exit(manager
);
/* Signal handler (sd-event): SIGHUP — flush cached configuration and
 * reload rules via manager_reload(). */
1174 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1175 Manager
*manager
= userdata
;
1179 manager_reload(manager
);
1184 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1185 Manager
*manager
= userdata
;
1192 struct worker
*worker
;
1194 pid
= waitpid(-1, &status
, WNOHANG
);
1198 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(pid
));
1200 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1204 if (WIFEXITED(status
)) {
1205 if (WEXITSTATUS(status
) == 0)
1206 log_debug("worker ["PID_FMT
"] exited", pid
);
1208 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1209 } else if (WIFSIGNALED(status
)) {
1210 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), strsignal(WTERMSIG(status
)));
1211 } else if (WIFSTOPPED(status
)) {
1212 log_info("worker ["PID_FMT
"] stopped", pid
);
1214 } else if (WIFCONTINUED(status
)) {
1215 log_info("worker ["PID_FMT
"] continued", pid
);
1218 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1220 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1221 if (worker
->event
) {
1222 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1223 /* delete state from disk */
1224 udev_device_delete_db(worker
->event
->dev
);
1225 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1226 /* forward kernel event without amending it */
1227 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1231 worker_free(worker
);
1234 /* we can start new workers, try to schedule events */
1235 event_queue_start(manager
);
1240 static int on_post(sd_event_source
*s
, void *userdata
) {
1241 Manager
*manager
= userdata
;
1246 if (udev_list_node_is_empty(&manager
->events
)) {
1247 /* no pending events */
1248 if (!hashmap_isempty(manager
->workers
)) {
1249 /* there are idle workers */
1250 log_debug("cleanup idle workers");
1251 manager_kill_workers(manager
);
1254 if (manager
->exit
) {
1255 r
= sd_event_exit(manager
->event
, 0);
1258 } else if (manager
->cgroup
)
1259 /* cleanup possible left-over processes in our cgroup */
1260 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, false, true, NULL
);
1267 static int listen_fds(int *rctrl
, int *rnetlink
) {
1268 _cleanup_udev_unref_
struct udev
*udev
= NULL
;
1269 int ctrl_fd
= -1, netlink_fd
= -1;
1275 n
= sd_listen_fds(true);
1279 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1280 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1287 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1288 if (netlink_fd
>= 0)
1298 _cleanup_udev_ctrl_unref_
struct udev_ctrl
*ctrl
= NULL
;
1304 ctrl
= udev_ctrl_new(udev
);
1306 return log_error_errno(EINVAL
, "error initializing udev control socket");
1308 r
= udev_ctrl_enable_receiving(ctrl
);
1310 return log_error_errno(EINVAL
, "error binding udev control socket");
1312 fd
= udev_ctrl_get_fd(ctrl
);
1314 return log_error_errno(EIO
, "could not get ctrl fd");
1316 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1318 return log_error_errno(errno
, "could not dup ctrl fd: %m");
1321 if (netlink_fd
< 0) {
1322 _cleanup_udev_monitor_unref_
struct udev_monitor
*monitor
= NULL
;
1330 monitor
= udev_monitor_new_from_netlink(udev
, "kernel");
1332 return log_error_errno(EINVAL
, "error initializing netlink socket");
1334 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1336 r
= udev_monitor_enable_receiving(monitor
);
1338 return log_error_errno(EINVAL
, "error binding netlink socket");
1340 fd
= udev_monitor_get_fd(monitor
);
1342 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1344 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1346 return log_error_errno(errno
, "could not dup netlink fd: %m");
1350 *rnetlink
= netlink_fd
;
1356 * read the kernel command line, in case we need to get into debug mode
1357 * udev.log-priority=<level> syslog priority
1358 * udev.children-max=<number of workers> events are fully serialized if set to 1
1359 * udev.exec-delay=<number of seconds> delay execution of every executed program
1360 * udev.event-timeout=<number of seconds> seconds to wait before terminating an event
1362 static int parse_proc_cmdline_item(const char *key
, const char *value
) {
1363 const char *full_key
= key
;
1371 if (startswith(key
, "rd."))
1372 key
+= strlen("rd.");
1374 if (startswith(key
, "udev."))
1375 key
+= strlen("udev.");
1379 if (streq(key
, "log-priority")) {
1382 prio
= util_log_priority(value
);
1385 log_set_max_level(prio
);
1386 } else if (streq(key
, "children-max")) {
1387 r
= safe_atou(value
, &arg_children_max
);
1390 } else if (streq(key
, "exec-delay")) {
1391 r
= safe_atoi(value
, &arg_exec_delay
);
1394 } else if (streq(key
, "event-timeout")) {
1395 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1398 arg_event_timeout_usec
*= USEC_PER_SEC
;
1399 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1404 log_warning("invalid %s ignored: %s", full_key
, value
);
1408 static void help(void) {
1409 printf("%s [OPTIONS...]\n\n"
1410 "Manages devices.\n\n"
1411 " -h --help Print this message\n"
1412 " --version Print version of the program\n"
1413 " --daemon Detach and run in the background\n"
1414 " --debug Enable debug output\n"
1415 " --children-max=INT Set maximum number of workers\n"
1416 " --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1417 " --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1418 " --resolve-names=early|late|never\n"
1419 " When to resolve users and groups\n"
1420 , program_invocation_short_name
);
1423 static int parse_argv(int argc
, char *argv
[]) {
1424 static const struct option options
[] = {
1425 { "daemon", no_argument
, NULL
, 'd' },
1426 { "debug", no_argument
, NULL
, 'D' },
1427 { "children-max", required_argument
, NULL
, 'c' },
1428 { "exec-delay", required_argument
, NULL
, 'e' },
1429 { "event-timeout", required_argument
, NULL
, 't' },
1430 { "resolve-names", required_argument
, NULL
, 'N' },
1431 { "help", no_argument
, NULL
, 'h' },
1432 { "version", no_argument
, NULL
, 'V' },
1441 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
1447 arg_daemonize
= true;
1450 r
= safe_atou(optarg
, &arg_children_max
);
1452 log_warning("Invalid --children-max ignored: %s", optarg
);
1455 r
= safe_atoi(optarg
, &arg_exec_delay
);
1457 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1460 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1462 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1464 arg_event_timeout_usec
*= USEC_PER_SEC
;
1465 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1472 if (streq(optarg
, "early")) {
1473 arg_resolve_names
= 1;
1474 } else if (streq(optarg
, "late")) {
1475 arg_resolve_names
= 0;
1476 } else if (streq(optarg
, "never")) {
1477 arg_resolve_names
= -1;
1479 log_error("resolve-names must be early, late or never");
1487 printf("%s\n", VERSION
);
1492 assert_not_reached("Unhandled option");
1500 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1501 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1502 int r
, fd_worker
, one
= 1;
1505 assert(fd_ctrl
>= 0);
1506 assert(fd_uevent
>= 0);
1508 manager
= new0(Manager
, 1);
1512 manager
->fd_inotify
= -1;
1513 manager
->worker_watch
[WRITE_END
] = -1;
1514 manager
->worker_watch
[READ_END
] = -1;
1516 manager
->udev
= udev_new();
1518 return log_error_errno(errno
, "could not allocate udev context: %m");
1520 udev_builtin_init(manager
->udev
);
1522 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1523 if (!manager
->rules
)
1524 return log_error_errno(ENOMEM
, "error reading rules");
1526 udev_list_node_init(&manager
->events
);
1527 udev_list_init(manager
->udev
, &manager
->properties
, true);
1529 manager
->cgroup
= cgroup
;
1531 manager
->ctrl
= udev_ctrl_new_from_fd(manager
->udev
, fd_ctrl
);
1533 return log_error_errno(EINVAL
, "error taking over udev control socket");
1535 manager
->monitor
= udev_monitor_new_from_netlink_fd(manager
->udev
, "kernel", fd_uevent
);
1536 if (!manager
->monitor
)
1537 return log_error_errno(EINVAL
, "error taking over netlink socket");
1539 /* unnamed socket from workers to the main daemon */
1540 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1542 return log_error_errno(errno
, "error creating socketpair: %m");
1544 fd_worker
= manager
->worker_watch
[READ_END
];
1546 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1548 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1550 manager
->fd_inotify
= udev_watch_init(manager
->udev
);
1551 if (manager
->fd_inotify
< 0)
1552 return log_error_errno(ENOMEM
, "error initializing inotify");
1554 udev_watch_restore(manager
->udev
);
1556 /* block and listen to all signals on signalfd */
1557 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1559 r
= sd_event_default(&manager
->event
);
1561 return log_error_errno(r
, "could not allocate event loop: %m");
1563 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1565 return log_error_errno(r
, "error creating sigint event source: %m");
1567 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1569 return log_error_errno(r
, "error creating sigterm event source: %m");
1571 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1573 return log_error_errno(r
, "error creating sighup event source: %m");
1575 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1577 return log_error_errno(r
, "error creating sigchld event source: %m");
1579 r
= sd_event_set_watchdog(manager
->event
, true);
1581 return log_error_errno(r
, "error creating watchdog event source: %m");
1583 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1585 return log_error_errno(r
, "error creating ctrl event source: %m");
1587 /* This needs to be after the inotify and uevent handling, to make sure
1588 * that the ping is send back after fully processing the pending uevents
1589 * (including the synthetic ones we may create due to inotify events).
1591 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1593 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1595 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1597 return log_error_errno(r
, "error creating inotify event source: %m");
1599 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1601 return log_error_errno(r
, "error creating uevent event source: %m");
1603 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1605 return log_error_errno(r
, "error creating worker event source: %m");
1607 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1609 return log_error_errno(r
, "error creating post event source: %m");
1617 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1618 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1621 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1623 r
= log_error_errno(r
, "failed to allocate manager object: %m");
1627 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1629 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1631 (void) sd_notify(false,
1633 "STATUS=Processing...");
1635 r
= sd_event_loop(manager
->event
);
1637 log_error_errno(r
, "event loop failed: %m");
1641 sd_event_get_exit_code(manager
->event
, &r
);
1646 "STATUS=Shutting down...");
1648 udev_ctrl_cleanup(manager
->ctrl
);
1652 int main(int argc
, char *argv
[]) {
1653 _cleanup_free_
char *cgroup
= NULL
;
1654 int fd_ctrl
= -1, fd_uevent
= -1;
1657 log_set_target(LOG_TARGET_AUTO
);
1658 log_parse_environment();
1661 r
= parse_argv(argc
, argv
);
1665 r
= parse_proc_cmdline(parse_proc_cmdline_item
);
1667 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1670 log_set_target(LOG_TARGET_CONSOLE
);
1671 log_set_max_level(LOG_DEBUG
);
1674 if (getuid() != 0) {
1675 r
= log_error_errno(EPERM
, "root privileges required");
1679 if (arg_children_max
== 0) {
1682 arg_children_max
= 8;
1684 if (sched_getaffinity(0, sizeof(cpu_set
), &cpu_set
) == 0)
1685 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1687 log_debug("set children_max to %u", arg_children_max
);
1690 /* set umask before creating any file/directory */
1693 r
= log_error_errno(errno
, "could not change dir to /: %m");
1699 r
= mac_selinux_init();
1701 log_error_errno(r
, "could not initialize labelling: %m");
1705 r
= mkdir("/run/udev", 0755);
1706 if (r
< 0 && errno
!= EEXIST
) {
1707 r
= log_error_errno(errno
, "could not create /run/udev: %m");
1711 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1713 if (getppid() == 1) {
1714 /* get our own cgroup, we regularly kill everything udev has left behind
1715 we only do this on systemd systems, and only if we are directly spawned
1716 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1717 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1719 if (r
== -ENOENT
|| r
== -ENOMEDIUM
)
1720 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1722 log_warning_errno(r
, "failed to get cgroup: %m");
1726 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1728 r
= log_error_errno(r
, "could not listen on fds: %m");
1732 if (arg_daemonize
) {
1735 log_info("starting version " VERSION
);
1737 /* connect /dev/null to stdin, stdout, stderr */
1738 if (log_get_max_level() < LOG_DEBUG
)
1739 (void) make_null_stdio();
1746 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1749 mac_selinux_finish();
1751 _exit(EXIT_SUCCESS
);
1756 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
1759 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1762 mac_selinux_finish();
1764 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;