2 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
30 #include <sys/epoll.h>
32 #include <sys/inotify.h>
33 #include <sys/ioctl.h>
34 #include <sys/mount.h>
35 #include <sys/prctl.h>
36 #include <sys/signalfd.h>
37 #include <sys/socket.h>
43 #include "sd-daemon.h"
46 #include "alloc-util.h"
47 #include "cgroup-util.h"
48 #include "cpu-set-util.h"
49 #include "dev-setup.h"
52 #include "format-util.h"
56 #include "netlink-util.h"
57 #include "parse-util.h"
58 #include "proc-cmdline.h"
59 #include "process-util.h"
60 #include "selinux-util.h"
61 #include "signal-util.h"
62 #include "socket-util.h"
63 #include "string-util.h"
64 #include "terminal-util.h"
65 #include "udev-util.h"
67 #include "user-util.h"
69 static bool arg_debug
= false;
70 static int arg_daemonize
= false;
71 static int arg_resolve_names
= 1;
72 static unsigned arg_children_max
;
73 static int arg_exec_delay
;
74 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
75 static usec_t arg_event_timeout_warn_usec
= 180 * USEC_PER_SEC
/ 3;
77 typedef struct Manager
{
81 struct udev_list_node events
;
83 pid_t pid
; /* the process that originally allocated the manager object */
85 struct udev_rules
*rules
;
86 struct udev_list properties
;
88 struct udev_monitor
*monitor
;
89 struct udev_ctrl
*ctrl
;
90 struct udev_ctrl_connection
*ctrl_conn_blocking
;
94 sd_event_source
*ctrl_event
;
95 sd_event_source
*uevent_event
;
96 sd_event_source
*inotify_event
;
100 bool stop_exec_queue
:1;
111 struct udev_list_node node
;
114 struct udev_device
*dev
;
115 struct udev_device
*dev_kernel
;
116 struct worker
*worker
;
117 enum event_state state
;
118 unsigned long long int delaying_seqnum
;
119 unsigned long long int seqnum
;
122 const char *devpath_old
;
126 sd_event_source
*timeout_warning
;
127 sd_event_source
*timeout
;
130 static inline struct event
*node_to_event(struct udev_list_node
*node
) {
131 return container_of(node
, struct event
, node
);
134 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
145 struct udev_list_node node
;
148 struct udev_monitor
*monitor
;
149 enum worker_state state
;
153 /* passed from worker to main process */
154 struct worker_message
{
157 static void event_free(struct event
*event
) {
163 udev_list_node_remove(&event
->node
);
164 udev_device_unref(event
->dev
);
165 udev_device_unref(event
->dev_kernel
);
167 sd_event_source_unref(event
->timeout_warning
);
168 sd_event_source_unref(event
->timeout
);
171 event
->worker
->event
= NULL
;
173 assert(event
->manager
);
175 if (udev_list_node_is_empty(&event
->manager
->events
)) {
176 /* only clean up the queue from the process that created it */
177 if (event
->manager
->pid
== getpid()) {
178 r
= unlink("/run/udev/queue");
180 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
187 static void worker_free(struct worker
*worker
) {
191 assert(worker
->manager
);
193 hashmap_remove(worker
->manager
->workers
, PID_TO_PTR(worker
->pid
));
194 udev_monitor_unref(worker
->monitor
);
195 event_free(worker
->event
);
200 static void manager_workers_free(Manager
*manager
) {
201 struct worker
*worker
;
206 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
209 manager
->workers
= hashmap_free(manager
->workers
);
212 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
213 _cleanup_free_
struct worker
*worker
= NULL
;
218 assert(worker_monitor
);
221 worker
= new0(struct worker
, 1);
225 worker
->refcount
= 1;
226 worker
->manager
= manager
;
227 /* close monitor, but keep address around */
228 udev_monitor_disconnect(worker_monitor
);
229 worker
->monitor
= udev_monitor_ref(worker_monitor
);
232 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
236 r
= hashmap_put(manager
->workers
, PID_TO_PTR(pid
), worker
);
246 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
247 struct event
*event
= userdata
;
250 assert(event
->worker
);
252 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
253 event
->worker
->state
= WORKER_KILLED
;
255 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
260 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
261 struct event
*event
= userdata
;
265 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
270 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
275 assert(worker
->manager
);
277 assert(!event
->worker
);
278 assert(!worker
->event
);
280 worker
->state
= WORKER_RUNNING
;
281 worker
->event
= event
;
282 event
->state
= EVENT_RUNNING
;
283 event
->worker
= worker
;
285 e
= worker
->manager
->event
;
287 assert_se(sd_event_now(e
, clock_boottime_or_monotonic(), &usec
) >= 0);
289 (void) sd_event_add_time(e
, &event
->timeout_warning
, clock_boottime_or_monotonic(),
290 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
292 (void) sd_event_add_time(e
, &event
->timeout
, clock_boottime_or_monotonic(),
293 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
296 static void manager_free(Manager
*manager
) {
300 udev_builtin_exit(manager
->udev
);
302 sd_event_source_unref(manager
->ctrl_event
);
303 sd_event_source_unref(manager
->uevent_event
);
304 sd_event_source_unref(manager
->inotify_event
);
306 udev_unref(manager
->udev
);
307 sd_event_unref(manager
->event
);
308 manager_workers_free(manager
);
309 event_queue_cleanup(manager
, EVENT_UNDEF
);
311 udev_monitor_unref(manager
->monitor
);
312 udev_ctrl_unref(manager
->ctrl
);
313 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
315 udev_list_cleanup(&manager
->properties
);
316 udev_rules_unref(manager
->rules
);
318 safe_close(manager
->fd_inotify
);
319 safe_close_pair(manager
->worker_watch
);
324 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
326 static int worker_send_message(int fd
) {
327 struct worker_message message
= {};
329 return loop_write(fd
, &message
, sizeof(message
), false);
332 static void worker_spawn(Manager
*manager
, struct event
*event
) {
333 struct udev
*udev
= event
->udev
;
334 _cleanup_udev_monitor_unref_
struct udev_monitor
*worker_monitor
= NULL
;
338 /* listen for new events */
339 worker_monitor
= udev_monitor_new_from_netlink(udev
, NULL
);
340 if (worker_monitor
== NULL
)
342 /* allow the main daemon netlink address to send devices to the worker */
343 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
344 r
= udev_monitor_enable_receiving(worker_monitor
);
346 log_error_errno(r
, "worker: could not enable receiving of device: %m");
351 struct udev_device
*dev
= NULL
;
352 _cleanup_(sd_netlink_unrefp
) sd_netlink
*rtnl
= NULL
;
354 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
355 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
356 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
359 /* take initial device from queue */
363 unsetenv("NOTIFY_SOCKET");
365 manager_workers_free(manager
);
366 event_queue_cleanup(manager
, EVENT_UNDEF
);
368 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
369 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
370 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
371 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
373 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
374 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
375 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
377 manager
->event
= sd_event_unref(manager
->event
);
380 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
382 r
= log_error_errno(errno
, "error creating signalfd %m");
385 ep_signal
.data
.fd
= fd_signal
;
387 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
388 ep_monitor
.data
.fd
= fd_monitor
;
390 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
392 r
= log_error_errno(errno
, "error creating epoll fd: %m");
396 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
397 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
398 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
402 /* Request TERM signal if parent exits.
403 Ignore error, not much we can do in that case. */
404 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
406 /* Reset OOM score, we only protect the main daemon. */
407 write_string_file("/proc/self/oom_score_adj", "0", 0);
410 struct udev_event
*udev_event
;
415 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
416 udev_event
= udev_event_new(dev
);
417 if (udev_event
== NULL
) {
422 if (arg_exec_delay
> 0)
423 udev_event
->exec_delay
= arg_exec_delay
;
426 * Take a shared lock on the device node; this establishes
427 * a concept of device "ownership" to serialize device
428 * access. External processes holding an exclusive lock will
429 * cause udev to skip the event handling; in the case udev
430 * acquired the lock, the external process can block until
431 * udev has finished its event handling.
433 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
434 streq_ptr("block", udev_device_get_subsystem(dev
)) &&
435 !startswith(udev_device_get_sysname(dev
), "dm-") &&
436 !startswith(udev_device_get_sysname(dev
), "md")) {
437 struct udev_device
*d
= dev
;
439 if (streq_ptr("partition", udev_device_get_devtype(d
)))
440 d
= udev_device_get_parent(d
);
443 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
444 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
445 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
446 fd_lock
= safe_close(fd_lock
);
452 /* needed for renaming netifs */
453 udev_event
->rtnl
= rtnl
;
455 /* apply rules, create node, symlinks */
456 udev_event_execute_rules(udev_event
,
457 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
458 &manager
->properties
,
461 udev_event_execute_run(udev_event
,
462 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
464 if (udev_event
->rtnl
)
465 /* in case rtnl was initialized */
466 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
468 /* apply/restore inotify watch */
469 if (udev_event
->inotify_watch
) {
470 udev_watch_begin(udev
, dev
);
471 udev_device_update_db(dev
);
476 /* send processed event back to libudev listeners */
477 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
480 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
482 /* send udevd the result of the event execution */
483 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
485 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
486 udev_device_get_seqnum(dev
));
488 udev_device_unref(dev
);
491 udev_event_unref(udev_event
);
493 /* wait for more device messages from main udevd, or term signal */
494 while (dev
== NULL
) {
495 struct epoll_event ev
[4];
499 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
503 r
= log_error_errno(errno
, "failed to poll: %m");
507 for (i
= 0; i
< fdcount
; i
++) {
508 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
509 dev
= udev_monitor_receive_device(worker_monitor
);
511 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
512 struct signalfd_siginfo fdsi
;
515 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
516 if (size
!= sizeof(struct signalfd_siginfo
))
518 switch (fdsi
.ssi_signo
) {
527 udev_device_unref(dev
);
528 manager_free(manager
);
530 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
533 event
->state
= EVENT_QUEUED
;
534 log_error_errno(errno
, "fork of child failed: %m");
538 struct worker
*worker
;
540 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
544 worker_attach_event(worker
, event
);
546 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
552 static void event_run(Manager
*manager
, struct event
*event
) {
553 struct worker
*worker
;
559 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
562 if (worker
->state
!= WORKER_IDLE
)
565 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
567 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
569 kill(worker
->pid
, SIGKILL
);
570 worker
->state
= WORKER_KILLED
;
573 worker_attach_event(worker
, event
);
577 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
578 if (arg_children_max
> 1)
579 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
583 /* start new worker and pass initial device */
584 worker_spawn(manager
, event
);
587 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
594 /* only one process can add events to the queue */
595 if (manager
->pid
== 0)
596 manager
->pid
= getpid();
598 assert(manager
->pid
== getpid());
600 event
= new0(struct event
, 1);
604 event
->udev
= udev_device_get_udev(dev
);
605 event
->manager
= manager
;
607 event
->dev_kernel
= udev_device_shallow_clone(dev
);
608 udev_device_copy_properties(event
->dev_kernel
, dev
);
609 event
->seqnum
= udev_device_get_seqnum(dev
);
610 event
->devpath
= udev_device_get_devpath(dev
);
611 event
->devpath_len
= strlen(event
->devpath
);
612 event
->devpath_old
= udev_device_get_devpath_old(dev
);
613 event
->devnum
= udev_device_get_devnum(dev
);
614 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
615 event
->ifindex
= udev_device_get_ifindex(dev
);
617 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
618 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
620 event
->state
= EVENT_QUEUED
;
622 if (udev_list_node_is_empty(&manager
->events
)) {
623 r
= touch("/run/udev/queue");
625 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
628 udev_list_node_append(&event
->node
, &manager
->events
);
633 static void manager_kill_workers(Manager
*manager
) {
634 struct worker
*worker
;
639 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
640 if (worker
->state
== WORKER_KILLED
)
643 worker
->state
= WORKER_KILLED
;
644 kill(worker
->pid
, SIGTERM
);
648 /* lookup event for identical, parent, child device */
649 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
650 struct udev_list_node
*loop
;
653 /* check if queue contains events we depend on */
654 udev_list_node_foreach(loop
, &manager
->events
) {
655 struct event
*loop_event
= node_to_event(loop
);
657 /* we already found a later event, earlier can not block us, no need to check again */
658 if (loop_event
->seqnum
< event
->delaying_seqnum
)
661 /* event we checked earlier still exists, no need to check again */
662 if (loop_event
->seqnum
== event
->delaying_seqnum
)
665 /* found ourself, no later event can block us */
666 if (loop_event
->seqnum
>= event
->seqnum
)
669 /* check major/minor */
670 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
673 /* check network device ifindex */
674 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
677 /* check our old name */
678 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
679 event
->delaying_seqnum
= loop_event
->seqnum
;
683 /* compare devpath */
684 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
686 /* one devpath is contained in the other? */
687 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
690 /* identical device event found */
691 if (loop_event
->devpath_len
== event
->devpath_len
) {
692 /* devices names might have changed/swapped in the meantime */
693 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
695 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
697 event
->delaying_seqnum
= loop_event
->seqnum
;
701 /* parent device event found */
702 if (event
->devpath
[common
] == '/') {
703 event
->delaying_seqnum
= loop_event
->seqnum
;
707 /* child device event found */
708 if (loop_event
->devpath
[common
] == '/') {
709 event
->delaying_seqnum
= loop_event
->seqnum
;
713 /* no matching device */
720 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
721 Manager
*manager
= userdata
;
725 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
727 sd_event_exit(manager
->event
, -ETIMEDOUT
);
732 static void manager_exit(Manager
*manager
) {
738 manager
->exit
= true;
742 "STATUS=Starting shutdown...");
744 /* close sources of new events and discard buffered events */
745 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
746 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
748 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
749 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
751 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
752 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
754 /* discard queued events and kill workers */
755 event_queue_cleanup(manager
, EVENT_QUEUED
);
756 manager_kill_workers(manager
);
758 assert_se(sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
) >= 0);
760 r
= sd_event_add_time(manager
->event
, NULL
, clock_boottime_or_monotonic(),
761 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
766 /* reload requested, HUP signal received, rules changed, builtin changed */
767 static void manager_reload(Manager
*manager
) {
773 "STATUS=Flushing configuration...");
775 manager_kill_workers(manager
);
776 manager
->rules
= udev_rules_unref(manager
->rules
);
777 udev_builtin_exit(manager
->udev
);
781 "STATUS=Processing with %u children at max", arg_children_max
);
784 static void event_queue_start(Manager
*manager
) {
785 struct udev_list_node
*loop
;
790 if (udev_list_node_is_empty(&manager
->events
) ||
791 manager
->exit
|| manager
->stop_exec_queue
)
794 assert_se(sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
) >= 0);
795 /* check for changed config, every 3 seconds at most */
796 if (manager
->last_usec
== 0 ||
797 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
798 if (udev_rules_check_timestamp(manager
->rules
) ||
799 udev_builtin_validate(manager
->udev
))
800 manager_reload(manager
);
802 manager
->last_usec
= usec
;
805 udev_builtin_init(manager
->udev
);
807 if (!manager
->rules
) {
808 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
813 udev_list_node_foreach(loop
, &manager
->events
) {
814 struct event
*event
= node_to_event(loop
);
816 if (event
->state
!= EVENT_QUEUED
)
819 /* do not start event if parent or child event is still running */
820 if (is_devpath_busy(manager
, event
))
823 event_run(manager
, event
);
827 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
828 struct udev_list_node
*loop
, *tmp
;
830 udev_list_node_foreach_safe(loop
, tmp
, &manager
->events
) {
831 struct event
*event
= node_to_event(loop
);
833 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
840 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
841 Manager
*manager
= userdata
;
846 struct worker_message msg
;
847 struct iovec iovec
= {
849 .iov_len
= sizeof(msg
),
852 struct cmsghdr cmsghdr
;
853 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
855 struct msghdr msghdr
= {
858 .msg_control
= &control
,
859 .msg_controllen
= sizeof(control
),
861 struct cmsghdr
*cmsg
;
863 struct ucred
*ucred
= NULL
;
864 struct worker
*worker
;
866 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
870 else if (errno
== EAGAIN
)
871 /* nothing more to read */
874 return log_error_errno(errno
, "failed to receive message: %m");
875 } else if (size
!= sizeof(struct worker_message
)) {
876 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
880 CMSG_FOREACH(cmsg
, &msghdr
) {
881 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
882 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
883 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
884 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
887 if (!ucred
|| ucred
->pid
<= 0) {
888 log_warning_errno(EIO
, "ignoring worker message without valid PID");
892 /* lookup worker who sent the signal */
893 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(ucred
->pid
));
895 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
899 if (worker
->state
!= WORKER_KILLED
)
900 worker
->state
= WORKER_IDLE
;
902 /* worker returned */
903 event_free(worker
->event
);
906 /* we have free workers, try to schedule events */
907 event_queue_start(manager
);
912 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
913 Manager
*manager
= userdata
;
914 struct udev_device
*dev
;
919 dev
= udev_monitor_receive_device(manager
->monitor
);
921 udev_device_ensure_usec_initialized(dev
, NULL
);
922 r
= event_queue_insert(manager
, dev
);
924 udev_device_unref(dev
);
926 /* we have fresh events, try to schedule them */
927 event_queue_start(manager
);
933 /* receive the udevd message from userspace */
934 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
935 Manager
*manager
= userdata
;
936 _cleanup_udev_ctrl_connection_unref_
struct udev_ctrl_connection
*ctrl_conn
= NULL
;
937 _cleanup_udev_ctrl_msg_unref_
struct udev_ctrl_msg
*ctrl_msg
= NULL
;
943 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
947 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
951 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
953 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
954 log_set_max_level(i
);
955 manager_kill_workers(manager
);
958 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
959 log_debug("udevd message (STOP_EXEC_QUEUE) received");
960 manager
->stop_exec_queue
= true;
963 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
964 log_debug("udevd message (START_EXEC_QUEUE) received");
965 manager
->stop_exec_queue
= false;
966 event_queue_start(manager
);
969 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
970 log_debug("udevd message (RELOAD) received");
971 manager_reload(manager
);
974 str
= udev_ctrl_get_set_env(ctrl_msg
);
976 _cleanup_free_
char *key
= NULL
;
982 val
= strchr(key
, '=');
986 if (val
[0] == '\0') {
987 log_debug("udevd message (ENV) received, unset '%s'", key
);
988 udev_list_entry_add(&manager
->properties
, key
, NULL
);
990 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
991 udev_list_entry_add(&manager
->properties
, key
, val
);
994 log_error("wrong key format '%s'", key
);
996 manager_kill_workers(manager
);
999 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
1001 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
1002 arg_children_max
= i
;
1004 (void) sd_notifyf(false,
1006 "STATUS=Processing with %u children at max", arg_children_max
);
1009 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
1010 log_debug("udevd message (SYNC) received");
1012 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
1013 log_debug("udevd message (EXIT) received");
1014 manager_exit(manager
);
1015 /* keep reference to block the client until we exit
1016 TODO: deal with several blocking exit requests */
1017 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
1023 static int synthesize_change(struct udev_device
*dev
) {
1024 char filename
[UTIL_PATH_SIZE
];
1027 if (streq_ptr("block", udev_device_get_subsystem(dev
)) &&
1028 streq_ptr("disk", udev_device_get_devtype(dev
)) &&
1029 !startswith(udev_device_get_sysname(dev
), "dm-")) {
1030 bool part_table_read
= false;
1031 bool has_partitions
= false;
1033 struct udev
*udev
= udev_device_get_udev(dev
);
1034 _cleanup_udev_enumerate_unref_
struct udev_enumerate
*e
= NULL
;
1035 struct udev_list_entry
*item
;
1038 * Try to re-read the partition table. This only succeeds if
1039 * none of the devices is busy. The kernel returns 0 if no
1040 * partition table is found, and we will not get an event for
1043 fd
= open(udev_device_get_devnode(dev
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1045 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1047 r
= ioctl(fd
, BLKRRPART
, 0);
1051 part_table_read
= true;
1054 /* search for partitions */
1055 e
= udev_enumerate_new(udev
);
1059 r
= udev_enumerate_add_match_parent(e
, dev
);
1063 r
= udev_enumerate_add_match_subsystem(e
, "block");
1067 r
= udev_enumerate_scan_devices(e
);
1071 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1072 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1074 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1078 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1081 has_partitions
= true;
1086 * We have partitions and re-read the table, the kernel already sent
1087 * out a "change" event for the disk, and "remove/add" for all
1090 if (part_table_read
&& has_partitions
)
1094 * We have partitions but re-reading the partition table did not
1095 * work, synthesize "change" for the disk and all partitions.
1097 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1098 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1099 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1101 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1102 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1104 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1108 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1111 log_debug("device %s closed, synthesising partition '%s' 'change'",
1112 udev_device_get_devnode(dev
), udev_device_get_devnode(d
));
1113 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(d
), "/uevent", NULL
);
1114 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1120 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1121 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1122 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1127 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1128 Manager
*manager
= userdata
;
1129 union inotify_event_buffer buffer
;
1130 struct inotify_event
*e
;
1135 l
= read(fd
, &buffer
, sizeof(buffer
));
1137 if (errno
== EAGAIN
|| errno
== EINTR
)
1140 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1143 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1144 _cleanup_udev_device_unref_
struct udev_device
*dev
= NULL
;
1146 dev
= udev_watch_lookup(manager
->udev
, e
->wd
);
1150 log_debug("inotify event: %x for %s", e
->mask
, udev_device_get_devnode(dev
));
1151 if (e
->mask
& IN_CLOSE_WRITE
) {
1152 synthesize_change(dev
);
1154 /* settle might be waiting on us to determine the queue
1155 * state. If we just handled an inotify event, we might have
1156 * generated a "change" event, but we won't have queued up
1157 * the resultant uevent yet. Do that.
1159 on_uevent(NULL
, -1, 0, manager
);
1160 } else if (e
->mask
& IN_IGNORED
)
1161 udev_watch_end(manager
->udev
, dev
);
1167 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1168 Manager
*manager
= userdata
;
1172 manager_exit(manager
);
1177 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1178 Manager
*manager
= userdata
;
1182 manager_reload(manager
);
1187 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1188 Manager
*manager
= userdata
;
1195 struct worker
*worker
;
1197 pid
= waitpid(-1, &status
, WNOHANG
);
1201 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(pid
));
1203 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1207 if (WIFEXITED(status
)) {
1208 if (WEXITSTATUS(status
) == 0)
1209 log_debug("worker ["PID_FMT
"] exited", pid
);
1211 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1212 } else if (WIFSIGNALED(status
)) {
1213 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), strsignal(WTERMSIG(status
)));
1214 } else if (WIFSTOPPED(status
)) {
1215 log_info("worker ["PID_FMT
"] stopped", pid
);
1217 } else if (WIFCONTINUED(status
)) {
1218 log_info("worker ["PID_FMT
"] continued", pid
);
1221 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1223 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1224 if (worker
->event
) {
1225 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1226 /* delete state from disk */
1227 udev_device_delete_db(worker
->event
->dev
);
1228 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1229 /* forward kernel event without amending it */
1230 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1234 worker_free(worker
);
1237 /* we can start new workers, try to schedule events */
1238 event_queue_start(manager
);
1243 static int on_post(sd_event_source
*s
, void *userdata
) {
1244 Manager
*manager
= userdata
;
1249 if (udev_list_node_is_empty(&manager
->events
)) {
1250 /* no pending events */
1251 if (!hashmap_isempty(manager
->workers
)) {
1252 /* there are idle workers */
1253 log_debug("cleanup idle workers");
1254 manager_kill_workers(manager
);
1257 if (manager
->exit
) {
1258 r
= sd_event_exit(manager
->event
, 0);
1261 } else if (manager
->cgroup
)
1262 /* cleanup possible left-over processes in our cgroup */
1263 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, CGROUP_IGNORE_SELF
, NULL
, NULL
, NULL
);
/* Obtain the udev control socket and the kernel uevent netlink socket.
 * First tries socket activation (sd_listen_fds); any fd not passed in is
 * created here and bound. On success the two fds are duped CLOEXEC (>= 3)
 * and returned via *rctrl / *rnetlink.
 * NOTE(review): declarations of n/r/fd, the error branches, the *rctrl
 * store and the return are missing from this extract; text kept as-is. */
1270 static int listen_fds(int *rctrl
, int *rnetlink
) {
1271 _cleanup_udev_unref_
struct udev
*udev
= NULL
;
1272 int ctrl_fd
= -1, netlink_fd
= -1;
/* ask the service manager for activated fds; "true" unsets $LISTEN_* */
1278 n
= sd_listen_fds(true);
1282 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
/* SEQPACKET AF_LOCAL socket == the udev control socket */
1283 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
/* raw AF_NETLINK socket == the kernel uevent socket */
1290 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1291 if (netlink_fd
>= 0)
/* no control socket passed in: create and bind one ourselves */
1301 _cleanup_udev_ctrl_unref_
struct udev_ctrl
*ctrl
= NULL
;
1307 ctrl
= udev_ctrl_new(udev
);
1309 return log_error_errno(EINVAL
, "error initializing udev control socket");
1311 r
= udev_ctrl_enable_receiving(ctrl
);
1313 return log_error_errno(EINVAL
, "error binding udev control socket");
1315 fd
= udev_ctrl_get_fd(ctrl
);
1317 return log_error_errno(EIO
, "could not get ctrl fd");
/* dup so the fd survives the _cleanup_ unref of ctrl */
1319 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1321 return log_error_errno(errno
, "could not dup ctrl fd: %m");
/* no netlink socket passed in: create a kernel uevent monitor */
1324 if (netlink_fd
< 0) {
1325 _cleanup_udev_monitor_unref_
struct udev_monitor
*monitor
= NULL
;
1333 monitor
= udev_monitor_new_from_netlink(udev
, "kernel");
1335 return log_error_errno(EINVAL
, "error initializing netlink socket");
/* large receive buffer so uevent bursts are not dropped */
1337 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1339 r
= udev_monitor_enable_receiving(monitor
);
1341 return log_error_errno(EINVAL
, "error binding netlink socket");
1343 fd
= udev_monitor_get_fd(monitor
);
/* NOTE(review): passing netlink_fd (known < 0 here, but -1, not an
 * errno) as the error code looks dubious — verify against upstream */
1345 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1347 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1349 return log_error_errno(errno
, "could not dup netlink fd: %m");
1353 *rnetlink
= netlink_fd
;
/* (opening "/*" of this comment block was lost in extraction)
1359 * read the kernel command line, in case we need to get into debug mode
1360 * udev.log-priority=<level> syslog priority
1361 * udev.children-max=<number of workers> events are fully serialized if set to 1
1362 * udev.exec-delay=<number of seconds> delay execution of every executed program
1363 * udev.event-timeout=<number of seconds> seconds to wait before terminating an event
*/
/* proc_cmdline() callback: parses one udev.* key=value pair from the
 * kernel command line into the corresponding arg_* global. Unknown
 * udev.* keys are warned about; parse failures are logged and ignored.
 * NOTE(review): the `int r` declaration, several `if (r < 0)` guards
 * and the return are missing from this extract; text kept as-is. */
1365 static int parse_proc_cmdline_item(const char *key
, const char *value
, void *data
) {
1373 if (streq(key
, "udev.log-priority") && value
) {
1374 r
= util_log_priority(value
);
1376 log_set_max_level(r
);
1377 } else if (streq(key
, "udev.event-timeout") && value
) {
1378 r
= safe_atou64(value
, &arg_event_timeout_usec
);
/* NOTE(review): seconds -> usec multiply is unchecked; a huge value
 * wraps the usec_t — consider bounding before the multiply */
1380 arg_event_timeout_usec
*= USEC_PER_SEC
;
/* warn at 1/3 of the timeout; `?: 1` keeps it non-zero */
1381 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1383 } else if (streq(key
, "udev.children-max") && value
)
1384 r
= safe_atou(value
, &arg_children_max
);
1385 else if (streq(key
, "udev.exec-delay") && value
)
1386 r
= safe_atoi(value
, &arg_exec_delay
);
1387 else if (startswith(key
, "udev."))
1388 log_warning("Unknown udev kernel command line option \"%s\"", key
);
/* best-effort: bad values are reported but never fatal at boot */
1391 log_warning_errno(r
, "Failed to parse \"%s=%s\", ignoring: %m", key
, value
);
1395 static void help(void) {
1396 printf("%s [OPTIONS...]\n\n"
1397 "Manages devices.\n\n"
1398 " -h --help Print this message\n"
1399 " --version Print version of the program\n"
1400 " --daemon Detach and run in the background\n"
1401 " --debug Enable debug output\n"
1402 " --children-max=INT Set maximum number of workers\n"
1403 " --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1404 " --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1405 " --resolve-names=early|late|never\n"
1406 " When to resolve users and groups\n"
1407 , program_invocation_short_name
);
/* Parse the daemon's command-line options with getopt_long, filling the
 * arg_* globals. Invalid numeric option values are warned about and
 * ignored rather than being fatal.
 * NOTE(review): the `switch (c)` statement, its `case` labels, returns
 * and closing braces are missing from this extract; text kept as-is. */
1410 static int parse_argv(int argc
, char *argv
[]) {
/* long-option table; short aliases mirror the "c:de:Dt:N:hV" optstring */
1411 static const struct option options
[] = {
1412 { "daemon", no_argument
, NULL
, 'd' },
1413 { "debug", no_argument
, NULL
, 'D' },
1414 { "children-max", required_argument
, NULL
, 'c' },
1415 { "exec-delay", required_argument
, NULL
, 'e' },
1416 { "event-timeout", required_argument
, NULL
, 't' },
1417 { "resolve-names", required_argument
, NULL
, 'N' },
1418 { "help", no_argument
, NULL
, 'h' },
1419 { "version", no_argument
, NULL
, 'V' },
1428 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
/* -d/--daemon */
1434 arg_daemonize
= true;
/* -c/--children-max */
1437 r
= safe_atou(optarg
, &arg_children_max
);
1439 log_warning("Invalid --children-max ignored: %s", optarg
);
/* -e/--exec-delay */
1442 r
= safe_atoi(optarg
, &arg_exec_delay
);
1444 log_warning("Invalid --exec-delay ignored: %s", optarg
);
/* -t/--event-timeout: seconds -> usec, warn threshold at 1/3 */
1447 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1449 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1451 arg_event_timeout_usec
*= USEC_PER_SEC
;
1452 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
/* -N/--resolve-names: early=1, late=0, never=-1 */
1459 if (streq(optarg
, "early")) {
1460 arg_resolve_names
= 1;
1461 } else if (streq(optarg
, "late")) {
1462 arg_resolve_names
= 0;
1463 } else if (streq(optarg
, "never")) {
1464 arg_resolve_names
= -1;
1466 log_error("resolve-names must be early, late or never");
/* -V/--version */
1474 printf("%s\n", VERSION
);
1479 assert_not_reached("Unhandled option");
/* Allocate and wire up the Manager: udev context, rules, control and
 * netlink sockets (taken over from the given fds), the worker
 * socketpair, inotify watch, and all sd-event signal/io/post sources.
 * On success *ret receives the manager; on failure the _cleanup_
 * handler frees the partially built object.
 * NOTE(review): the `if (r < 0)` / `if (!...)` guard lines preceding
 * most error returns, the final `*ret = ...; return 0;` tail and the
 * closing brace are missing from this extract; text kept as-is. */
1487 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1488 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1489 int r
, fd_worker
, one
= 1;
1492 assert(fd_ctrl
>= 0);
1493 assert(fd_uevent
>= 0);
1495 manager
= new0(Manager
, 1);
/* mark fds invalid so manager_free() can close them unconditionally */
1499 manager
->fd_inotify
= -1;
1500 manager
->worker_watch
[WRITE_END
] = -1;
1501 manager
->worker_watch
[READ_END
] = -1;
1503 manager
->udev
= udev_new();
1505 return log_error_errno(errno
, "could not allocate udev context: %m");
1507 udev_builtin_init(manager
->udev
);
/* compile the rules database; resolve-names policy from the cmdline */
1509 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1510 if (!manager
->rules
)
1511 return log_error_errno(ENOMEM
, "error reading rules");
1513 udev_list_node_init(&manager
->events
);
1514 udev_list_init(manager
->udev
, &manager
->properties
, true);
/* borrowed pointer: caller keeps ownership of the cgroup path */
1516 manager
->cgroup
= cgroup
;
/* adopt the pre-opened control and uevent sockets */
1518 manager
->ctrl
= udev_ctrl_new_from_fd(manager
->udev
, fd_ctrl
);
1520 return log_error_errno(EINVAL
, "error taking over udev control socket");
1522 manager
->monitor
= udev_monitor_new_from_netlink_fd(manager
->udev
, "kernel", fd_uevent
);
1523 if (!manager
->monitor
)
1524 return log_error_errno(EINVAL
, "error taking over netlink socket");
1526 /* unnamed socket from workers to the main daemon */
1527 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1529 return log_error_errno(errno
, "error creating socketpair: %m");
1531 fd_worker
= manager
->worker_watch
[READ_END
];
/* SO_PASSCRED lets on_worker() verify sender pids via SCM_CREDENTIALS */
1533 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1535 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1537 manager
->fd_inotify
= udev_watch_init(manager
->udev
);
1538 if (manager
->fd_inotify
< 0)
1539 return log_error_errno(ENOMEM
, "error initializing inotify");
1541 udev_watch_restore(manager
->udev
);
1543 /* block and listen to all signals on signalfd */
1544 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1546 r
= sd_event_default(&manager
->event
);
1548 return log_error_errno(r
, "could not allocate event loop: %m");
/* SIGINT and SIGTERM share the same graceful-shutdown handler */
1550 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1552 return log_error_errno(r
, "error creating sigint event source: %m");
1554 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1556 return log_error_errno(r
, "error creating sigterm event source: %m");
1558 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1560 return log_error_errno(r
, "error creating sighup event source: %m");
1562 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1564 return log_error_errno(r
, "error creating sigchld event source: %m");
1566 r
= sd_event_set_watchdog(manager
->event
, true);
1568 return log_error_errno(r
, "error creating watchdog event source: %m");
1570 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1572 return log_error_errno(r
, "error creating ctrl event source: %m");
1574 /* This needs to be after the inotify and uevent handling, to make sure
1575 * that the ping is sent back after fully processing the pending uevents
1576 * (including the synthetic ones we may create due to inotify events).
*/
/* NOTE(review): the message below contains the typo "cold not" —
 * runtime string left untouched in this documentation-only pass */
1578 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1580 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1582 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1584 return log_error_errno(r
, "error creating inotify event source: %m");
1586 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1588 return log_error_errno(r
, "error creating uevent event source: %m");
1590 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1592 return log_error_errno(r
, "error creating worker event source: %m");
/* on_post runs after each loop iteration: idle-worker cleanup etc. */
1594 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1596 return log_error_errno(r
, "error creating post event source: %m");
/* Main service routine: build the Manager from the two sockets, apply
 * static device-node permissions, notify the service manager of
 * readiness, and run the sd-event loop until exit.
 * NOTE(review): `int r` declaration, the `if (r < 0)` guards, the
 * READY=1 sd_notify line, exit label and return are missing from this
 * extract; text kept as-is. */
1604 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1605 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1608 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1610 r
= log_error_errno(r
, "failed to allocate manager object: %m");
/* best-effort: a failure here is logged but does not stop the daemon */
1614 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1616 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1618 (void) sd_notifyf(false,
1620 "STATUS=Processing with %u children at max", arg_children_max
);
/* blocks here until sd_event_exit() or a fatal loop error */
1622 r
= sd_event_loop(manager
->event
);
1624 log_error_errno(r
, "event loop failed: %m");
1628 sd_event_get_exit_code(manager
->event
, &r
);
1633 "STATUS=Shutting down...");
/* remove the control socket so clients see the daemon is gone */
1635 udev_ctrl_cleanup(manager
->ctrl
);
/* Entry point: parse argv and the kernel command line, enforce root,
 * size the worker pool from the CPU count, prepare /run/udev and /dev,
 * detect our cgroup when spawned by PID 1, take over or create the
 * control/uevent sockets, optionally daemonize, then hand off to run().
 * NOTE(review): umask/chdir calls, the fork() itself, `goto exit`
 * labels and several error checks are missing from this extract;
 * text kept as-is. */
1639 int main(int argc
, char *argv
[]) {
1640 _cleanup_free_
char *cgroup
= NULL
;
1641 int fd_ctrl
= -1, fd_uevent
= -1;
1644 log_set_target(LOG_TARGET_AUTO
);
1645 log_parse_environment();
1648 r
= parse_argv(argc
, argv
);
/* kernel cmdline can override defaults; parse errors are non-fatal */
1652 r
= parse_proc_cmdline(parse_proc_cmdline_item
, NULL
, true);
1654 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
/* debug mode: log everything to the console */
1657 log_set_target(LOG_TARGET_CONSOLE
);
1658 log_set_max_level(LOG_DEBUG
);
1661 if (getuid() != 0) {
1662 r
= log_error_errno(EPERM
, "root privileges required");
/* no explicit --children-max: base of 8 plus 2 per online CPU */
1666 if (arg_children_max
== 0) {
1669 arg_children_max
= 8;
1671 if (sched_getaffinity(0, sizeof(cpu_set
), &cpu_set
) == 0)
1672 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1674 log_debug("set children_max to %u", arg_children_max
);
1677 /* set umask before creating any file/directory */
1680 r
= log_error_errno(errno
, "could not change dir to /: %m");
1686 r
= mac_selinux_init();
1688 log_error_errno(r
, "could not initialize labelling: %m");
/* runtime state directory; EEXIST is fine across restarts */
1692 r
= mkdir("/run/udev", 0755);
1693 if (r
< 0 && errno
!= EEXIST
) {
1694 r
= log_error_errno(errno
, "could not create /run/udev: %m");
1698 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1700 if (getppid() == 1) {
1701 /* get our own cgroup, we regularly kill everything udev has left behind
1702 we only do this on systemd systems, and only if we are directly spawned
1703 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1704 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1706 if (r
== -ENOENT
|| r
== -ENOMEDIUM
)
1707 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1709 log_warning_errno(r
, "failed to get cgroup: %m");
1713 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1715 r
= log_error_errno(r
, "could not listen on fds: %m");
1719 if (arg_daemonize
) {
1722 log_info("starting version " VERSION
);
1724 /* connect /dev/null to stdin, stdout, stderr */
1725 if (log_get_max_level() < LOG_DEBUG
) {
1726 r
= make_null_stdio();
1728 log_warning_errno(r
, "Failed to redirect standard streams to /dev/null: %m");
/* fork failure path (the fork() call itself is not in this extract) */
1738 r
= log_error_errno(errno
, "fork of daemon failed: %m");
/* parent: release SELinux state and exit, leaving the child running */
1741 mac_selinux_finish();
1743 _exit(EXIT_SUCCESS
);
/* protect the daemon from the OOM killer */
1748 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
1751 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1754 mac_selinux_finish();
1756 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;