1 /* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
31 #include <sys/epoll.h>
33 #include <sys/inotify.h>
34 #include <sys/ioctl.h>
35 #include <sys/mount.h>
36 #include <sys/prctl.h>
37 #include <sys/signalfd.h>
38 #include <sys/socket.h>
44 #include "sd-daemon.h"
47 #include "alloc-util.h"
48 #include "cgroup-util.h"
49 #include "cpu-set-util.h"
50 #include "dev-setup.h"
53 #include "format-util.h"
58 #include "netlink-util.h"
59 #include "parse-util.h"
60 #include "proc-cmdline.h"
61 #include "process-util.h"
62 #include "selinux-util.h"
63 #include "signal-util.h"
64 #include "socket-util.h"
65 #include "string-util.h"
66 #include "terminal-util.h"
67 #include "udev-util.h"
69 #include "user-util.h"
/* Runtime options for the udev daemon, settable via the command line,
 * the kernel command line (udev.*=) and control-socket messages.
 * NOTE(review): extraction artifact — each original source line is
 * prefixed with its original line number and wrapped across several
 * physical lines; the numbers are not part of the code. */
/* enable debug logging (-D/--debug) */
71 static bool arg_debug
= false;
/* detach and run in the background (-d/--daemon) */
72 static int arg_daemonize
= false;
/* resolve user/group names in rules; default 1 — presumably "early"
 * resolution, confirm against the option parser (not in view) */
73 static int arg_resolve_names
= 1;
/* upper bound on concurrent worker processes; 0 = computed later */
74 static unsigned arg_children_max
;
/* seconds to wait before executing RUN= programs (-e/--exec-delay) */
75 static int arg_exec_delay
;
/* a worker whose event runs longer than this is SIGKILLed (180 s) */
76 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
/* warn after one third of the event timeout (60 s) */
77 static usec_t arg_event_timeout_warn_usec
= 180 * USEC_PER_SEC
/ 3;
/* Global daemon state. NOTE(review): fragmentary — interior lines of
 * the original definitions are missing from this extraction. */
79 typedef struct Manager
{
/* queue of pending device events (struct event, linked via 'event') */
83 LIST_HEAD(struct event
, events
);
85 pid_t pid
; /* the process that originally allocated the manager object */
/* compiled udev rules */
87 struct udev_rules
*rules
;
/* properties set via control ENV messages */
88 struct udev_list properties
;
/* kernel uevent netlink monitor */
90 struct udev_monitor
*monitor
;
/* udev control socket */
91 struct udev_ctrl
*ctrl
;
/* connection kept open to block an EXIT client until we exit */
92 struct udev_ctrl_connection
*ctrl_conn_blocking
;
/* sd-event io sources for the control socket, uevents and inotify */
96 sd_event_source
*ctrl_event
;
97 sd_event_source
*uevent_event
;
98 sd_event_source
*inotify_event
;
/* set by STOP_EXEC_QUEUE, cleared by START_EXEC_QUEUE control messages */
102 bool stop_exec_queue
:1;
/* --- struct event follows; its opening lines are missing here --- */
/* linkage in Manager.events */
113 LIST_FIELDS(struct event
, event
);
/* device the event is about; dev_kernel is a shallow clone keeping the
 * unmodified kernel view (see event_queue_insert) */
116 struct udev_device
*dev
;
117 struct udev_device
*dev_kernel
;
/* worker currently processing this event, if any */
118 struct worker
*worker
;
119 enum event_state state
;
/* seqnum of the earlier event this one was found to wait for */
120 unsigned long long int delaying_seqnum
;
121 unsigned long long int seqnum
;
/* previous devpath, for rename tracking (may be NULL) */
124 const char *devpath_old
;
/* one-shot timers armed by worker_attach_event() */
128 sd_event_source
*timeout_warning
;
129 sd_event_source
*timeout
;
/* Forward declaration: drop queued events whose state matches 'type'
 * (EVENT_UNDEF matches everything); definition appears later in the
 * file. */
132 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
/* Fragment of struct worker (per-worker-process bookkeeping) —
 * surrounding members are missing from this extraction. */
145 struct udev_monitor
*monitor
;
146 enum worker_state state
;
150 /* passed from worker to main process */
/* empty-payload message; its arrival on the worker_watch socketpair
 * signals event completion (see worker_send_message()/on_worker()) —
 * body not visible here, confirm it is empty */
151 struct worker_message
{
/* Release an event: unlink it from the manager's queue, drop the device
 * and timer references, and detach it from its worker. When the queue
 * becomes empty, the flag file /run/udev/queue is unlinked — but only
 * by the process that created the queue. NOTE(review): several original
 * lines (NULL guards, free(), closing braces) are missing from this
 * extraction. */
154 static void event_free(struct event
*event
) {
159 assert(event
->manager
);
161 LIST_REMOVE(event
, event
->manager
->events
, event
);
162 udev_device_unref(event
->dev
);
163 udev_device_unref(event
->dev_kernel
);
/* disarm the per-event warning/kill timers */
165 sd_event_source_unref(event
->timeout_warning
);
166 sd_event_source_unref(event
->timeout
);
/* detach from the worker that was processing this event */
169 event
->worker
->event
= NULL
;
171 if (LIST_IS_EMPTY(event
->manager
->events
)) {
172 /* only clean up the queue from the process that created it */
173 if (event
->manager
->pid
== getpid_cached()) {
174 r
= unlink("/run/udev/queue");
176 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
/* Tear down a worker: remove it from the manager's pid-keyed hashmap,
 * drop its monitor reference, and free any event it was processing.
 * (Guards and free() lines are missing from this extraction.) */
183 static void worker_free(struct worker
*worker
) {
187 assert(worker
->manager
);
189 hashmap_remove(worker
->manager
->workers
, PID_TO_PTR(worker
->pid
));
190 udev_monitor_unref(worker
->monitor
);
191 event_free(worker
->event
);
/* Free every tracked worker, then the workers hashmap itself
 * (hashmap_free returns NULL, resetting the field). */
196 static void manager_workers_free(Manager
*manager
) {
197 struct worker
*worker
;
202 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
205 manager
->workers
= hashmap_free(manager
->workers
);
/* Allocate and register a worker object for child process 'pid'.
 * Disconnects 'worker_monitor' in this (parent) process — only its
 * address is kept for sending — then takes a reference on it and
 * stores the worker in manager->workers keyed by pid. On success *ret
 * receives ownership (TAKE_PTR disarms the _cleanup_free_ guard).
 * NOTE(review): error-check lines are missing from this extraction. */
208 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
209 _cleanup_free_
struct worker
*worker
= NULL
;
214 assert(worker_monitor
);
217 worker
= new0(struct worker
, 1);
221 worker
->refcount
= 1;
222 worker
->manager
= manager
;
223 /* close monitor, but keep address around */
224 udev_monitor_disconnect(worker_monitor
);
225 worker
->monitor
= udev_monitor_ref(worker_monitor
);
228 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
232 r
= hashmap_put(manager
->workers
, PID_TO_PTR(pid
), worker
);
/* ownership moves to the hashmap/caller */
236 *ret
= TAKE_PTR(worker
);
/* sd-event timer callback: the event exceeded arg_event_timeout_usec.
 * SIGKILL the worker (kill_and_sigcont also sends SIGCONT so a stopped
 * process still dies) and mark it WORKER_KILLED; the SIGCHLD handler
 * reaps it later. */
241 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
242 struct event
*event
= userdata
;
245 assert(event
->worker
);
247 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
248 event
->worker
->state
= WORKER_KILLED
;
250 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
/* sd-event timer callback: warn once an event has been running longer
 * than arg_event_timeout_warn_usec (no other action taken here). */
255 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
256 struct event
*event
= userdata
;
260 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
/* Bind 'event' to 'worker': both sides are marked running and two
 * one-shot CLOCK_MONOTONIC timers are armed on the manager's event
 * loop — a warning at arg_event_timeout_warn_usec and a kill at
 * arg_event_timeout_usec (1 s accuracy; add-time failures are
 * deliberately ignored via the (void) casts). */
265 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
270 assert(worker
->manager
);
/* neither side may already be attached */
272 assert(!event
->worker
);
273 assert(!worker
->event
);
275 worker
->state
= WORKER_RUNNING
;
276 worker
->event
= event
;
277 event
->state
= EVENT_RUNNING
;
278 event
->worker
= worker
;
280 e
= worker
->manager
->event
;
282 assert_se(sd_event_now(e
, CLOCK_MONOTONIC
, &usec
) >= 0);
284 (void) sd_event_add_time(e
, &event
->timeout_warning
, CLOCK_MONOTONIC
,
285 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
287 (void) sd_event_add_time(e
, &event
->timeout
, CLOCK_MONOTONIC
,
288 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
/* Destructor for Manager: shut down builtins, drop all event sources
 * and libudev objects, free workers and queued events, then close the
 * remaining fds. Paired with the _cleanup_ helper generated below. */
291 static void manager_free(Manager
*manager
) {
295 udev_builtin_exit(manager
->udev
);
297 sd_event_source_unref(manager
->ctrl_event
);
298 sd_event_source_unref(manager
->uevent_event
);
299 sd_event_source_unref(manager
->inotify_event
);
301 udev_unref(manager
->udev
);
302 sd_event_unref(manager
->event
);
303 manager_workers_free(manager
);
304 event_queue_cleanup(manager
, EVENT_UNDEF
);
306 udev_monitor_unref(manager
->monitor
);
307 udev_ctrl_unref(manager
->ctrl
);
308 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
310 udev_list_cleanup(&manager
->properties
);
311 udev_rules_unref(manager
->rules
);
313 safe_close(manager
->fd_inotify
);
314 safe_close_pair(manager
->worker_watch
);
/* generates manager_freep() for use with _cleanup_() */
319 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
/* Notify the main daemon over the worker_watch socketpair that this
 * worker finished an event; the payload is an empty worker_message
 * (the sender's identity travels via SCM_CREDENTIALS, see on_worker). */
321 static int worker_send_message(int fd
) {
322 struct worker_message message
= {};
324 return loop_write(fd
, &message
, sizeof(message
), false);
327 static void worker_spawn(Manager
*manager
, struct event
*event
) {
328 struct udev
*udev
= event
->udev
;
329 _cleanup_(udev_monitor_unrefp
) struct udev_monitor
*worker_monitor
= NULL
;
333 /* listen for new events */
334 worker_monitor
= udev_monitor_new_from_netlink(udev
, NULL
);
335 if (worker_monitor
== NULL
)
337 /* allow the main daemon netlink address to send devices to the worker */
338 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
339 r
= udev_monitor_enable_receiving(worker_monitor
);
341 log_error_errno(r
, "worker: could not enable receiving of device: %m");
346 struct udev_device
*dev
= NULL
;
347 _cleanup_(sd_netlink_unrefp
) sd_netlink
*rtnl
= NULL
;
349 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
350 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
351 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
354 /* take initial device from queue */
355 dev
= TAKE_PTR(event
->dev
);
357 unsetenv("NOTIFY_SOCKET");
359 manager_workers_free(manager
);
360 event_queue_cleanup(manager
, EVENT_UNDEF
);
362 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
363 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
364 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
365 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
367 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
368 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
369 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
371 manager
->event
= sd_event_unref(manager
->event
);
374 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
376 r
= log_error_errno(errno
, "error creating signalfd %m");
379 ep_signal
.data
.fd
= fd_signal
;
381 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
382 ep_monitor
.data
.fd
= fd_monitor
;
384 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
386 r
= log_error_errno(errno
, "error creating epoll fd: %m");
390 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
391 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
392 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
396 /* Request TERM signal if parent exits.
397 Ignore error, not much we can do in that case. */
398 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
400 /* Reset OOM score, we only protect the main daemon. */
401 write_string_file("/proc/self/oom_score_adj", "0", 0);
404 struct udev_event
*udev_event
;
409 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
410 udev_event
= udev_event_new(dev
);
411 if (udev_event
== NULL
) {
416 if (arg_exec_delay
> 0)
417 udev_event
->exec_delay
= arg_exec_delay
;
420 * Take a shared lock on the device node; this establishes
421 * a concept of device "ownership" to serialize device
422 * access. External processes holding an exclusive lock will
423 * cause udev to skip the event handling; in the case udev
424 * acquired the lock, the external process can block until
425 * udev has finished its event handling.
427 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
428 streq_ptr("block", udev_device_get_subsystem(dev
)) &&
429 !startswith(udev_device_get_sysname(dev
), "dm-") &&
430 !startswith(udev_device_get_sysname(dev
), "md")) {
431 struct udev_device
*d
= dev
;
433 if (streq_ptr("partition", udev_device_get_devtype(d
)))
434 d
= udev_device_get_parent(d
);
437 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
438 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
439 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
440 fd_lock
= safe_close(fd_lock
);
446 /* needed for renaming netifs */
447 udev_event
->rtnl
= rtnl
;
449 /* apply rules, create node, symlinks */
450 udev_event_execute_rules(udev_event
,
451 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
452 &manager
->properties
,
455 udev_event_execute_run(udev_event
,
456 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
458 if (udev_event
->rtnl
)
459 /* in case rtnl was initialized */
460 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
462 /* apply/restore inotify watch */
463 if (udev_event
->inotify_watch
) {
464 udev_watch_begin(udev
, dev
);
465 udev_device_update_db(dev
);
470 /* send processed event back to libudev listeners */
471 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
474 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
476 /* send udevd the result of the event execution */
477 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
479 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
480 udev_device_get_seqnum(dev
));
482 udev_device_unref(dev
);
485 udev_event_unref(udev_event
);
487 /* wait for more device messages from main udevd, or term signal */
488 while (dev
== NULL
) {
489 struct epoll_event ev
[4];
493 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
497 r
= log_error_errno(errno
, "failed to poll: %m");
501 for (i
= 0; i
< fdcount
; i
++) {
502 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
503 dev
= udev_monitor_receive_device(worker_monitor
);
505 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
506 struct signalfd_siginfo fdsi
;
509 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
510 if (size
!= sizeof(struct signalfd_siginfo
))
512 switch (fdsi
.ssi_signo
) {
521 udev_device_unref(dev
);
522 manager_free(manager
);
524 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
527 event
->state
= EVENT_QUEUED
;
528 log_error_errno(errno
, "fork of child failed: %m");
532 struct worker
*worker
;
534 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
538 worker_attach_event(worker
, event
);
540 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
/* Dispatch one queued event: hand it to an idle worker by sending the
 * device over the netlink monitor pair; a worker that does not accept
 * the message is SIGKILLed and marked killed. If no worker is idle and
 * the arg_children_max limit is reached, the event stays queued;
 * otherwise a new worker is forked with this event. NOTE(review):
 * 'continue'/return lines are missing from this extraction. */
546 static void event_run(Manager
*manager
, struct event
*event
) {
547 struct worker
*worker
;
553 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
556 if (worker
->state
!= WORKER_IDLE
)
559 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
561 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
563 kill(worker
->pid
, SIGKILL
);
564 worker
->state
= WORKER_KILLED
;
567 worker_attach_event(worker
, event
);
571 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
572 if (arg_children_max
> 1)
573 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
577 /* start new worker and pass initial device */
578 worker_spawn(manager
, event
);
581 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
588 /* only one process can add events to the queue */
589 if (manager
->pid
== 0)
590 manager
->pid
= getpid_cached();
592 assert(manager
->pid
== getpid_cached());
594 event
= new0(struct event
, 1);
598 event
->udev
= udev_device_get_udev(dev
);
599 event
->manager
= manager
;
601 event
->dev_kernel
= udev_device_shallow_clone(dev
);
602 udev_device_copy_properties(event
->dev_kernel
, dev
);
603 event
->seqnum
= udev_device_get_seqnum(dev
);
604 event
->devpath
= udev_device_get_devpath(dev
);
605 event
->devpath_len
= strlen(event
->devpath
);
606 event
->devpath_old
= udev_device_get_devpath_old(dev
);
607 event
->devnum
= udev_device_get_devnum(dev
);
608 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
609 event
->ifindex
= udev_device_get_ifindex(dev
);
611 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
612 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
614 event
->state
= EVENT_QUEUED
;
616 if (LIST_IS_EMPTY(manager
->events
)) {
617 r
= touch("/run/udev/queue");
619 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
622 LIST_APPEND(event
, manager
->events
, event
);
/* Send SIGTERM to every worker not already marked WORKER_KILLED, and
 * record the killed state so a worker is not signalled twice. */
627 static void manager_kill_workers(Manager
*manager
) {
628 struct worker
*worker
;
633 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
634 if (worker
->state
== WORKER_KILLED
)
637 worker
->state
= WORKER_KILLED
;
638 kill(worker
->pid
, SIGTERM
);
642 /* lookup event for identical, parent, child device */
643 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
644 struct event
*loop_event
;
647 /* check if queue contains events we depend on */
648 LIST_FOREACH(event
, loop_event
, manager
->events
) {
649 /* we already found a later event, earlier cannot block us, no need to check again */
650 if (loop_event
->seqnum
< event
->delaying_seqnum
)
653 /* event we checked earlier still exists, no need to check again */
654 if (loop_event
->seqnum
== event
->delaying_seqnum
)
657 /* found ourself, no later event can block us */
658 if (loop_event
->seqnum
>= event
->seqnum
)
661 /* check major/minor */
662 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
665 /* check network device ifindex */
666 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
669 /* check our old name */
670 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
671 event
->delaying_seqnum
= loop_event
->seqnum
;
675 /* compare devpath */
676 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
678 /* one devpath is contained in the other? */
679 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
682 /* identical device event found */
683 if (loop_event
->devpath_len
== event
->devpath_len
) {
684 /* devices names might have changed/swapped in the meantime */
685 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
687 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
689 event
->delaying_seqnum
= loop_event
->seqnum
;
693 /* parent device event found */
694 if (event
->devpath
[common
] == '/') {
695 event
->delaying_seqnum
= loop_event
->seqnum
;
699 /* child device event found */
700 if (loop_event
->devpath
[common
] == '/') {
701 event
->delaying_seqnum
= loop_event
->seqnum
;
705 /* no matching device */
/* sd-event timer callback armed by manager_exit(): the workers did not
 * finish within the grace period, abandon them and leave the event
 * loop with -ETIMEDOUT. */
712 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
713 Manager
*manager
= userdata
;
717 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
719 sd_event_exit(manager
->event
, -ETIMEDOUT
);
/* Begin orderly shutdown: stop accepting new work (control socket,
 * inotify and the uevent monitor are all closed/unreferenced), drop
 * still-queued events, SIGTERM the workers, and arm a 30 s timer
 * (on_exit_timeout) as an upper bound on waiting for them. */
724 static void manager_exit(Manager
*manager
) {
730 manager
->exit
= true;
734 "STATUS=Starting shutdown...");
736 /* close sources of new events and discard buffered events */
737 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
738 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
740 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
741 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
743 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
744 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
746 /* discard queued events and kill workers */
747 event_queue_cleanup(manager
, EVENT_QUEUED
);
748 manager_kill_workers(manager
);
750 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
752 r
= sd_event_add_time(manager
->event
, NULL
, CLOCK_MONOTONIC
,
753 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
758 /* reload requested, HUP signal received, rules changed, builtin changed */
/* Flush cached configuration: kill the workers (they hold the old
 * rules), drop the compiled rule set — it is lazily re-read by
 * event_queue_start() — and reset the builtins. */
759 static void manager_reload(Manager
*manager
) {
765 "STATUS=Flushing configuration...");
767 manager_kill_workers(manager
);
768 manager
->rules
= udev_rules_unref(manager
->rules
);
769 udev_builtin_exit(manager
->udev
);
773 "STATUS=Processing with %u children at max", arg_children_max
);
/* Try to dispatch all queued events. A no-op while the queue is empty,
 * the daemon is exiting, or the exec queue is stopped. At most every
 * 3 seconds the rules/builtins are checked for changes and a reload is
 * triggered; the rule set is (re)compiled lazily here. Events whose
 * parent/child device is still being handled (is_devpath_busy) stay
 * queued for a later pass. */
776 static void event_queue_start(Manager
*manager
) {
782 if (LIST_IS_EMPTY(manager
->events
) ||
783 manager
->exit
|| manager
->stop_exec_queue
)
786 assert_se(sd_event_now(manager
->event
, CLOCK_MONOTONIC
, &usec
) >= 0);
787 /* check for changed config, every 3 seconds at most */
788 if (manager
->last_usec
== 0 ||
789 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
790 if (udev_rules_check_timestamp(manager
->rules
) ||
791 udev_builtin_validate(manager
->udev
))
792 manager_reload(manager
);
794 manager
->last_usec
= usec
;
797 udev_builtin_init(manager
->udev
);
799 if (!manager
->rules
) {
800 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
805 LIST_FOREACH(event
,event
,manager
->events
) {
806 if (event
->state
!= EVENT_QUEUED
)
809 /* do not start event if parent or child event is still running */
810 if (is_devpath_busy(manager
, event
))
813 event_run(manager
, event
);
/* Free queued events whose state matches 'match_type'; EVENT_UNDEF
 * frees all of them. Uses the _SAFE iterator because event_free()
 * unlinks the current node. */
817 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
818 struct event
*event
, *tmp
;
820 LIST_FOREACH_SAFE(event
, event
, tmp
, manager
->events
) {
821 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
828 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
829 Manager
*manager
= userdata
;
834 struct worker_message msg
;
835 struct iovec iovec
= {
837 .iov_len
= sizeof(msg
),
840 struct cmsghdr cmsghdr
;
841 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
843 struct msghdr msghdr
= {
846 .msg_control
= &control
,
847 .msg_controllen
= sizeof(control
),
849 struct cmsghdr
*cmsg
;
851 struct ucred
*ucred
= NULL
;
852 struct worker
*worker
;
854 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
858 else if (errno
== EAGAIN
)
859 /* nothing more to read */
862 return log_error_errno(errno
, "failed to receive message: %m");
863 } else if (size
!= sizeof(struct worker_message
)) {
864 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
868 CMSG_FOREACH(cmsg
, &msghdr
) {
869 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
870 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
871 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
872 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
875 if (!ucred
|| ucred
->pid
<= 0) {
876 log_warning_errno(EIO
, "ignoring worker message without valid PID");
880 /* lookup worker who sent the signal */
881 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(ucred
->pid
));
883 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
887 if (worker
->state
!= WORKER_KILLED
)
888 worker
->state
= WORKER_IDLE
;
890 /* worker returned */
891 event_free(worker
->event
);
894 /* we have free workers, try to schedule events */
895 event_queue_start(manager
);
/* sd-event io callback for the kernel uevent monitor: receive one
 * device, make sure its usec-initialized timestamp is set, insert it
 * into the event queue, and kick the dispatcher. The unref at line 912
 * appears to be the insertion-failure path — the surrounding guard
 * lines are missing from this extraction, confirm against upstream. */
900 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
901 Manager
*manager
= userdata
;
902 struct udev_device
*dev
;
907 dev
= udev_monitor_receive_device(manager
->monitor
);
909 udev_device_ensure_usec_initialized(dev
, NULL
);
910 r
= event_queue_insert(manager
, dev
);
912 udev_device_unref(dev
);
914 /* we have fresh events, try to schedule them */
915 event_queue_start(manager
);
921 /* receive the udevd message from userspace */
922 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
923 Manager
*manager
= userdata
;
924 _cleanup_(udev_ctrl_connection_unrefp
) struct udev_ctrl_connection
*ctrl_conn
= NULL
;
925 _cleanup_(udev_ctrl_msg_unrefp
) struct udev_ctrl_msg
*ctrl_msg
= NULL
;
931 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
935 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
939 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
941 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
942 log_set_max_level(i
);
943 manager_kill_workers(manager
);
946 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
947 log_debug("udevd message (STOP_EXEC_QUEUE) received");
948 manager
->stop_exec_queue
= true;
951 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
952 log_debug("udevd message (START_EXEC_QUEUE) received");
953 manager
->stop_exec_queue
= false;
954 event_queue_start(manager
);
957 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
958 log_debug("udevd message (RELOAD) received");
959 manager_reload(manager
);
962 str
= udev_ctrl_get_set_env(ctrl_msg
);
964 _cleanup_free_
char *key
= NULL
;
970 val
= strchr(key
, '=');
974 if (val
[0] == '\0') {
975 log_debug("udevd message (ENV) received, unset '%s'", key
);
976 udev_list_entry_add(&manager
->properties
, key
, NULL
);
978 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
979 udev_list_entry_add(&manager
->properties
, key
, val
);
982 log_error("wrong key format '%s'", key
);
984 manager_kill_workers(manager
);
987 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
989 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
990 arg_children_max
= i
;
992 (void) sd_notifyf(false,
994 "STATUS=Processing with %u children at max", arg_children_max
);
997 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
998 log_debug("udevd message (SYNC) received");
1000 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
1001 log_debug("udevd message (EXIT) received");
1002 manager_exit(manager
);
1003 /* keep reference to block the client until we exit
1004 TODO: deal with several blocking exit requests */
1005 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
1011 static int synthesize_change(struct udev_device
*dev
) {
1012 char filename
[UTIL_PATH_SIZE
];
1015 if (streq_ptr("block", udev_device_get_subsystem(dev
)) &&
1016 streq_ptr("disk", udev_device_get_devtype(dev
)) &&
1017 !startswith(udev_device_get_sysname(dev
), "dm-")) {
1018 bool part_table_read
= false;
1019 bool has_partitions
= false;
1021 struct udev
*udev
= udev_device_get_udev(dev
);
1022 _cleanup_(udev_enumerate_unrefp
) struct udev_enumerate
*e
= NULL
;
1023 struct udev_list_entry
*item
;
1026 * Try to re-read the partition table. This only succeeds if
1027 * none of the devices is busy. The kernel returns 0 if no
1028 * partition table is found, and we will not get an event for
1031 fd
= open(udev_device_get_devnode(dev
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1033 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1035 r
= ioctl(fd
, BLKRRPART
, 0);
1039 part_table_read
= true;
1042 /* search for partitions */
1043 e
= udev_enumerate_new(udev
);
1047 r
= udev_enumerate_add_match_parent(e
, dev
);
1051 r
= udev_enumerate_add_match_subsystem(e
, "block");
1055 r
= udev_enumerate_scan_devices(e
);
1059 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1060 _cleanup_(udev_device_unrefp
) struct udev_device
*d
= NULL
;
1062 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1066 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1069 has_partitions
= true;
1074 * We have partitions and re-read the table, the kernel already sent
1075 * out a "change" event for the disk, and "remove/add" for all
1078 if (part_table_read
&& has_partitions
)
1082 * We have partitions but re-reading the partition table did not
1083 * work, synthesize "change" for the disk and all partitions.
1085 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1086 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1087 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1089 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1090 _cleanup_(udev_device_unrefp
) struct udev_device
*d
= NULL
;
1092 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1096 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1099 log_debug("device %s closed, synthesising partition '%s' 'change'",
1100 udev_device_get_devnode(dev
), udev_device_get_devnode(d
));
1101 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(d
), "/uevent", NULL
);
1102 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1108 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1109 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1110 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1115 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1116 Manager
*manager
= userdata
;
1117 union inotify_event_buffer buffer
;
1118 struct inotify_event
*e
;
1123 l
= read(fd
, &buffer
, sizeof(buffer
));
1125 if (IN_SET(errno
, EAGAIN
, EINTR
))
1128 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1131 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1132 _cleanup_(udev_device_unrefp
) struct udev_device
*dev
= NULL
;
1134 dev
= udev_watch_lookup(manager
->udev
, e
->wd
);
1138 log_debug("inotify event: %x for %s", e
->mask
, udev_device_get_devnode(dev
));
1139 if (e
->mask
& IN_CLOSE_WRITE
) {
1140 synthesize_change(dev
);
1142 /* settle might be waiting on us to determine the queue
1143 * state. If we just handled an inotify event, we might have
1144 * generated a "change" event, but we won't have queued up
1145 * the resultant uevent yet. Do that.
1147 on_uevent(NULL
, -1, 0, manager
);
1148 } else if (e
->mask
& IN_IGNORED
)
1149 udev_watch_end(manager
->udev
, dev
);
/* Signal callback: begin orderly daemon shutdown. */
1155 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1156 Manager
*manager
= userdata
;
1160 manager_exit(manager
);
/* SIGHUP callback: flush configuration and reload the rules. */
1165 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1166 Manager
*manager
= userdata
;
1170 manager_reload(manager
);
1175 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1176 Manager
*manager
= userdata
;
1183 struct worker
*worker
;
1185 pid
= waitpid(-1, &status
, WNOHANG
);
1189 worker
= hashmap_get(manager
->workers
, PID_TO_PTR(pid
));
1191 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1195 if (WIFEXITED(status
)) {
1196 if (WEXITSTATUS(status
) == 0)
1197 log_debug("worker ["PID_FMT
"] exited", pid
);
1199 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1200 } else if (WIFSIGNALED(status
)) {
1201 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), signal_to_string(WTERMSIG(status
)));
1202 } else if (WIFSTOPPED(status
)) {
1203 log_info("worker ["PID_FMT
"] stopped", pid
);
1205 } else if (WIFCONTINUED(status
)) {
1206 log_info("worker ["PID_FMT
"] continued", pid
);
1209 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
1211 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1212 if (worker
->event
) {
1213 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1214 /* delete state from disk */
1215 udev_device_delete_db(worker
->event
->dev
);
1216 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1217 /* forward kernel event without amending it */
1218 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1222 worker_free(worker
);
1225 /* we can start new workers, try to schedule events */
1226 event_queue_start(manager
);
/* sd-event post callback, runs after each loop iteration: once the
 * event queue is empty, reap idle workers, complete a pending exit
 * (leave the loop with status 0), or sweep left-over processes from
 * our cgroup. */
1231 static int on_post(sd_event_source
*s
, void *userdata
) {
1232 Manager
*manager
= userdata
;
1237 if (LIST_IS_EMPTY(manager
->events
)) {
1238 /* no pending events */
1239 if (!hashmap_isempty(manager
->workers
)) {
1240 /* there are idle workers */
1241 log_debug("cleanup idle workers");
1242 manager_kill_workers(manager
);
1245 if (manager
->exit
) {
1246 r
= sd_event_exit(manager
->event
, 0);
1249 } else if (manager
->cgroup
)
1250 /* cleanup possible left-over processes in our cgroup */
1251 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, CGROUP_IGNORE_SELF
, NULL
, NULL
, NULL
);
1258 static int listen_fds(int *rctrl
, int *rnetlink
) {
1259 _cleanup_(udev_unrefp
) struct udev
*udev
= NULL
;
1260 int ctrl_fd
= -1, netlink_fd
= -1;
1266 n
= sd_listen_fds(true);
1270 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
1271 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1278 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1279 if (netlink_fd
>= 0)
1289 _cleanup_(udev_ctrl_unrefp
) struct udev_ctrl
*ctrl
= NULL
;
1295 ctrl
= udev_ctrl_new(udev
);
1297 return log_error_errno(EINVAL
, "error initializing udev control socket");
1299 r
= udev_ctrl_enable_receiving(ctrl
);
1301 return log_error_errno(EINVAL
, "error binding udev control socket");
1303 fd
= udev_ctrl_get_fd(ctrl
);
1305 return log_error_errno(EIO
, "could not get ctrl fd");
1307 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1309 return log_error_errno(errno
, "could not dup ctrl fd: %m");
1312 if (netlink_fd
< 0) {
1313 _cleanup_(udev_monitor_unrefp
) struct udev_monitor
*monitor
= NULL
;
1321 monitor
= udev_monitor_new_from_netlink(udev
, "kernel");
1323 return log_error_errno(EINVAL
, "error initializing netlink socket");
1325 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1327 r
= udev_monitor_enable_receiving(monitor
);
1329 return log_error_errno(EINVAL
, "error binding netlink socket");
1331 fd
= udev_monitor_get_fd(monitor
);
1333 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1335 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1337 return log_error_errno(errno
, "could not dup netlink fd: %m");
1341 *rnetlink
= netlink_fd
;
1347 * read the kernel command line, in case we need to get into debug mode
1348 * udev.log_priority=<level> syslog priority
1349 * udev.children_max=<number of workers> events are fully serialized if set to 1
1350 * udev.exec_delay=<number of seconds> delay execution of every executed program
1351 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
1353 static int parse_proc_cmdline_item(const char *key
, const char *value
, void *data
) {
1361 if (proc_cmdline_key_streq(key
, "udev.log_priority")) {
1363 if (proc_cmdline_value_missing(key
, value
))
1366 r
= util_log_priority(value
);
1368 log_set_max_level(r
);
1370 } else if (proc_cmdline_key_streq(key
, "udev.event_timeout")) {
1372 if (proc_cmdline_value_missing(key
, value
))
1375 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1377 arg_event_timeout_usec
*= USEC_PER_SEC
;
1378 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1381 } else if (proc_cmdline_key_streq(key
, "udev.children_max")) {
1383 if (proc_cmdline_value_missing(key
, value
))
1386 r
= safe_atou(value
, &arg_children_max
);
1388 } else if (proc_cmdline_key_streq(key
, "udev.exec_delay")) {
1390 if (proc_cmdline_value_missing(key
, value
))
1393 r
= safe_atoi(value
, &arg_exec_delay
);
1395 } else if (startswith(key
, "udev."))
1396 log_warning("Unknown udev kernel command line option \"%s\"", key
);
1399 log_warning_errno(r
, "Failed to parse \"%s=%s\", ignoring: %m", key
, value
);
1404 static void help(void) {
1405 printf("%s [OPTIONS...]\n\n"
1406 "Manages devices.\n\n"
1407 " -h --help Print this message\n"
1408 " -V --version Print version of the program\n"
1409 " -d --daemon Detach and run in the background\n"
1410 " -D --debug Enable debug output\n"
1411 " -c --children-max=INT Set maximum number of workers\n"
1412 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1413 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1414 " -N --resolve-names=early|late|never\n"
1415 " When to resolve users and groups\n"
1416 , program_invocation_short_name
);
1419 static int parse_argv(int argc
, char *argv
[]) {
1420 static const struct option options
[] = {
1421 { "daemon", no_argument
, NULL
, 'd' },
1422 { "debug", no_argument
, NULL
, 'D' },
1423 { "children-max", required_argument
, NULL
, 'c' },
1424 { "exec-delay", required_argument
, NULL
, 'e' },
1425 { "event-timeout", required_argument
, NULL
, 't' },
1426 { "resolve-names", required_argument
, NULL
, 'N' },
1427 { "help", no_argument
, NULL
, 'h' },
1428 { "version", no_argument
, NULL
, 'V' },
1437 while ((c
= getopt_long(argc
, argv
, "c:de:Dt:N:hV", options
, NULL
)) >= 0) {
1443 arg_daemonize
= true;
1446 r
= safe_atou(optarg
, &arg_children_max
);
1448 log_warning("Invalid --children-max ignored: %s", optarg
);
1451 r
= safe_atoi(optarg
, &arg_exec_delay
);
1453 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1456 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1458 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1460 arg_event_timeout_usec
*= USEC_PER_SEC
;
1461 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1468 if (streq(optarg
, "early")) {
1469 arg_resolve_names
= 1;
1470 } else if (streq(optarg
, "late")) {
1471 arg_resolve_names
= 0;
1472 } else if (streq(optarg
, "never")) {
1473 arg_resolve_names
= -1;
1475 log_error("resolve-names must be early, late or never");
1483 printf("%s\n", PACKAGE_VERSION
);
1488 assert_not_reached("Unhandled option");
1496 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1497 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1498 int r
, fd_worker
, one
= 1;
1501 assert(fd_ctrl
>= 0);
1502 assert(fd_uevent
>= 0);
1504 manager
= new0(Manager
, 1);
1508 manager
->fd_inotify
= -1;
1509 manager
->worker_watch
[WRITE_END
] = -1;
1510 manager
->worker_watch
[READ_END
] = -1;
1512 manager
->udev
= udev_new();
1514 return log_error_errno(errno
, "could not allocate udev context: %m");
1516 udev_builtin_init(manager
->udev
);
1518 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1519 if (!manager
->rules
)
1520 return log_error_errno(ENOMEM
, "error reading rules");
1522 LIST_HEAD_INIT(manager
->events
);
1523 udev_list_init(manager
->udev
, &manager
->properties
, true);
1525 manager
->cgroup
= cgroup
;
1527 manager
->ctrl
= udev_ctrl_new_from_fd(manager
->udev
, fd_ctrl
);
1529 return log_error_errno(EINVAL
, "error taking over udev control socket");
1531 manager
->monitor
= udev_monitor_new_from_netlink_fd(manager
->udev
, "kernel", fd_uevent
);
1532 if (!manager
->monitor
)
1533 return log_error_errno(EINVAL
, "error taking over netlink socket");
1535 /* unnamed socket from workers to the main daemon */
1536 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1538 return log_error_errno(errno
, "error creating socketpair: %m");
1540 fd_worker
= manager
->worker_watch
[READ_END
];
1542 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1544 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1546 manager
->fd_inotify
= udev_watch_init(manager
->udev
);
1547 if (manager
->fd_inotify
< 0)
1548 return log_error_errno(ENOMEM
, "error initializing inotify");
1550 udev_watch_restore(manager
->udev
);
1552 /* block and listen to all signals on signalfd */
1553 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1555 r
= sd_event_default(&manager
->event
);
1557 return log_error_errno(r
, "could not allocate event loop: %m");
1559 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1561 return log_error_errno(r
, "error creating sigint event source: %m");
1563 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1565 return log_error_errno(r
, "error creating sigterm event source: %m");
1567 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1569 return log_error_errno(r
, "error creating sighup event source: %m");
1571 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1573 return log_error_errno(r
, "error creating sigchld event source: %m");
1575 r
= sd_event_set_watchdog(manager
->event
, true);
1577 return log_error_errno(r
, "error creating watchdog event source: %m");
1579 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1581 return log_error_errno(r
, "error creating ctrl event source: %m");
1583 /* This needs to be after the inotify and uevent handling, to make sure
1584 * that the ping is send back after fully processing the pending uevents
1585 * (including the synthetic ones we may create due to inotify events).
1587 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1589 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1591 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1593 return log_error_errno(r
, "error creating inotify event source: %m");
1595 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1597 return log_error_errno(r
, "error creating uevent event source: %m");
1599 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1601 return log_error_errno(r
, "error creating worker event source: %m");
1603 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1605 return log_error_errno(r
, "error creating post event source: %m");
1607 *ret
= TAKE_PTR(manager
);
1612 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1613 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1616 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1618 r
= log_error_errno(r
, "failed to allocate manager object: %m");
1622 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1624 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1626 (void) sd_notifyf(false,
1628 "STATUS=Processing with %u children at max", arg_children_max
);
1630 r
= sd_event_loop(manager
->event
);
1632 log_error_errno(r
, "event loop failed: %m");
1636 sd_event_get_exit_code(manager
->event
, &r
);
1641 "STATUS=Shutting down...");
1643 udev_ctrl_cleanup(manager
->ctrl
);
1647 int main(int argc
, char *argv
[]) {
1648 _cleanup_free_
char *cgroup
= NULL
;
1649 int fd_ctrl
= -1, fd_uevent
= -1;
1652 log_set_target(LOG_TARGET_AUTO
);
1653 udev_parse_config();
1654 log_parse_environment();
1657 r
= parse_argv(argc
, argv
);
1661 r
= proc_cmdline_parse(parse_proc_cmdline_item
, NULL
, PROC_CMDLINE_STRIP_RD_PREFIX
);
1663 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1666 log_set_target(LOG_TARGET_CONSOLE
);
1667 log_set_max_level(LOG_DEBUG
);
1674 if (arg_children_max
== 0) {
1676 unsigned long mem_limit
;
1678 arg_children_max
= 8;
1680 if (sched_getaffinity(0, sizeof(cpu_set
), &cpu_set
) == 0)
1681 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1683 mem_limit
= physical_memory() / (128LU*1024*1024);
1684 arg_children_max
= MAX(10U, MIN(arg_children_max
, mem_limit
));
1686 log_debug("set children_max to %u", arg_children_max
);
1689 /* set umask before creating any file/directory */
1692 r
= log_error_errno(errno
, "could not change dir to /: %m");
1698 r
= mac_selinux_init();
1700 log_error_errno(r
, "could not initialize labelling: %m");
1704 r
= mkdir_errno_wrapper("/run/udev", 0755);
1705 if (r
< 0 && r
!= -EEXIST
) {
1706 log_error_errno(r
, "could not create /run/udev: %m");
1710 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1712 if (getppid() == 1) {
1713 /* get our own cgroup, we regularly kill everything udev has left behind
1714 we only do this on systemd systems, and only if we are directly spawned
1715 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1716 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1718 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
1719 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1721 log_warning_errno(r
, "failed to get cgroup: %m");
1725 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1727 r
= log_error_errno(r
, "could not listen on fds: %m");
1731 if (arg_daemonize
) {
1734 log_info("starting version " PACKAGE_VERSION
);
1736 /* connect /dev/null to stdin, stdout, stderr */
1737 if (log_get_max_level() < LOG_DEBUG
) {
1738 r
= make_null_stdio();
1740 log_warning_errno(r
, "Failed to redirect standard streams to /dev/null: %m");
1748 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1751 mac_selinux_finish();
1753 _exit(EXIT_SUCCESS
);
1758 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
1761 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1764 mac_selinux_finish();
1766 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;