2 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
33 #include <sys/prctl.h>
34 #include <sys/socket.h>
35 #include <sys/signalfd.h>
36 #include <sys/epoll.h>
37 #include <sys/mount.h>
40 #include <sys/ioctl.h>
41 #include <sys/inotify.h>
43 #include "sd-daemon.h"
46 #include "terminal-util.h"
47 #include "signal-util.h"
48 #include "event-util.h"
49 #include "netlink-util.h"
50 #include "cgroup-util.h"
51 #include "process-util.h"
52 #include "dev-setup.h"
54 #include "selinux-util.h"
56 #include "udev-util.h"
57 #include "formats-util.h"
/* Daemon-wide configuration, set from the command line and/or the kernel
 * command line (see parse_proc_cmdline_item below).
 * NOTE(review): this excerpt is a partial extraction; statements are split
 * across lines and some original lines are missing. */
/* enable debug logging */
60 static bool arg_debug
= false;
/* fork into the background */
61 static int arg_daemonize
= false;
/* resolve user/group names in rules (1 = early, presumably; confirm against upstream) */
62 static int arg_resolve_names
= 1;
/* maximum number of concurrent worker processes; 0 = pick a default */
63 static unsigned arg_children_max
;
/* delay (seconds) applied before running each RUN program */
64 static int arg_exec_delay
;
/* kill a worker whose event takes longer than this (default 180 s) */
65 static usec_t arg_event_timeout_usec
= 180 * USEC_PER_SEC
;
/* warn after one third of the kill timeout */
66 static usec_t arg_event_timeout_warn_usec
= 180 * USEC_PER_SEC
/ 3;
/* Central daemon state: event sources, the queued-event list, workers and
 * sockets. NOTE(review): many member declarations are missing from this
 * extraction (numbering jumps); do not assume the struct is complete. */
68 typedef struct Manager
{
/* queue of pending "struct event" entries */
72 struct udev_list_node events
;
74 pid_t pid
; /* the process that originally allocated the manager object */
/* compiled udev rules */
76 struct udev_rules
*rules
;
/* global properties set via the control socket (ENV messages) */
77 struct udev_list properties
;
/* netlink monitor receiving kernel uevents */
79 struct udev_monitor
*monitor
;
/* control socket for udevadm */
80 struct udev_ctrl
*ctrl
;
/* connection kept open to block an EXIT client until shutdown */
81 struct udev_ctrl_connection
*ctrl_conn_blocking
;
/* sd-event sources for the ctrl socket, uevent socket and inotify fd */
85 sd_event_source
*ctrl_event
;
86 sd_event_source
*uevent_event
;
87 sd_event_source
*inotify_event
;
/* set via STOP_EXEC_QUEUE control message; pauses event dispatch */
91 bool stop_exec_queue
:1;
/* --- fields below belong to "struct event" (its opening line is missing
 * from this extraction) --- */
/* membership in Manager.events */
102 struct udev_list_node node
;
/* device the event is about */
105 struct udev_device
*dev
;
/* pristine copy of the kernel event, forwarded if a worker dies */
106 struct udev_device
*dev_kernel
;
/* worker currently processing this event, if any */
107 struct worker
*worker
;
108 enum event_state state
;
/* seqnum of an earlier event this one is waiting on */
109 unsigned long long int delaying_seqnum
;
110 unsigned long long int seqnum
;
/* previous devpath, used for rename dependency checks */
113 const char *devpath_old
;
/* timers armed by worker_attach_event */
117 sd_event_source
*timeout_warning
;
118 sd_event_source
*timeout
;
/* Convert a list node embedded in a struct event back to the containing
 * event (closing brace missing in this extraction). */
121 static inline struct event
*node_to_event(struct udev_list_node
*node
) {
122 return container_of(node
, struct event
, node
);
/* Forward declaration; defined below. */
125 static void event_queue_cleanup(Manager
*manager
, enum event_state type
);
/* --- fragments of "struct worker" (opening line missing from extraction) --- */
/* membership in the manager's worker bookkeeping */
136 struct udev_list_node node
;
/* private netlink monitor used to pass devices to this worker */
139 struct udev_monitor
*monitor
;
140 enum worker_state state
;
144 /* passed from worker to main process */
145 struct worker_message
{
/* Release an event: unlink it from the queue, drop device references and
 * timers, detach it from its worker, and remove /run/udev/queue when the
 * queue becomes empty. NOTE(review): several lines (asserts, frees, closing
 * braces) are missing from this extraction. */
148 static void event_free(struct event
*event
) {
154 udev_list_node_remove(&event
->node
);
155 udev_device_unref(event
->dev
);
156 udev_device_unref(event
->dev_kernel
);
158 sd_event_source_unref(event
->timeout_warning
);
159 sd_event_source_unref(event
->timeout
);
/* detach from the worker so it does not point at freed memory */
162 event
->worker
->event
= NULL
;
164 assert(event
->manager
);
166 if (udev_list_node_is_empty(&event
->manager
->events
)) {
167 /* only clean up the queue from the process that created it */
168 if (event
->manager
->pid
== getpid()) {
169 r
= unlink("/run/udev/queue");
171 log_warning_errno(errno
, "could not unlink /run/udev/queue: %m");
/* Free a worker: drop it from the manager's hashmap (keyed by pid), release
 * its monitor, and free any event it was processing. */
178 static void worker_free(struct worker
*worker
) {
182 assert(worker
->manager
);
184 hashmap_remove(worker
->manager
->workers
, UINT_TO_PTR(worker
->pid
));
185 udev_monitor_unref(worker
->monitor
);
186 event_free(worker
->event
);
/* Free every tracked worker and the workers hashmap itself.
 * NOTE(review): the HASHMAP_FOREACH body line is missing here; presumably it
 * calls worker_free(worker). */
191 static void manager_workers_free(Manager
*manager
) {
192 struct worker
*worker
;
197 HASHMAP_FOREACH(worker
, manager
->workers
, i
)
200 manager
->workers
= hashmap_free(manager
->workers
);
/* Allocate a worker record for a freshly forked worker process and register
 * it in manager->workers keyed by pid. The worker monitor is disconnected in
 * the main process but its address is kept for sending devices.
 * NOTE(review): error-checking lines and the *ret assignment are missing from
 * this extraction. */
203 static int worker_new(struct worker
**ret
, Manager
*manager
, struct udev_monitor
*worker_monitor
, pid_t pid
) {
204 _cleanup_free_
struct worker
*worker
= NULL
;
209 assert(worker_monitor
);
212 worker
= new0(struct worker
, 1);
216 worker
->refcount
= 1;
217 worker
->manager
= manager
;
218 /* close monitor, but keep address around */
219 udev_monitor_disconnect(worker_monitor
);
220 worker
->monitor
= udev_monitor_ref(worker_monitor
);
223 r
= hashmap_ensure_allocated(&manager
->workers
, NULL
);
227 r
= hashmap_put(manager
->workers
, UINT_TO_PTR(pid
), worker
);
/* sd-event timer callback: the event exceeded arg_event_timeout_usec, so
 * SIGKILL the worker handling it and mark the worker as killed. */
237 static int on_event_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
238 struct event
*event
= userdata
;
241 assert(event
->worker
);
/* kill_and_sigcont sends SIGKILL; SIGCONT presumably wakes a stopped worker
 * so the KILL is delivered -- confirm against upstream */
243 kill_and_sigcont(event
->worker
->pid
, SIGKILL
);
244 event
->worker
->state
= WORKER_KILLED
;
246 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event
->dev
), event
->devpath
);
/* sd-event timer callback: warn (but do not kill) when an event has been
 * running longer than arg_event_timeout_warn_usec. */
251 static int on_event_timeout_warning(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
252 struct event
*event
= userdata
;
256 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event
->dev
), event
->devpath
);
/* Bind an idle worker to a queued event, mark both as running, and arm the
 * warning and kill timers relative to "now" on the boottime/monotonic clock.
 * Timer setup errors are deliberately ignored ((void) casts). */
261 static void worker_attach_event(struct worker
*worker
, struct event
*event
) {
267 assert(worker
->manager
);
269 assert(!event
->worker
);
270 assert(!worker
->event
);
272 worker
->state
= WORKER_RUNNING
;
273 worker
->event
= event
;
274 event
->state
= EVENT_RUNNING
;
275 event
->worker
= worker
;
277 e
= worker
->manager
->event
;
279 r
= sd_event_now(e
, clock_boottime_or_monotonic(), &usec
);
/* USEC_PER_SEC here is the timer accuracy/slack argument */
283 (void) sd_event_add_time(e
, &event
->timeout_warning
, clock_boottime_or_monotonic(),
284 usec
+ arg_event_timeout_warn_usec
, USEC_PER_SEC
, on_event_timeout_warning
, event
);
286 (void) sd_event_add_time(e
, &event
->timeout
, clock_boottime_or_monotonic(),
287 usec
+ arg_event_timeout_usec
, USEC_PER_SEC
, on_event_timeout
, event
);
/* Tear down the whole manager: event sources, sockets, workers, the event
 * queue, properties, rules and fds. Order matters: workers and queued events
 * are released before the monitor/ctrl objects they reference. */
290 static void manager_free(Manager
*manager
) {
294 udev_builtin_exit(manager
->udev
);
296 sd_event_source_unref(manager
->ctrl_event
);
297 sd_event_source_unref(manager
->uevent_event
);
298 sd_event_source_unref(manager
->inotify_event
);
300 udev_unref(manager
->udev
);
301 sd_event_unref(manager
->event
);
302 manager_workers_free(manager
);
/* EVENT_UNDEF matches every state, i.e. drop all queued events */
303 event_queue_cleanup(manager
, EVENT_UNDEF
);
305 udev_monitor_unref(manager
->monitor
);
306 udev_ctrl_unref(manager
->ctrl
);
307 udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
309 udev_list_cleanup(&manager
->properties
);
310 udev_rules_unref(manager
->rules
);
312 safe_close(manager
->fd_inotify
);
/* worker_watch is a socketpair; close both ends */
313 safe_close_pair(manager
->worker_watch
);
/* defines manager_freep for use with _cleanup_() */
318 DEFINE_TRIVIAL_CLEANUP_FUNC(Manager
*, manager_free
);
/* Send an (empty) worker_message over the worker_watch socketpair to tell
 * the main daemon that this worker finished an event. */
320 static int worker_send_message(int fd
) {
321 struct worker_message message
= {};
323 return loop_write(fd
, &message
, sizeof(message
), false);
/* Fork a worker process to handle 'event'. The child drops the manager's
 * sockets/event sources, sets up a signalfd + epoll loop, processes the
 * initial device and then waits for further devices from the main daemon.
 * The parent registers the child via worker_new.
 * NOTE(review): this extraction is missing many lines (the fork() itself,
 * several error paths, loop braces); comments below describe only what the
 * visible fragments show. */
326 static void worker_spawn(Manager
*manager
, struct event
*event
) {
327 struct udev
*udev
= event
->udev
;
328 _cleanup_udev_monitor_unref_
struct udev_monitor
*worker_monitor
= NULL
;
332 /* listen for new events */
333 worker_monitor
= udev_monitor_new_from_netlink(udev
, NULL
);
334 if (worker_monitor
== NULL
)
336 /* allow the main daemon netlink address to send devices to the worker */
337 udev_monitor_allow_unicast_sender(worker_monitor
, manager
->monitor
);
338 r
= udev_monitor_enable_receiving(worker_monitor
);
340 log_error_errno(r
, "worker: could not enable receiving of device: %m");
/* --- child process side --- */
345 struct udev_device
*dev
= NULL
;
346 _cleanup_netlink_unref_ sd_netlink
*rtnl
= NULL
;
348 _cleanup_close_
int fd_signal
= -1, fd_ep
= -1;
349 struct epoll_event ep_signal
= { .events
= EPOLLIN
};
350 struct epoll_event ep_monitor
= { .events
= EPOLLIN
};
353 /* take initial device from queue */
357 unsetenv("NOTIFY_SOCKET");
/* the child drops all state inherited from the main daemon */
359 manager_workers_free(manager
);
360 event_queue_cleanup(manager
, EVENT_UNDEF
);
362 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
363 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
364 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
/* NOTE(review): ctrl_conn_blocking is unref'ed twice (lines 363 and 365 of
 * the original) -- looks redundant; harmless only if unref of NULL is a
 * no-op. Verify against upstream. */
365 manager
->ctrl_conn_blocking
= udev_ctrl_connection_unref(manager
->ctrl_conn_blocking
);
366 manager
->worker_watch
[READ_END
] = safe_close(manager
->worker_watch
[READ_END
]);
368 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
369 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
370 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
372 manager
->event
= sd_event_unref(manager
->event
);
/* receive signals through a signalfd polled by epoll below */
375 fd_signal
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
377 r
= log_error_errno(errno
, "error creating signalfd %m");
380 ep_signal
.data
.fd
= fd_signal
;
382 fd_monitor
= udev_monitor_get_fd(worker_monitor
);
383 ep_monitor
.data
.fd
= fd_monitor
;
385 fd_ep
= epoll_create1(EPOLL_CLOEXEC
);
387 r
= log_error_errno(errno
, "error creating epoll fd: %m");
391 if (epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_signal
, &ep_signal
) < 0 ||
392 epoll_ctl(fd_ep
, EPOLL_CTL_ADD
, fd_monitor
, &ep_monitor
) < 0) {
393 r
= log_error_errno(errno
, "fail to add fds to epoll: %m");
397 /* request TERM signal if parent exits */
398 prctl(PR_SET_PDEATHSIG
, SIGTERM
);
400 /* reset OOM score, we only protect the main daemon */
401 write_string_file("/proc/self/oom_score_adj", "0", 0);
/* --- per-device processing loop --- */
404 struct udev_event
*udev_event
;
409 log_debug("seq %llu running", udev_device_get_seqnum(dev
));
410 udev_event
= udev_event_new(dev
);
411 if (udev_event
== NULL
) {
416 if (arg_exec_delay
> 0)
417 udev_event
->exec_delay
= arg_exec_delay
;
420 * Take a shared lock on the device node; this establishes
421 * a concept of device "ownership" to serialize device
422 * access. External processes holding an exclusive lock will
423 * cause udev to skip the event handling; in the case udev
424 * acquired the lock, the external process can block until
425 * udev has finished its event handling.
/* only lock whole block devices; skip remove events, dm-* and md* */
427 if (!streq_ptr(udev_device_get_action(dev
), "remove") &&
428 streq_ptr("block", udev_device_get_subsystem(dev
)) &&
429 !startswith(udev_device_get_sysname(dev
), "dm-") &&
430 !startswith(udev_device_get_sysname(dev
), "md")) {
431 struct udev_device
*d
= dev
;
/* for partitions, lock the parent disk instead */
433 if (streq_ptr("partition", udev_device_get_devtype(d
)))
434 d
= udev_device_get_parent(d
);
437 fd_lock
= open(udev_device_get_devnode(d
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
438 if (fd_lock
>= 0 && flock(fd_lock
, LOCK_SH
|LOCK_NB
) < 0) {
439 log_debug_errno(errno
, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d
));
440 fd_lock
= safe_close(fd_lock
);
446 /* needed for renaming netifs */
447 udev_event
->rtnl
= rtnl
;
449 /* apply rules, create node, symlinks */
450 udev_event_execute_rules(udev_event
,
451 arg_event_timeout_usec
, arg_event_timeout_warn_usec
,
452 &manager
->properties
,
455 udev_event_execute_run(udev_event
,
456 arg_event_timeout_usec
, arg_event_timeout_warn_usec
);
458 if (udev_event
->rtnl
)
459 /* in case rtnl was initialized */
460 rtnl
= sd_netlink_ref(udev_event
->rtnl
);
462 /* apply/restore inotify watch */
463 if (udev_event
->inotify_watch
) {
464 udev_watch_begin(udev
, dev
);
465 udev_device_update_db(dev
);
470 /* send processed event back to libudev listeners */
471 udev_monitor_send_device(worker_monitor
, NULL
, dev
);
474 log_debug("seq %llu processed", udev_device_get_seqnum(dev
));
476 /* send udevd the result of the event execution */
477 r
= worker_send_message(manager
->worker_watch
[WRITE_END
]);
479 log_error_errno(r
, "failed to send result of seq %llu to main daemon: %m",
480 udev_device_get_seqnum(dev
));
482 udev_device_unref(dev
);
485 udev_event_unref(udev_event
);
487 /* wait for more device messages from main udevd, or term signal */
488 while (dev
== NULL
) {
489 struct epoll_event ev
[4];
493 fdcount
= epoll_wait(fd_ep
, ev
, ELEMENTSOF(ev
), -1);
497 r
= log_error_errno(errno
, "failed to poll: %m");
501 for (i
= 0; i
< fdcount
; i
++) {
502 if (ev
[i
].data
.fd
== fd_monitor
&& ev
[i
].events
& EPOLLIN
) {
503 dev
= udev_monitor_receive_device(worker_monitor
);
505 } else if (ev
[i
].data
.fd
== fd_signal
&& ev
[i
].events
& EPOLLIN
) {
506 struct signalfd_siginfo fdsi
;
509 size
= read(fd_signal
, &fdsi
, sizeof(struct signalfd_siginfo
));
510 if (size
!= sizeof(struct signalfd_siginfo
))
512 switch (fdsi
.ssi_signo
) {
/* --- child exit path --- */
521 udev_device_unref(dev
);
522 manager_free(manager
);
524 _exit(r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
);
/* --- parent: fork failed -> requeue event --- */
527 event
->state
= EVENT_QUEUED
;
528 log_error_errno(errno
, "fork of child failed: %m");
/* --- parent: register the new worker --- */
532 struct worker
*worker
;
534 r
= worker_new(&worker
, manager
, worker_monitor
, pid
);
538 worker_attach_event(worker
, event
);
540 log_debug("seq %llu forked new worker ["PID_FMT
"]", udev_device_get_seqnum(event
->dev
), pid
);
/* Dispatch a queued event: try to hand it to an idle worker; if none exists
 * and the worker limit is not reached, spawn a new worker for it.
 * NOTE(review): the "count < 0" check before line 561 and the early returns
 * are missing from this extraction. */
546 static void event_run(Manager
*manager
, struct event
*event
) {
547 struct worker
*worker
;
553 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
556 if (worker
->state
!= WORKER_IDLE
)
/* pass the device to the idle worker over its private monitor */
559 count
= udev_monitor_send_device(manager
->monitor
, worker
->monitor
, event
->dev
);
561 log_error_errno(errno
, "worker ["PID_FMT
"] did not accept message %zi (%m), kill it",
563 kill(worker
->pid
, SIGKILL
);
564 worker
->state
= WORKER_KILLED
;
567 worker_attach_event(worker
, event
);
571 if (hashmap_size(manager
->workers
) >= arg_children_max
) {
572 if (arg_children_max
> 1)
573 log_debug("maximum number (%i) of children reached", hashmap_size(manager
->workers
));
577 /* start new worker and pass initial device */
578 worker_spawn(manager
, event
);
/* Wrap a received device in a struct event and append it to the manager's
 * queue; creates /run/udev/queue when the queue transitions from empty. */
581 static int event_queue_insert(Manager
*manager
, struct udev_device
*dev
) {
588 /* only one process can add events to the queue */
589 if (manager
->pid
== 0)
590 manager
->pid
= getpid();
592 assert(manager
->pid
== getpid());
594 event
= new0(struct event
, 1);
598 event
->udev
= udev_device_get_udev(dev
);
599 event
->manager
= manager
;
/* keep a pristine clone of the kernel event for forwarding on worker death */
601 event
->dev_kernel
= udev_device_shallow_clone(dev
);
602 udev_device_copy_properties(event
->dev_kernel
, dev
);
603 event
->seqnum
= udev_device_get_seqnum(dev
);
604 event
->devpath
= udev_device_get_devpath(dev
);
605 event
->devpath_len
= strlen(event
->devpath
);
606 event
->devpath_old
= udev_device_get_devpath_old(dev
);
607 event
->devnum
= udev_device_get_devnum(dev
);
608 event
->is_block
= streq("block", udev_device_get_subsystem(dev
));
609 event
->ifindex
= udev_device_get_ifindex(dev
);
611 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev
),
612 udev_device_get_action(dev
), udev_device_get_subsystem(dev
));
614 event
->state
= EVENT_QUEUED
;
/* flag for "udevadm settle" that events are pending */
616 if (udev_list_node_is_empty(&manager
->events
)) {
617 r
= touch("/run/udev/queue");
619 log_warning_errno(r
, "could not touch /run/udev/queue: %m");
622 udev_list_node_append(&event
->node
, &manager
->events
);
/* Send SIGTERM to all workers not already killed, marking them WORKER_KILLED
 * so their exit is expected in on_sigchld. */
627 static void manager_kill_workers(Manager
*manager
) {
628 struct worker
*worker
;
633 HASHMAP_FOREACH(worker
, manager
->workers
, i
) {
634 if (worker
->state
== WORKER_KILLED
)
637 worker
->state
= WORKER_KILLED
;
638 kill(worker
->pid
, SIGTERM
);
642 /* lookup event for identical, parent, child device */
/* Returns true (presumably -- return statements are missing from this
 * extraction) if an earlier queued event for the same device, its parent, or
 * a child is still pending, in which case 'event' must wait; records the
 * blocking seqnum in event->delaying_seqnum as a cache. */
643 static bool is_devpath_busy(Manager
*manager
, struct event
*event
) {
644 struct udev_list_node
*loop
;
647 /* check if queue contains events we depend on */
648 udev_list_node_foreach(loop
, &manager
->events
) {
649 struct event
*loop_event
= node_to_event(loop
);
651 /* we already found a later event, earlier can not block us, no need to check again */
652 if (loop_event
->seqnum
< event
->delaying_seqnum
)
655 /* event we checked earlier still exists, no need to check again */
656 if (loop_event
->seqnum
== event
->delaying_seqnum
)
659 /* found ourself, no later event can block us */
660 if (loop_event
->seqnum
>= event
->seqnum
)
663 /* check major/minor */
664 if (major(event
->devnum
) != 0 && event
->devnum
== loop_event
->devnum
&& event
->is_block
== loop_event
->is_block
)
667 /* check network device ifindex */
668 if (event
->ifindex
!= 0 && event
->ifindex
== loop_event
->ifindex
)
671 /* check our old name */
672 if (event
->devpath_old
!= NULL
&& streq(loop_event
->devpath
, event
->devpath_old
)) {
673 event
->delaying_seqnum
= loop_event
->seqnum
;
677 /* compare devpath */
678 common
= MIN(loop_event
->devpath_len
, event
->devpath_len
);
680 /* one devpath is contained in the other? */
681 if (memcmp(loop_event
->devpath
, event
->devpath
, common
) != 0)
684 /* identical device event found */
685 if (loop_event
->devpath_len
== event
->devpath_len
) {
686 /* devices names might have changed/swapped in the meantime */
687 if (major(event
->devnum
) != 0 && (event
->devnum
!= loop_event
->devnum
|| event
->is_block
!= loop_event
->is_block
))
689 if (event
->ifindex
!= 0 && event
->ifindex
!= loop_event
->ifindex
)
691 event
->delaying_seqnum
= loop_event
->seqnum
;
695 /* parent device event found */
696 if (event
->devpath
[common
] == '/') {
697 event
->delaying_seqnum
= loop_event
->seqnum
;
701 /* child device event found */
702 if (loop_event
->devpath
[common
] == '/') {
703 event
->delaying_seqnum
= loop_event
->seqnum
;
707 /* no matching device */
/* sd-event timer callback armed by manager_exit: workers did not finish
 * within the grace period, so force the event loop to exit with -ETIMEDOUT. */
714 static int on_exit_timeout(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
715 Manager
*manager
= userdata
;
719 log_error_errno(ETIMEDOUT
, "giving up waiting for workers to finish");
721 sd_event_exit(manager
->event
, -ETIMEDOUT
);
/* Begin orderly shutdown: close all sources of new events, discard queued
 * events, SIGTERM workers, and arm a 30 s timeout after which the loop is
 * forced to exit (on_exit_timeout). */
726 static void manager_exit(Manager
*manager
) {
732 manager
->exit
= true;
/* sd_notify status string (the call itself is missing from this extraction) */
736 "STATUS=Starting shutdown...");
738 /* close sources of new events and discard buffered events */
739 manager
->ctrl_event
= sd_event_source_unref(manager
->ctrl_event
);
740 manager
->ctrl
= udev_ctrl_unref(manager
->ctrl
);
742 manager
->inotify_event
= sd_event_source_unref(manager
->inotify_event
);
743 manager
->fd_inotify
= safe_close(manager
->fd_inotify
);
745 manager
->uevent_event
= sd_event_source_unref(manager
->uevent_event
);
746 manager
->monitor
= udev_monitor_unref(manager
->monitor
);
748 /* discard queued events and kill workers */
749 event_queue_cleanup(manager
, EVENT_QUEUED
);
750 manager_kill_workers(manager
);
752 r
= sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
);
756 r
= sd_event_add_time(manager
->event
, NULL
, clock_boottime_or_monotonic(),
757 usec
+ 30 * USEC_PER_SEC
, USEC_PER_SEC
, on_exit_timeout
, manager
);
762 /* reload requested, HUP signal received, rules changed, builtin changed */
/* Flush configuration: kill workers, drop the compiled rules (they are
 * lazily recompiled in event_queue_start) and reset builtins. */
763 static void manager_reload(Manager
*manager
) {
769 "STATUS=Flushing configuration...");
771 manager_kill_workers(manager
);
772 manager
->rules
= udev_rules_unref(manager
->rules
);
773 udev_builtin_exit(manager
->udev
);
777 "STATUS=Processing...");
/* Walk the event queue and run every EVENT_QUEUED entry whose devpath is not
 * blocked by an earlier event; also rate-limits a config-change check to at
 * most every 3 seconds and lazily (re)compiles rules. */
780 static void event_queue_start(Manager
*manager
) {
781 struct udev_list_node
*loop
;
787 if (udev_list_node_is_empty(&manager
->events
) ||
788 manager
->exit
|| manager
->stop_exec_queue
)
791 r
= sd_event_now(manager
->event
, clock_boottime_or_monotonic(), &usec
);
793 /* check for changed config, every 3 seconds at most */
794 if (manager
->last_usec
== 0 ||
795 (usec
- manager
->last_usec
) > 3 * USEC_PER_SEC
) {
796 if (udev_rules_check_timestamp(manager
->rules
) ||
797 udev_builtin_validate(manager
->udev
))
798 manager_reload(manager
);
800 manager
->last_usec
= usec
;
804 udev_builtin_init(manager
->udev
);
/* rules are NULL after a reload; recompile on demand */
806 if (!manager
->rules
) {
807 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
812 udev_list_node_foreach(loop
, &manager
->events
) {
813 struct event
*event
= node_to_event(loop
);
815 if (event
->state
!= EVENT_QUEUED
)
818 /* do not start event if parent or child event is still running */
819 if (is_devpath_busy(manager
, event
))
822 event_run(manager
, event
);
/* Free all queued events whose state matches 'match_type'; EVENT_UNDEF
 * matches everything. Uses the _safe foreach because event_free unlinks the
 * node. NOTE(review): the event_free(event) call line is missing from this
 * extraction. */
826 static void event_queue_cleanup(Manager
*manager
, enum event_state match_type
) {
827 struct udev_list_node
*loop
, *tmp
;
829 udev_list_node_foreach_safe(loop
, tmp
, &manager
->events
) {
830 struct event
*event
= node_to_event(loop
);
832 if (match_type
!= EVENT_UNDEF
&& match_type
!= event
->state
)
/* sd-event io callback on the worker_watch socketpair: drain worker_message
 * notifications, validate the sender via SCM_CREDENTIALS, mark the sending
 * worker idle, free its finished event and reschedule the queue. */
839 static int on_worker(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
840 Manager
*manager
= userdata
;
845 struct worker_message msg
;
846 struct iovec iovec
= {
848 .iov_len
= sizeof(msg
),
/* control buffer union for the credentials cmsg (declaration of the union
 * itself is missing from this extraction) */
851 struct cmsghdr cmsghdr
;
852 uint8_t buf
[CMSG_SPACE(sizeof(struct ucred
))];
854 struct msghdr msghdr
= {
857 .msg_control
= &control
,
858 .msg_controllen
= sizeof(control
),
860 struct cmsghdr
*cmsg
;
862 struct ucred
*ucred
= NULL
;
863 struct worker
*worker
;
865 size
= recvmsg(fd
, &msghdr
, MSG_DONTWAIT
);
869 else if (errno
== EAGAIN
)
870 /* nothing more to read */
873 return log_error_errno(errno
, "failed to receive message: %m");
874 } else if (size
!= sizeof(struct worker_message
)) {
875 log_warning_errno(EIO
, "ignoring worker message with invalid size %zi bytes", size
);
/* extract the sender credentials attached by the kernel */
879 CMSG_FOREACH(cmsg
, &msghdr
) {
880 if (cmsg
->cmsg_level
== SOL_SOCKET
&&
881 cmsg
->cmsg_type
== SCM_CREDENTIALS
&&
882 cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)))
883 ucred
= (struct ucred
*) CMSG_DATA(cmsg
);
886 if (!ucred
|| ucred
->pid
<= 0) {
887 log_warning_errno(EIO
, "ignoring worker message without valid PID");
891 /* lookup worker who sent the signal */
892 worker
= hashmap_get(manager
->workers
, UINT_TO_PTR(ucred
->pid
));
894 log_debug("worker ["PID_FMT
"] returned, but is no longer tracked", ucred
->pid
);
898 if (worker
->state
!= WORKER_KILLED
)
899 worker
->state
= WORKER_IDLE
;
901 /* worker returned */
902 event_free(worker
->event
);
905 /* we have free workers, try to schedule events */
906 event_queue_start(manager
);
/* sd-event io callback on the kernel uevent netlink socket: receive a
 * device, stamp its usec, queue it, and kick the scheduler. Also called
 * directly from on_inotify with a NULL source. */
911 static int on_uevent(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
912 Manager
*manager
= userdata
;
913 struct udev_device
*dev
;
918 dev
= udev_monitor_receive_device(manager
->monitor
);
920 udev_device_ensure_usec_initialized(dev
, NULL
);
921 r
= event_queue_insert(manager
, dev
);
/* event_queue_insert cloned/referenced what it needs; drop our reference
 * (presumably only on the error path -- branch lines are missing here) */
923 udev_device_unref(dev
);
925 /* we have fresh events, try to schedule them */
926 event_queue_start(manager
);
932 /* receive the udevd message from userspace */
/* sd-event io callback on the control socket: handle udevadm control
 * commands (log level, stop/start queue, reload, env, children-max, ping,
 * exit). Workers are killed after state changes so new workers inherit the
 * updated state. */
933 static int on_ctrl_msg(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
934 Manager
*manager
= userdata
;
935 _cleanup_udev_ctrl_connection_unref_
struct udev_ctrl_connection
*ctrl_conn
= NULL
;
936 _cleanup_udev_ctrl_msg_unref_
struct udev_ctrl_msg
*ctrl_msg
= NULL
;
942 ctrl_conn
= udev_ctrl_get_connection(manager
->ctrl
);
946 ctrl_msg
= udev_ctrl_receive_msg(ctrl_conn
);
950 i
= udev_ctrl_get_set_log_level(ctrl_msg
);
952 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i
);
953 log_set_max_level(i
);
954 manager_kill_workers(manager
);
957 if (udev_ctrl_get_stop_exec_queue(ctrl_msg
) > 0) {
958 log_debug("udevd message (STOP_EXEC_QUEUE) received");
959 manager
->stop_exec_queue
= true;
962 if (udev_ctrl_get_start_exec_queue(ctrl_msg
) > 0) {
963 log_debug("udevd message (START_EXEC_QUEUE) received");
964 manager
->stop_exec_queue
= false;
965 event_queue_start(manager
);
968 if (udev_ctrl_get_reload(ctrl_msg
) > 0) {
969 log_debug("udevd message (RELOAD) received");
970 manager_reload(manager
);
/* ENV message: "key=value" sets, "key=" unsets a global property */
973 str
= udev_ctrl_get_set_env(ctrl_msg
);
975 _cleanup_free_
char *key
= NULL
;
981 val
= strchr(key
, '=');
985 if (val
[0] == '\0') {
986 log_debug("udevd message (ENV) received, unset '%s'", key
);
987 udev_list_entry_add(&manager
->properties
, key
, NULL
);
989 log_debug("udevd message (ENV) received, set '%s=%s'", key
, val
);
990 udev_list_entry_add(&manager
->properties
, key
, val
);
993 log_error("wrong key format '%s'", key
);
995 manager_kill_workers(manager
);
998 i
= udev_ctrl_get_set_children_max(ctrl_msg
);
1000 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i
);
1001 arg_children_max
= i
;
1004 if (udev_ctrl_get_ping(ctrl_msg
) > 0)
1005 log_debug("udevd message (SYNC) received");
1007 if (udev_ctrl_get_exit(ctrl_msg
) > 0) {
1008 log_debug("udevd message (EXIT) received");
1009 manager_exit(manager
);
1010 /* keep reference to block the client until we exit
1011 TODO: deal with several blocking exit requests */
1012 manager
->ctrl_conn_blocking
= udev_ctrl_connection_ref(ctrl_conn
);
/* After a writable close of a device node, synthesize "change" uevents by
 * writing to sysfs uevent files. For whole disks (except dm-*) first try
 * BLKRRPART to let the kernel re-read the partition table; only synthesize
 * per-partition events when that fails.
 * NOTE(review): error checks and early returns are missing from this
 * extraction. */
1018 static int synthesize_change(struct udev_device
*dev
) {
1019 char filename
[UTIL_PATH_SIZE
];
1022 if (streq_ptr("block", udev_device_get_subsystem(dev
)) &&
1023 streq_ptr("disk", udev_device_get_devtype(dev
)) &&
1024 !startswith(udev_device_get_sysname(dev
), "dm-")) {
1025 bool part_table_read
= false;
1026 bool has_partitions
= false;
1028 struct udev
*udev
= udev_device_get_udev(dev
);
1029 _cleanup_udev_enumerate_unref_
struct udev_enumerate
*e
= NULL
;
1030 struct udev_list_entry
*item
;
1033 * Try to re-read the partition table. This only succeeds if
1034 * none of the devices is busy. The kernel returns 0 if no
1035 * partition table is found, and we will not get an event for
1038 fd
= open(udev_device_get_devnode(dev
), O_RDONLY
|O_CLOEXEC
|O_NOFOLLOW
|O_NONBLOCK
);
1040 r
= flock(fd
, LOCK_EX
|LOCK_NB
);
1042 r
= ioctl(fd
, BLKRRPART
, 0);
1046 part_table_read
= true;
1049 /* search for partitions */
1050 e
= udev_enumerate_new(udev
);
1054 r
= udev_enumerate_add_match_parent(e
, dev
);
1058 r
= udev_enumerate_add_match_subsystem(e
, "block");
1062 r
= udev_enumerate_scan_devices(e
);
1066 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1067 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1069 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1073 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1076 has_partitions
= true;
1081 * We have partitions and re-read the table, the kernel already sent
1082 * out a "change" event for the disk, and "remove/add" for all
1085 if (part_table_read
&& has_partitions
)
1089 * We have partitions but re-reading the partition table did not
1090 * work, synthesize "change" for the disk and all partitions.
1092 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1093 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1094 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
1096 udev_list_entry_foreach(item
, udev_enumerate_get_list_entry(e
)) {
1097 _cleanup_udev_device_unref_
struct udev_device
*d
= NULL
;
1099 d
= udev_device_new_from_syspath(udev
, udev_list_entry_get_name(item
));
1103 if (!streq_ptr("partition", udev_device_get_devtype(d
)))
1106 log_debug("device %s closed, synthesising partition '%s' 'change'",
1107 udev_device_get_devnode(dev
), udev_device_get_devnode(d
));
1108 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(d
), "/uevent", NULL
);
1109 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
/* non-disk (or dm-*) device: synthesize a single "change" event */
1115 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev
));
1116 strscpyl(filename
, sizeof(filename
), udev_device_get_syspath(dev
), "/uevent", NULL
);
1117 write_string_file(filename
, "change", WRITE_STRING_FILE_CREATE
);
/* sd-event io callback on the inotify fd watching device nodes: a
 * close-after-write triggers synthesize_change; IN_IGNORED removes the
 * watch bookkeeping. */
1122 static int on_inotify(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
1123 Manager
*manager
= userdata
;
1124 union inotify_event_buffer buffer
;
1125 struct inotify_event
*e
;
1130 l
= read(fd
, &buffer
, sizeof(buffer
));
1132 if (errno
== EAGAIN
|| errno
== EINTR
)
1135 return log_error_errno(errno
, "Failed to read inotify fd: %m");
1138 FOREACH_INOTIFY_EVENT(e
, buffer
, l
) {
1139 _cleanup_udev_device_unref_
struct udev_device
*dev
= NULL
;
/* map the watch descriptor back to the device being watched */
1141 dev
= udev_watch_lookup(manager
->udev
, e
->wd
);
1145 log_debug("inotify event: %x for %s", e
->mask
, udev_device_get_devnode(dev
));
1146 if (e
->mask
& IN_CLOSE_WRITE
) {
1147 synthesize_change(dev
);
1149 /* settle might be waiting on us to determine the queue
1150 * state. If we just handled an inotify event, we might have
1151 * generated a "change" event, but we won't have queued up
1152 * the resultant uevent yet. Do that.
1154 on_uevent(NULL
, -1, 0, manager
);
1155 } else if (e
->mask
& IN_IGNORED
)
1156 udev_watch_end(manager
->udev
, dev
);
/* signalfd callback for SIGTERM/SIGINT: begin orderly shutdown. */
1162 static int on_sigterm(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1163 Manager
*manager
= userdata
;
1167 manager_exit(manager
);
/* signalfd callback for SIGHUP: flush configuration and reload rules. */
1172 static int on_sighup(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1173 Manager
*manager
= userdata
;
1177 manager_reload(manager
);
/* signalfd callback for SIGCHLD: reap exited workers in a waitpid loop, log
 * how each died, clean up state for events that were in flight (delete the
 * event's db entry and forward the pristine kernel event so listeners still
 * see it), then reschedule the queue. */
1182 static int on_sigchld(sd_event_source
*s
, const struct signalfd_siginfo
*si
, void *userdata
) {
1183 Manager
*manager
= userdata
;
1190 struct worker
*worker
;
1192 pid
= waitpid(-1, &status
, WNOHANG
);
1196 worker
= hashmap_get(manager
->workers
, UINT_TO_PTR(pid
));
1198 log_warning("worker ["PID_FMT
"] is unknown, ignoring", pid
);
1202 if (WIFEXITED(status
)) {
1203 if (WEXITSTATUS(status
) == 0)
1204 log_debug("worker ["PID_FMT
"] exited", pid
);
1206 log_warning("worker ["PID_FMT
"] exited with return code %i", pid
, WEXITSTATUS(status
));
1207 } else if (WIFSIGNALED(status
)) {
1208 log_warning("worker ["PID_FMT
"] terminated by signal %i (%s)", pid
, WTERMSIG(status
), strsignal(WTERMSIG(status
)));
1209 } else if (WIFSTOPPED(status
)) {
1210 log_info("worker ["PID_FMT
"] stopped", pid
);
1212 } else if (WIFCONTINUED(status
)) {
1213 log_info("worker ["PID_FMT
"] continued", pid
);
1216 log_warning("worker ["PID_FMT
"] exit with status 0x%04x", pid
, status
);
/* abnormal exit while handling an event: clean up after it */
1218 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0) {
1219 if (worker
->event
) {
1220 log_error("worker ["PID_FMT
"] failed while handling '%s'", pid
, worker
->event
->devpath
);
1221 /* delete state from disk */
1222 udev_device_delete_db(worker
->event
->dev
);
1223 udev_device_tag_index(worker
->event
->dev
, NULL
, false);
1224 /* forward kernel event without amending it */
1225 udev_monitor_send_device(manager
->monitor
, NULL
, worker
->event
->dev_kernel
);
1229 worker_free(worker
);
1232 /* we can start new workers, try to schedule events */
1233 event_queue_start(manager
);
/* sd-event post callback, run after each loop iteration: when the queue is
 * empty, retire idle workers, exit if requested, and reap stray processes
 * left in our cgroup. */
1238 static int on_post(sd_event_source
*s
, void *userdata
) {
1239 Manager
*manager
= userdata
;
1244 if (udev_list_node_is_empty(&manager
->events
)) {
1245 /* no pending events */
1246 if (!hashmap_isempty(manager
->workers
)) {
1247 /* there are idle workers */
1248 log_debug("cleanup idle workers");
1249 manager_kill_workers(manager
);
1252 if (manager
->exit
) {
1253 r
= sd_event_exit(manager
->event
, 0);
1256 } else if (manager
->cgroup
)
1257 /* cleanup possible left-over processes in our cgroup */
1258 cg_kill(SYSTEMD_CGROUP_CONTROLLER
, manager
->cgroup
, SIGKILL
, false, true, NULL
);
/* Obtain the control and netlink uevent fds: prefer fds passed in by systemd
 * socket activation (sd_listen_fds), otherwise create the sockets ourselves
 * and dup them with CLOEXEC. Returns both via out parameters.
 * NOTE(review): several checks and the *rctrl assignment are missing from
 * this extraction. */
1265 static int listen_fds(int *rctrl
, int *rnetlink
) {
1266 _cleanup_udev_unref_
struct udev
*udev
= NULL
;
1267 int ctrl_fd
= -1, netlink_fd
= -1;
1273 n
= sd_listen_fds(true);
1277 for (fd
= SD_LISTEN_FDS_START
; fd
< n
+ SD_LISTEN_FDS_START
; fd
++) {
/* AF_LOCAL/SOCK_SEQPACKET is the udev control socket */
1278 if (sd_is_socket(fd
, AF_LOCAL
, SOCK_SEQPACKET
, -1)) {
1285 if (sd_is_socket(fd
, AF_NETLINK
, SOCK_RAW
, -1)) {
1286 if (netlink_fd
>= 0)
/* no activated control socket: create one */
1296 _cleanup_udev_ctrl_unref_
struct udev_ctrl
*ctrl
= NULL
;
1302 ctrl
= udev_ctrl_new(udev
);
1304 return log_error_errno(EINVAL
, "error initializing udev control socket");
1306 r
= udev_ctrl_enable_receiving(ctrl
);
1308 return log_error_errno(EINVAL
, "error binding udev control socket");
1310 fd
= udev_ctrl_get_fd(ctrl
);
1312 return log_error_errno(EIO
, "could not get ctrl fd");
/* dup so the fd survives the _cleanup_ unref of ctrl */
1314 ctrl_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1316 return log_error_errno(errno
, "could not dup ctrl fd: %m");
1319 if (netlink_fd
< 0) {
1320 _cleanup_udev_monitor_unref_
struct udev_monitor
*monitor
= NULL
;
1328 monitor
= udev_monitor_new_from_netlink(udev
, "kernel");
1330 return log_error_errno(EINVAL
, "error initializing netlink socket");
/* large buffer so bursts of uevents are not dropped; errors ignored */
1332 (void) udev_monitor_set_receive_buffer_size(monitor
, 128 * 1024 * 1024);
1334 r
= udev_monitor_enable_receiving(monitor
);
1336 return log_error_errno(EINVAL
, "error binding netlink socket");
1338 fd
= udev_monitor_get_fd(monitor
);
1340 return log_error_errno(netlink_fd
, "could not get uevent fd: %m");
1342 netlink_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1344 return log_error_errno(errno
, "could not dup netlink fd: %m");
1348 *rnetlink
= netlink_fd
;
1354 * read the kernel command line, in case we need to get into debug mode
1355 * udev.log-priority=<level> syslog priority
1356 * udev.children-max=<number of workers> events are fully serialized if set to 1
1357 * udev.exec-delay=<number of seconds> delay execution of every executed program
1358 * udev.event-timeout=<number of seconds> seconds to wait before terminating an event
1360 static int parse_proc_cmdline_item(const char *key
, const char *value
) {
1361 const char *full_key
= key
;
1369 if (startswith(key
, "rd."))
1370 key
+= strlen("rd.");
1372 if (startswith(key
, "udev."))
1373 key
+= strlen("udev.");
1377 if (streq(key
, "log-priority")) {
1380 prio
= util_log_priority(value
);
1383 log_set_max_level(prio
);
1384 } else if (streq(key
, "children-max")) {
1385 r
= safe_atou(value
, &arg_children_max
);
1388 } else if (streq(key
, "exec-delay")) {
1389 r
= safe_atoi(value
, &arg_exec_delay
);
1392 } else if (streq(key
, "event-timeout")) {
1393 r
= safe_atou64(value
, &arg_event_timeout_usec
);
1396 arg_event_timeout_usec
*= USEC_PER_SEC
;
1397 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1402 log_warning("invalid %s ignored: %s", full_key
, value
);
1406 static void help(void) {
1407 printf("%s [OPTIONS...]\n\n"
1408 "Manages devices.\n\n"
1409 " -h --help Print this message\n"
1410 " --version Print version of the program\n"
1411 " --daemon Detach and run in the background\n"
1412 " --debug Enable debug output\n"
1413 " --children-max=INT Set maximum number of workers\n"
1414 " --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1415 " --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1416 " --resolve-names=early|late|never\n"
1417 " When to resolve users and groups\n"
1418 , program_invocation_short_name
);
1421 static int parse_argv(int argc
, char *argv
[]) {
1422 static const struct option options
[] = {
1423 { "daemon", no_argument
, NULL
, 'd' },
1424 { "debug", no_argument
, NULL
, 'D' },
1425 { "children-max", required_argument
, NULL
, 'c' },
1426 { "exec-delay", required_argument
, NULL
, 'e' },
1427 { "event-timeout", required_argument
, NULL
, 't' },
1428 { "resolve-names", required_argument
, NULL
, 'N' },
1429 { "help", no_argument
, NULL
, 'h' },
1430 { "version", no_argument
, NULL
, 'V' },
1439 while ((c
= getopt_long(argc
, argv
, "c:de:DtN:hV", options
, NULL
)) >= 0) {
1445 arg_daemonize
= true;
1448 r
= safe_atou(optarg
, &arg_children_max
);
1450 log_warning("Invalid --children-max ignored: %s", optarg
);
1453 r
= safe_atoi(optarg
, &arg_exec_delay
);
1455 log_warning("Invalid --exec-delay ignored: %s", optarg
);
1458 r
= safe_atou64(optarg
, &arg_event_timeout_usec
);
1460 log_warning("Invalid --event-timeout ignored: %s", optarg
);
1462 arg_event_timeout_usec
*= USEC_PER_SEC
;
1463 arg_event_timeout_warn_usec
= (arg_event_timeout_usec
/ 3) ? : 1;
1470 if (streq(optarg
, "early")) {
1471 arg_resolve_names
= 1;
1472 } else if (streq(optarg
, "late")) {
1473 arg_resolve_names
= 0;
1474 } else if (streq(optarg
, "never")) {
1475 arg_resolve_names
= -1;
1477 log_error("resolve-names must be early, late or never");
1485 printf("%s\n", VERSION
);
1490 assert_not_reached("Unhandled option");
1498 static int manager_new(Manager
**ret
, int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1499 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1500 int r
, fd_worker
, one
= 1;
1503 assert(fd_ctrl
>= 0);
1504 assert(fd_uevent
>= 0);
1506 manager
= new0(Manager
, 1);
1510 manager
->fd_inotify
= -1;
1511 manager
->worker_watch
[WRITE_END
] = -1;
1512 manager
->worker_watch
[READ_END
] = -1;
1514 manager
->udev
= udev_new();
1516 return log_error_errno(errno
, "could not allocate udev context: %m");
1518 udev_builtin_init(manager
->udev
);
1520 manager
->rules
= udev_rules_new(manager
->udev
, arg_resolve_names
);
1521 if (!manager
->rules
)
1522 return log_error_errno(ENOMEM
, "error reading rules");
1524 udev_list_node_init(&manager
->events
);
1525 udev_list_init(manager
->udev
, &manager
->properties
, true);
1527 manager
->cgroup
= cgroup
;
1529 manager
->ctrl
= udev_ctrl_new_from_fd(manager
->udev
, fd_ctrl
);
1531 return log_error_errno(EINVAL
, "error taking over udev control socket");
1533 manager
->monitor
= udev_monitor_new_from_netlink_fd(manager
->udev
, "kernel", fd_uevent
);
1534 if (!manager
->monitor
)
1535 return log_error_errno(EINVAL
, "error taking over netlink socket");
1537 /* unnamed socket from workers to the main daemon */
1538 r
= socketpair(AF_LOCAL
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, manager
->worker_watch
);
1540 return log_error_errno(errno
, "error creating socketpair: %m");
1542 fd_worker
= manager
->worker_watch
[READ_END
];
1544 r
= setsockopt(fd_worker
, SOL_SOCKET
, SO_PASSCRED
, &one
, sizeof(one
));
1546 return log_error_errno(errno
, "could not enable SO_PASSCRED: %m");
1548 manager
->fd_inotify
= udev_watch_init(manager
->udev
);
1549 if (manager
->fd_inotify
< 0)
1550 return log_error_errno(ENOMEM
, "error initializing inotify");
1552 udev_watch_restore(manager
->udev
);
1554 /* block and listen to all signals on signalfd */
1555 assert_se(sigprocmask_many(SIG_BLOCK
, NULL
, SIGTERM
, SIGINT
, SIGHUP
, SIGCHLD
, -1) >= 0);
1557 r
= sd_event_default(&manager
->event
);
1559 return log_error_errno(errno
, "could not allocate event loop: %m");
1561 r
= sd_event_add_signal(manager
->event
, NULL
, SIGINT
, on_sigterm
, manager
);
1563 return log_error_errno(r
, "error creating sigint event source: %m");
1565 r
= sd_event_add_signal(manager
->event
, NULL
, SIGTERM
, on_sigterm
, manager
);
1567 return log_error_errno(r
, "error creating sigterm event source: %m");
1569 r
= sd_event_add_signal(manager
->event
, NULL
, SIGHUP
, on_sighup
, manager
);
1571 return log_error_errno(r
, "error creating sighup event source: %m");
1573 r
= sd_event_add_signal(manager
->event
, NULL
, SIGCHLD
, on_sigchld
, manager
);
1575 return log_error_errno(r
, "error creating sigchld event source: %m");
1577 r
= sd_event_set_watchdog(manager
->event
, true);
1579 return log_error_errno(r
, "error creating watchdog event source: %m");
1581 r
= sd_event_add_io(manager
->event
, &manager
->ctrl_event
, fd_ctrl
, EPOLLIN
, on_ctrl_msg
, manager
);
1583 return log_error_errno(r
, "error creating ctrl event source: %m");
1585 /* This needs to be after the inotify and uevent handling, to make sure
1586 * that the ping is send back after fully processing the pending uevents
1587 * (including the synthetic ones we may create due to inotify events).
1589 r
= sd_event_source_set_priority(manager
->ctrl_event
, SD_EVENT_PRIORITY_IDLE
);
1591 return log_error_errno(r
, "cold not set IDLE event priority for ctrl event source: %m");
1593 r
= sd_event_add_io(manager
->event
, &manager
->inotify_event
, manager
->fd_inotify
, EPOLLIN
, on_inotify
, manager
);
1595 return log_error_errno(r
, "error creating inotify event source: %m");
1597 r
= sd_event_add_io(manager
->event
, &manager
->uevent_event
, fd_uevent
, EPOLLIN
, on_uevent
, manager
);
1599 return log_error_errno(r
, "error creating uevent event source: %m");
1601 r
= sd_event_add_io(manager
->event
, NULL
, fd_worker
, EPOLLIN
, on_worker
, manager
);
1603 return log_error_errno(r
, "error creating worker event source: %m");
1605 r
= sd_event_add_post(manager
->event
, NULL
, on_post
, manager
);
1607 return log_error_errno(r
, "error creating post event source: %m");
1615 static int run(int fd_ctrl
, int fd_uevent
, const char *cgroup
) {
1616 _cleanup_(manager_freep
) Manager
*manager
= NULL
;
1619 r
= manager_new(&manager
, fd_ctrl
, fd_uevent
, cgroup
);
1621 r
= log_error_errno(r
, "failed to allocate manager object: %m");
1625 r
= udev_rules_apply_static_dev_perms(manager
->rules
);
1627 log_error_errno(r
, "failed to apply permissions on static device nodes: %m");
1629 (void) sd_notify(false,
1631 "STATUS=Processing...");
1633 r
= sd_event_loop(manager
->event
);
1635 log_error_errno(r
, "event loop failed: %m");
1639 sd_event_get_exit_code(manager
->event
, &r
);
1644 "STATUS=Shutting down...");
1646 udev_ctrl_cleanup(manager
->ctrl
);
1650 int main(int argc
, char *argv
[]) {
1651 _cleanup_free_
char *cgroup
= NULL
;
1652 int r
, fd_ctrl
, fd_uevent
;
1654 log_set_target(LOG_TARGET_AUTO
);
1655 log_parse_environment();
1658 r
= parse_argv(argc
, argv
);
1662 r
= parse_proc_cmdline(parse_proc_cmdline_item
);
1664 log_warning_errno(r
, "failed to parse kernel command line, ignoring: %m");
1667 log_set_target(LOG_TARGET_CONSOLE
);
1668 log_set_max_level(LOG_DEBUG
);
1671 if (getuid() != 0) {
1672 r
= log_error_errno(EPERM
, "root privileges required");
1676 if (arg_children_max
== 0) {
1679 arg_children_max
= 8;
1681 if (sched_getaffinity(0, sizeof (cpu_set
), &cpu_set
) == 0) {
1682 arg_children_max
+= CPU_COUNT(&cpu_set
) * 2;
1685 log_debug("set children_max to %u", arg_children_max
);
1688 /* set umask before creating any file/directory */
1691 r
= log_error_errno(errno
, "could not change dir to /: %m");
1697 r
= mac_selinux_init("/dev");
1699 log_error_errno(r
, "could not initialize labelling: %m");
1703 r
= mkdir("/run/udev", 0755);
1704 if (r
< 0 && errno
!= EEXIST
) {
1705 r
= log_error_errno(errno
, "could not create /run/udev: %m");
1709 dev_setup(NULL
, UID_INVALID
, GID_INVALID
);
1711 if (getppid() == 1) {
1712 /* get our own cgroup, we regularly kill everything udev has left behind
1713 we only do this on systemd systems, and only if we are directly spawned
1714 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1715 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, 0, &cgroup
);
1718 log_debug_errno(r
, "did not find dedicated cgroup: %m");
1720 log_warning_errno(r
, "failed to get cgroup: %m");
1724 r
= listen_fds(&fd_ctrl
, &fd_uevent
);
1726 r
= log_error_errno(r
, "could not listen on fds: %m");
1730 if (arg_daemonize
) {
1733 log_info("starting version " VERSION
);
1735 /* connect /dev/null to stdin, stdout, stderr */
1736 if (log_get_max_level() < LOG_DEBUG
)
1737 (void) make_null_stdio();
1744 r
= log_error_errno(errno
, "fork of daemon failed: %m");
1747 mac_selinux_finish();
1749 _exit(EXIT_SUCCESS
);
1754 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
1757 r
= run(fd_ctrl
, fd_uevent
, cgroup
);
1760 mac_selinux_finish();
1762 return r
< 0 ? EXIT_FAILURE
: EXIT_SUCCESS
;