src/udev/udevd.c (thirdparty/systemd.git, commit: udev: switch to systemd logging functions)
1 /*
2 * Copyright (C) 2004-2012 Kay Sievers <kay.sievers@vrfy.org>
3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stddef.h>
22 #include <signal.h>
23 #include <unistd.h>
24 #include <errno.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <stdbool.h>
28 #include <string.h>
29 #include <ctype.h>
30 #include <fcntl.h>
31 #include <time.h>
32 #include <getopt.h>
33 #include <dirent.h>
34 #include <sys/time.h>
35 #include <sys/prctl.h>
36 #include <sys/socket.h>
37 #include <sys/un.h>
38 #include <sys/signalfd.h>
39 #include <sys/epoll.h>
40 #include <sys/poll.h>
41 #include <sys/wait.h>
42 #include <sys/stat.h>
43 #include <sys/ioctl.h>
44 #include <sys/inotify.h>
45 #include <sys/utsname.h>
46
47 #include "udev.h"
48 #include "sd-daemon.h"
49
50 static bool debug;
51
52 void udev_main_log(struct udev *udev, int priority,
53 const char *file, int line, const char *fn,
54 const char *format, va_list args)
55 {
56 log_metav(priority, file, line, fn, format, args);
57 }
58
59 static struct udev_rules *rules;
60 static struct udev_queue_export *udev_queue_export;
61 static struct udev_ctrl *udev_ctrl;
62 static struct udev_monitor *monitor;
63 static int worker_watch[2] = { -1, -1 };
64 static int fd_signal = -1;
65 static int fd_ep = -1;
66 static int fd_inotify = -1;
67 static bool stop_exec_queue;
68 static bool reload;
69 static int children;
70 static int children_max;
71 static int exec_delay;
72 static sigset_t sigmask_orig;
73 static UDEV_LIST(event_list);
74 static UDEV_LIST(worker_list);
75 static bool udev_exit;
76
77 enum event_state {
78 EVENT_UNDEF,
79 EVENT_QUEUED,
80 EVENT_RUNNING,
81 };
82
83 struct event {
84 struct udev_list_node node;
85 struct udev *udev;
86 struct udev_device *dev;
87 enum event_state state;
88 int exitcode;
89 unsigned long long int delaying_seqnum;
90 unsigned long long int seqnum;
91 const char *devpath;
92 size_t devpath_len;
93 const char *devpath_old;
94 dev_t devnum;
95 bool is_block;
96 int ifindex;
97 };
98
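/* map an embedded list node back to its containing struct event
 * (offsetof()-based container_of pattern); node_to_worker() below
 * does the same for struct worker */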
99 static struct event *node_to_event(struct udev_list_node *node)
100 {
101 char *event;
102
103 event = (char *)node;
104 event -= offsetof(struct event, node);
105 return (struct event *)event;
106 }
107
108 static void event_queue_cleanup(struct udev *udev, enum event_state type);
109
110 enum worker_state {
111 WORKER_UNDEF,
112 WORKER_RUNNING,
113 WORKER_IDLE,
114 WORKER_KILLED,
115 };
116
117 struct worker {
118 struct udev_list_node node;
119 struct udev *udev;
120 int refcount;
121 pid_t pid;
122 struct udev_monitor *monitor;
123 enum worker_state state;
124 struct event *event;
125 unsigned long long event_start_usec;
126 };
127
128 /* passed from worker to main process */
129 struct worker_message {
130 pid_t pid;
131 int exitcode;
132 };
133
134 static struct worker *node_to_worker(struct udev_list_node *node)
135 {
136 char *worker;
137
138 worker = (char *)node;
139 worker -= offsetof(struct worker, node);
140 return (struct worker *)worker;
141 }
142
143 static void event_queue_delete(struct event *event, bool export)
144 {
145 udev_list_node_remove(&event->node);
146
147 if (export) {
148 udev_queue_export_device_finished(udev_queue_export, event->dev);
149 log_debug("seq %llu done with %i\n", udev_device_get_seqnum(event->dev), event->exitcode);
150 }
151 udev_device_unref(event->dev);
152 free(event);
153 }
154
155 static struct worker *worker_ref(struct worker *worker)
156 {
157 worker->refcount++;
158 return worker;
159 }
160
161 static void worker_cleanup(struct worker *worker)
162 {
163 udev_list_node_remove(&worker->node);
164 udev_monitor_unref(worker->monitor);
165 children--;
166 free(worker);
167 }
168
169 static void worker_unref(struct worker *worker)
170 {
171 worker->refcount--;
172 if (worker->refcount > 0)
173 return;
174 log_debug("worker [%u] cleaned up\n", worker->pid);
175 worker_cleanup(worker);
176 }
177
178 static void worker_list_cleanup(struct udev *udev)
179 {
180 struct udev_list_node *loop, *tmp;
181
182 udev_list_node_foreach_safe(loop, tmp, &worker_list) {
183 struct worker *worker = node_to_worker(loop);
184
185 worker_cleanup(worker);
186 }
187 }
188
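/* fork a new worker process; the child takes over the given event and then
 * waits for further devices sent by the main daemon over netlink */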
189 static void worker_new(struct event *event)
190 {
191 struct udev *udev = event->udev;
192 struct worker *worker;
193 struct udev_monitor *worker_monitor;
194 pid_t pid;
195
196 /* listen for new events */
197 worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
198 if (worker_monitor == NULL)
199 return;
200 /* allow the main daemon netlink address to send devices to the worker */
201 udev_monitor_allow_unicast_sender(worker_monitor, monitor);
202 udev_monitor_enable_receiving(worker_monitor);
203
204 worker = calloc(1, sizeof(struct worker));
205 if (worker == NULL) {
206 udev_monitor_unref(worker_monitor);
207 return;
208 }
209 /* worker + event reference */
210 worker->refcount = 2;
211 worker->udev = udev;
212
213 pid = fork();
214 switch (pid) {
215 case 0: {
216 struct udev_device *dev = NULL;
217 int fd_monitor;
218 struct epoll_event ep_signal, ep_monitor;
219 sigset_t mask;
220 int rc = EXIT_SUCCESS;
221
222 /* take initial device from queue */
223 dev = event->dev;
224 event->dev = NULL;
225
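/* child process: release the main daemon's state inherited through fork()
 * (worker/event lists, queue export, sockets and fds) */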
226 free(worker);
227 worker_list_cleanup(udev);
228 event_queue_cleanup(udev, EVENT_UNDEF);
229 udev_queue_export_unref(udev_queue_export);
230 udev_monitor_unref(monitor);
231 udev_ctrl_unref(udev_ctrl);
232 close(fd_signal);
233 close(fd_ep);
234 close(worker_watch[READ_END]);
235
236 sigfillset(&mask);
237 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
238 if (fd_signal < 0) {
239 log_error("error creating signalfd %m\n");
240 rc = 2;
241 goto out;
242 }
243
244 fd_ep = epoll_create1(EPOLL_CLOEXEC);
245 if (fd_ep < 0) {
246 log_error("error creating epoll fd: %m\n");
247 rc = 3;
248 goto out;
249 }
250
251 memset(&ep_signal, 0, sizeof(struct epoll_event));
252 ep_signal.events = EPOLLIN;
253 ep_signal.data.fd = fd_signal;
254
255 fd_monitor = udev_monitor_get_fd(worker_monitor);
256 memset(&ep_monitor, 0, sizeof(struct epoll_event));
257 ep_monitor.events = EPOLLIN;
258 ep_monitor.data.fd = fd_monitor;
259
260 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
261 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
262 log_error("fail to add fds to epoll: %m\n");
263 rc = 4;
264 goto out;
265 }
266
267 /* request TERM signal if parent exits */
268 prctl(PR_SET_PDEATHSIG, SIGTERM);
269
270 for (;;) {
271 struct udev_event *udev_event;
272 struct worker_message msg;
273 int err;
274
275 log_debug("seq %llu running\n", udev_device_get_seqnum(dev));
276 udev_event = udev_event_new(dev);
277 if (udev_event == NULL) {
278 rc = 5;
279 goto out;
280 }
281
282 /* needed for SIGCHLD/SIGTERM in spawn() */
283 udev_event->fd_signal = fd_signal;
284
285 if (exec_delay > 0)
286 udev_event->exec_delay = exec_delay;
287
288 /* apply rules, create node, symlinks */
289 err = udev_event_execute_rules(udev_event, rules, &sigmask_orig);
290
291 if (err == 0)
292 udev_event_execute_run(udev_event, &sigmask_orig);
293
294 /* apply/restore inotify watch */
295 if (err == 0 && udev_event->inotify_watch) {
296 udev_watch_begin(udev, dev);
297 udev_device_update_db(dev);
298 }
299
300 /* send processed event back to libudev listeners */
301 udev_monitor_send_device(worker_monitor, NULL, dev);
302
303 /* send udevd the result of the event execution */
304 memset(&msg, 0, sizeof(struct worker_message));
305 if (err != 0)
306 msg.exitcode = err;
307 msg.pid = getpid();
308 send(worker_watch[WRITE_END], &msg, sizeof(struct worker_message), 0);
309
310 log_debug("seq %llu processed with %i\n", udev_device_get_seqnum(dev), err);
311
312 udev_device_unref(dev);
313 dev = NULL;
314
315 if (udev_event->sigterm) {
316 udev_event_unref(udev_event);
317 goto out;
318 }
319
320 udev_event_unref(udev_event);
321
322 /* wait for more device messages from main udevd, or term signal */
323 while (dev == NULL) {
324 struct epoll_event ev[4];
325 int fdcount;
326 int i;
327
328 fdcount = epoll_wait(fd_ep, ev, ARRAY_SIZE(ev), -1);
329 if (fdcount < 0) {
330 if (errno == EINTR)
331 continue;
332 err = -errno;
333 log_error("failed to poll: %m\n");
334 goto out;
335 }
336
337 for (i = 0; i < fdcount; i++) {
338 if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
339 dev = udev_monitor_receive_device(worker_monitor);
340 break;
341 } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
342 struct signalfd_siginfo fdsi;
343 ssize_t size;
344
345 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
346 if (size != sizeof(struct signalfd_siginfo))
347 continue;
348 switch (fdsi.ssi_signo) {
349 case SIGTERM:
350 goto out;
351 }
352 }
353 }
354 }
355 }
356 out:
357 udev_device_unref(dev);
358 if (fd_signal >= 0)
359 close(fd_signal);
360 if (fd_ep >= 0)
361 close(fd_ep);
362 close(fd_inotify);
363 close(worker_watch[WRITE_END]);
364 udev_rules_unref(rules);
365 udev_builtin_exit(udev);
366 udev_monitor_unref(worker_monitor);
367 udev_unref(udev);
368 log_close();
369 exit(rc);
370 }
371 case -1:
372 udev_monitor_unref(worker_monitor);
373 event->state = EVENT_QUEUED;
374 free(worker);
375 log_error("fork of child failed: %m\n");
376 break;
377 default:
378 /* close monitor, but keep address around */
379 udev_monitor_disconnect(worker_monitor);
380 worker->monitor = worker_monitor;
381 worker->pid = pid;
382 worker->state = WORKER_RUNNING;
383 worker->event_start_usec = now_usec();
384 worker->event = event;
385 event->state = EVENT_RUNNING;
386 udev_list_node_append(&worker->node, &worker_list);
387 children++;
388 log_debug("seq %llu forked new worker [%u]\n", udev_device_get_seqnum(event->dev), pid);
389 break;
390 }
391 }
392
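/* dispatch the event to an idle worker, or fork a new worker if none is
 * idle and we are still below children_max */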
393 static void event_run(struct event *event)
394 {
395 struct udev_list_node *loop;
396
397 udev_list_node_foreach(loop, &worker_list) {
398 struct worker *worker = node_to_worker(loop);
399 ssize_t count;
400
401 if (worker->state != WORKER_IDLE)
402 continue;
403
404 count = udev_monitor_send_device(monitor, worker->monitor, event->dev);
405 if (count < 0) {
406 log_error("worker [%u] did not accept message %zi (%m), kill it\n", worker->pid, count);
407 kill(worker->pid, SIGKILL);
408 worker->state = WORKER_KILLED;
409 continue;
410 }
411 worker_ref(worker);
412 worker->event = event;
413 worker->state = WORKER_RUNNING;
414 worker->event_start_usec = now_usec();
415 event->state = EVENT_RUNNING;
416 return;
417 }
418
419 if (children >= children_max) {
420 if (children_max > 1)
421 log_debug("maximum number (%i) of children reached\n", children);
422 return;
423 }
424
425 /* start new worker and pass initial device */
426 worker_new(event);
427 }
428
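/* append the device to the global event queue and export its 'queued'
 * state, so tools like 'udevadm settle' can track it */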
429 static int event_queue_insert(struct udev_device *dev)
430 {
431 struct event *event;
432
433 event = calloc(1, sizeof(struct event));
434 if (event == NULL)
435 return -1;
436
437 event->udev = udev_device_get_udev(dev);
438 event->dev = dev;
439 event->seqnum = udev_device_get_seqnum(dev);
440 event->devpath = udev_device_get_devpath(dev);
441 event->devpath_len = strlen(event->devpath);
442 event->devpath_old = udev_device_get_devpath_old(dev);
443 event->devnum = udev_device_get_devnum(dev);
444 event->is_block = (strcmp("block", udev_device_get_subsystem(dev)) == 0);
445 event->ifindex = udev_device_get_ifindex(dev);
446
447 udev_queue_export_device_queued(udev_queue_export, dev);
448 log_debug("seq %llu queued, '%s' '%s'\n", udev_device_get_seqnum(dev),
449 udev_device_get_action(dev), udev_device_get_subsystem(dev));
450
451 event->state = EVENT_QUEUED;
452 udev_list_node_append(&event->node, &event_list);
453 return 0;
454 }
455
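/* ask workers to exit until at most 'retain' of them are left running */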
456 static void worker_kill(struct udev *udev, int retain)
457 {
458 struct udev_list_node *loop;
459 int max;
460
461 if (children <= retain)
462 return;
463
464 max = children - retain;
465
466 udev_list_node_foreach(loop, &worker_list) {
467 struct worker *worker = node_to_worker(loop);
468
469 if (max-- <= 0)
470 break;
471
472 if (worker->state == WORKER_KILLED)
473 continue;
474
475 worker->state = WORKER_KILLED;
476 kill(worker->pid, SIGTERM);
477 }
478 }
479
480 /* lookup event for identical, parent, child device */
481 static bool is_devpath_busy(struct event *event)
482 {
483 struct udev_list_node *loop;
484 size_t common;
485
486 /* check if queue contains events we depend on */
487 udev_list_node_foreach(loop, &event_list) {
488 struct event *loop_event = node_to_event(loop);
489
490 /* we already found a later event, earlier can not block us, no need to check again */
491 if (loop_event->seqnum < event->delaying_seqnum)
492 continue;
493
494 /* event we checked earlier still exists, no need to check again */
495 if (loop_event->seqnum == event->delaying_seqnum)
496 return true;
497
498 /* found ourself, no later event can block us */
499 if (loop_event->seqnum >= event->seqnum)
500 break;
501
502 /* check major/minor */
503 if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
504 return true;
505
506 /* check network device ifindex */
507 if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
508 return true;
509
510 /* check our old name */
511 if (event->devpath_old != NULL && strcmp(loop_event->devpath, event->devpath_old) == 0) {
512 event->delaying_seqnum = loop_event->seqnum;
513 return true;
514 }
515
516 /* compare devpath */
517 common = MIN(loop_event->devpath_len, event->devpath_len);
518
519 /* one devpath is contained in the other? */
520 if (memcmp(loop_event->devpath, event->devpath, common) != 0)
521 continue;
522
523 /* identical device event found */
524 if (loop_event->devpath_len == event->devpath_len) {
525 /* devices names might have changed/swapped in the meantime */
526 if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
527 continue;
528 if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
529 continue;
530 event->delaying_seqnum = loop_event->seqnum;
531 return true;
532 }
533
534 /* parent device event found */
535 if (event->devpath[common] == '/') {
536 event->delaying_seqnum = loop_event->seqnum;
537 return true;
538 }
539
540 /* child device event found */
541 if (loop_event->devpath[common] == '/') {
542 event->delaying_seqnum = loop_event->seqnum;
543 return true;
544 }
545
546 /* no matching device */
547 continue;
548 }
549
550 return false;
551 }
552
553 static void event_queue_start(struct udev *udev)
554 {
555 struct udev_list_node *loop;
556
557 udev_list_node_foreach(loop, &event_list) {
558 struct event *event = node_to_event(loop);
559
560 if (event->state != EVENT_QUEUED)
561 continue;
562
563 /* do not start event if parent or child event is still running */
564 if (is_devpath_busy(event))
565 continue;
566
567 event_run(event);
568 }
569 }
570
571 static void event_queue_cleanup(struct udev *udev, enum event_state match_type)
572 {
573 struct udev_list_node *loop, *tmp;
574
575 udev_list_node_foreach_safe(loop, tmp, &event_list) {
576 struct event *event = node_to_event(loop);
577
578 if (match_type != EVENT_UNDEF && match_type != event->state)
579 continue;
580
581 event_queue_delete(event, false);
582 }
583 }
584
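/* drain result messages the workers sent over the socketpair; mark the
 * matching workers idle and finish their queued events */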
585 static void worker_returned(int fd_worker)
586 {
587 for (;;) {
588 struct worker_message msg;
589 ssize_t size;
590 struct udev_list_node *loop;
591
592 size = recv(fd_worker, &msg, sizeof(struct worker_message), MSG_DONTWAIT);
593 if (size != sizeof(struct worker_message))
594 break;
595
596 /* lookup worker who sent the signal */
597 udev_list_node_foreach(loop, &worker_list) {
598 struct worker *worker = node_to_worker(loop);
599
600 if (worker->pid != msg.pid)
601 continue;
602
603 /* worker returned */
604 if (worker->event) {
605 worker->event->exitcode = msg.exitcode;
606 event_queue_delete(worker->event, true);
607 worker->event = NULL;
608 }
609 if (worker->state != WORKER_KILLED)
610 worker->state = WORKER_IDLE;
611 worker_unref(worker);
612 break;
613 }
614 }
615 }
616
617 /* receive the udevd message from userspace */
618 static struct udev_ctrl_connection *handle_ctrl_msg(struct udev_ctrl *uctrl)
619 {
620 struct udev *udev = udev_ctrl_get_udev(uctrl);
621 struct udev_ctrl_connection *ctrl_conn;
622 struct udev_ctrl_msg *ctrl_msg = NULL;
623 const char *str;
624 int i;
625
626 ctrl_conn = udev_ctrl_get_connection(uctrl);
627 if (ctrl_conn == NULL)
628 goto out;
629
630 ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
631 if (ctrl_msg == NULL)
632 goto out;
633
634 i = udev_ctrl_get_set_log_level(ctrl_msg);
635 if (i >= 0) {
636 log_debug("udevd message (SET_LOG_PRIORITY) received, log_priority=%i\n", i);
637 log_set_max_level(i);
638 udev_set_log_priority(udev, i);
639 worker_kill(udev, 0);
640 }
641
642 if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
643 log_debug("udevd message (STOP_EXEC_QUEUE) received\n");
644 stop_exec_queue = true;
645 }
646
647 if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
648 log_debug("udevd message (START_EXEC_QUEUE) received\n");
649 stop_exec_queue = false;
650 }
651
652 if (udev_ctrl_get_reload(ctrl_msg) > 0) {
653 log_debug("udevd message (RELOAD) received\n");
654 reload = true;
655 }
656
657 str = udev_ctrl_get_set_env(ctrl_msg);
658 if (str != NULL) {
659 char *key;
660
661 key = strdup(str);
662 if (key != NULL) {
663 char *val;
664
665 val = strchr(key, '=');
666 if (val != NULL) {
667 val[0] = '\0';
668 val = &val[1];
669 if (val[0] == '\0') {
670 log_debug("udevd message (ENV) received, unset '%s'\n", key);
671 udev_add_property(udev, key, NULL);
672 } else {
673 log_debug("udevd message (ENV) received, set '%s=%s'\n", key, val);
674 udev_add_property(udev, key, val);
675 }
676 } else {
677 log_error("wrong key format '%s'\n", key);
678 }
679 free(key);
680 }
681 worker_kill(udev, 0);
682 }
683
684 i = udev_ctrl_get_set_children_max(ctrl_msg);
685 if (i >= 0) {
686 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i\n", i);
687 children_max = i;
688 }
689
690 if (udev_ctrl_get_ping(ctrl_msg) > 0)
691 log_debug("udevd message (SYNC) received\n");
692
693 if (udev_ctrl_get_exit(ctrl_msg) > 0) {
694 log_debug("udevd message (EXIT) received\n");
695 udev_exit = true;
696 /* keep reference to block the client until we exit */
697 udev_ctrl_connection_ref(ctrl_conn);
698 }
699 out:
700 udev_ctrl_msg_unref(ctrl_msg);
701 return udev_ctrl_connection_unref(ctrl_conn);
702 }
703
704 /* read inotify messages */
705 static int handle_inotify(struct udev *udev)
706 {
707 int nbytes, pos;
708 char *buf;
709 struct inotify_event *ev;
710
711 if ((ioctl(fd_inotify, FIONREAD, &nbytes) < 0) || (nbytes <= 0))
712 return 0;
713
714 buf = malloc(nbytes);
715 if (buf == NULL) {
716 log_error("error getting buffer for inotify\n");
717 return -1;
718 }
719
720 nbytes = read(fd_inotify, buf, nbytes);
721
722 for (pos = 0; pos < nbytes; pos += sizeof(struct inotify_event) + ev->len) {
723 struct udev_device *dev;
724
725 ev = (struct inotify_event *)(buf + pos);
726 dev = udev_watch_lookup(udev, ev->wd);
727 if (dev != NULL) {
728 log_debug("inotify event: %x for %s\n", ev->mask, udev_device_get_devnode(dev));
729 if (ev->mask & IN_CLOSE_WRITE) {
730 char filename[UTIL_PATH_SIZE];
731 int fd;
732
733 log_debug("device %s closed, synthesising 'change'\n", udev_device_get_devnode(dev));
734 util_strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
735 fd = open(filename, O_WRONLY);
736 if (fd >= 0) {
737 if (write(fd, "change", 6) < 0)
738 log_debug("error writing uevent: %m\n");
739 close(fd);
740 }
741 }
742 if (ev->mask & IN_IGNORED)
743 udev_watch_end(udev, dev);
744
745 udev_device_unref(dev);
746 }
747
748 }
749
750 free(buf);
751 return 0;
752 }
753
754 static void handle_signal(struct udev *udev, int signo)
755 {
756 switch (signo) {
757 case SIGINT:
758 case SIGTERM:
759 udev_exit = true;
760 break;
761 case SIGCHLD:
762 for (;;) {
763 pid_t pid;
764 int status;
765 struct udev_list_node *loop, *tmp;
766
767 pid = waitpid(-1, &status, WNOHANG);
768 if (pid <= 0)
769 break;
770
771 udev_list_node_foreach_safe(loop, tmp, &worker_list) {
772 struct worker *worker = node_to_worker(loop);
773
774 if (worker->pid != pid)
775 continue;
776 log_debug("worker [%u] exit\n", pid);
777
778 if (WIFEXITED(status)) {
779 if (WEXITSTATUS(status) != 0)
780 log_error("worker [%u] exit with return code %i\n", pid, WEXITSTATUS(status));
781 } else if (WIFSIGNALED(status)) {
782 log_error("worker [%u] terminated by signal %i (%s)\n",
783 pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
784 } else if (WIFSTOPPED(status)) {
785 log_error("worker [%u] stopped\n", pid);
786 } else if (WIFCONTINUED(status)) {
787 log_error("worker [%u] continued\n", pid);
788 } else {
789 log_error("worker [%u] exit with status 0x%04x\n", pid, status);
790 }
791
792 if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
793 if (worker->event) {
794 log_error("worker [%u] failed while handling '%s'\n",
795 pid, worker->event->devpath);
796 worker->event->exitcode = -32;
797 event_queue_delete(worker->event, true);
798 /* drop reference taken for state 'running' */
799 worker_unref(worker);
800 }
801 }
802 worker_unref(worker);
803 break;
804 }
805 }
806 break;
807 case SIGHUP:
808 reload = true;
809 break;
810 }
811 }
812
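/* create the static device nodes listed in modules.devname for the running
 * kernel, so they exist before the corresponding modules are loaded */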
813 static void static_dev_create_from_modules(struct udev *udev)
814 {
815 struct utsname kernel;
816 char modules[UTIL_PATH_SIZE];
817 char buf[4096];
818 FILE *f;
819
820 uname(&kernel);
821 util_strscpyl(modules, sizeof(modules), "/lib/modules/", kernel.release, "/modules.devname", NULL);
822 f = fopen(modules, "r");
823 if (f == NULL)
824 return;
825
826 while (fgets(buf, sizeof(buf), f) != NULL) {
827 char *s;
828 const char *modname;
829 const char *devname;
830 const char *devno;
831 int maj, min;
832 char type;
833 mode_t mode;
834 char filename[UTIL_PATH_SIZE];
835
836 if (buf[0] == '#')
837 continue;
838
839 modname = buf;
840 s = strchr(modname, ' ');
841 if (s == NULL)
842 continue;
843 s[0] = '\0';
844
845 devname = &s[1];
846 s = strchr(devname, ' ');
847 if (s == NULL)
848 continue;
849 s[0] = '\0';
850
851 devno = &s[1];
852 s = strchr(devno, ' ');
853 if (s == NULL)
854 s = strchr(devno, '\n');
855 if (s != NULL)
856 s[0] = '\0';
857 if (sscanf(devno, "%c%u:%u", &type, &maj, &min) != 3)
858 continue;
859
860 if (type == 'c')
861 mode = S_IFCHR;
862 else if (type == 'b')
863 mode = S_IFBLK;
864 else
865 continue;
866
867 util_strscpyl(filename, sizeof(filename), udev_get_dev_path(udev), "/", devname, NULL);
868 util_create_path_selinux(udev, filename);
869 udev_selinux_setfscreatecon(udev, filename, mode);
870 log_debug("mknod '%s' %c%u:%u\n", filename, type, maj, min);
871 if (mknod(filename, mode, makedev(maj, min)) < 0 && errno == EEXIST)
872 utimensat(AT_FDCWD, filename, NULL, 0);
873 udev_selinux_resetfscreatecon(udev);
874 }
875
876 fclose(f);
877 }
878
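/* recursively copy device nodes, symlinks and subdirectories from dir_from
 * to dir_to, descending at most maxdepth levels */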
879 static int copy_dev_dir(struct udev *udev, DIR *dir_from, DIR *dir_to, int maxdepth)
880 {
881 struct dirent *dent;
882
883 for (dent = readdir(dir_from); dent != NULL; dent = readdir(dir_from)) {
884 struct stat stats;
885
886 if (dent->d_name[0] == '.')
887 continue;
888 if (fstatat(dirfd(dir_from), dent->d_name, &stats, AT_SYMLINK_NOFOLLOW) != 0)
889 continue;
890
891 if (S_ISBLK(stats.st_mode) || S_ISCHR(stats.st_mode)) {
892 udev_selinux_setfscreateconat(udev, dirfd(dir_to), dent->d_name, stats.st_mode & 0777);
893 if (mknodat(dirfd(dir_to), dent->d_name, stats.st_mode, stats.st_rdev) == 0) {
894 fchmodat(dirfd(dir_to), dent->d_name, stats.st_mode & 0777, 0);
895 fchownat(dirfd(dir_to), dent->d_name, stats.st_uid, stats.st_gid, 0);
896 } else {
897 utimensat(dirfd(dir_to), dent->d_name, NULL, 0);
898 }
899 udev_selinux_resetfscreatecon(udev);
900 } else if (S_ISLNK(stats.st_mode)) {
901 char target[UTIL_PATH_SIZE];
902 ssize_t len;
903
904 len = readlinkat(dirfd(dir_from), dent->d_name, target, sizeof(target));
905 if (len <= 0 || len == (ssize_t)sizeof(target))
906 continue;
907 target[len] = '\0';
908 udev_selinux_setfscreateconat(udev, dirfd(dir_to), dent->d_name, S_IFLNK);
909 if (symlinkat(target, dirfd(dir_to), dent->d_name) < 0 && errno == EEXIST)
910 utimensat(dirfd(dir_to), dent->d_name, NULL, AT_SYMLINK_NOFOLLOW);
911 udev_selinux_resetfscreatecon(udev);
912 } else if (S_ISDIR(stats.st_mode)) {
913 DIR *dir2_from, *dir2_to;
914
915 if (maxdepth == 0)
916 continue;
917
918 udev_selinux_setfscreateconat(udev, dirfd(dir_to), dent->d_name, S_IFDIR|0755);
919 mkdirat(dirfd(dir_to), dent->d_name, 0755);
920 udev_selinux_resetfscreatecon(udev);
921
922 dir2_to = fdopendir(openat(dirfd(dir_to), dent->d_name, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC));
923 if (dir2_to == NULL)
924 continue;
925
926 dir2_from = fdopendir(openat(dirfd(dir_from), dent->d_name, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC));
927 if (dir2_from == NULL) {
928 closedir(dir2_to);
929 continue;
930 }
931
932 copy_dev_dir(udev, dir2_from, dir2_to, maxdepth-1);
933
934 closedir(dir2_to);
935 closedir(dir2_from);
936 }
937 }
938
939 return 0;
940 }
941
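/* create the standard /dev symlinks (core, fd, stdin, stdout, stderr)
 * pointing at their /proc counterparts, if the targets exist */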
942 static void static_dev_create_links(struct udev *udev, DIR *dir)
943 {
944 struct stdlinks {
945 const char *link;
946 const char *target;
947 };
948 static const struct stdlinks stdlinks[] = {
949 { "core", "/proc/kcore" },
950 { "fd", "/proc/self/fd" },
951 { "stdin", "/proc/self/fd/0" },
952 { "stdout", "/proc/self/fd/1" },
953 { "stderr", "/proc/self/fd/2" },
954 };
955 unsigned int i;
956
957 for (i = 0; i < ARRAY_SIZE(stdlinks); i++) {
958 struct stat sb;
959
960 if (stat(stdlinks[i].target, &sb) == 0) {
961 udev_selinux_setfscreateconat(udev, dirfd(dir), stdlinks[i].link, S_IFLNK);
962 if (symlinkat(stdlinks[i].target, dirfd(dir), stdlinks[i].link) < 0 && errno == EEXIST)
963 utimensat(dirfd(dir), stdlinks[i].link, NULL, AT_SYMLINK_NOFOLLOW);
964 udev_selinux_resetfscreatecon(udev);
965 }
966 }
967 }
968
969 static void static_dev_create_from_devices(struct udev *udev, DIR *dir)
970 {
971 DIR *dir_from;
972
973 dir_from = opendir(UDEVLIBEXECDIR "/devices");
974 if (dir_from == NULL)
975 return;
976 copy_dev_dir(udev, dir_from, dir, 8);
977 closedir(dir_from);
978 }
979
980 static void static_dev_create(struct udev *udev)
981 {
982 DIR *dir;
983
984 dir = opendir(udev_get_dev_path(udev));
985 if (dir == NULL)
986 return;
987
988 static_dev_create_links(udev, dir);
989 static_dev_create_from_devices(udev, dir);
990
991 closedir(dir);
992 }
993
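/* read MemTotal from /proc/meminfo and return it in megabytes, or -1 on error */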
994 static int mem_size_mb(void)
995 {
996 FILE *f;
997 char buf[4096];
998 long int memsize = -1;
999
1000 f = fopen("/proc/meminfo", "r");
1001 if (f == NULL)
1002 return -1;
1003
1004 while (fgets(buf, sizeof(buf), f) != NULL) {
1005 long int value;
1006
1007 if (sscanf(buf, "MemTotal: %ld kB", &value) == 1) {
1008 memsize = value / 1024;
1009 break;
1010 }
1011 }
1012
1013 fclose(f);
1014 return memsize;
1015 }
1016
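/* one-time migration: if no current database exists yet, import databases
 * left in the old /dev/.udev/db location by earlier udev versions and write
 * them out in the new format */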
1017 static int convert_db(struct udev *udev)
1018 {
1019 char filename[UTIL_PATH_SIZE];
1020 FILE *f;
1021 struct udev_enumerate *udev_enumerate;
1022 struct udev_list_entry *list_entry;
1023
1024 /* current database */
1025 util_strscpyl(filename, sizeof(filename), udev_get_run_path(udev), "/data", NULL);
1026 if (access(filename, F_OK) >= 0)
1027 return 0;
1028
1029 /* make sure we do not get here again */
1030 util_create_path(udev, filename);
1031 mkdir(filename, 0755);
1032
1033 /* old database */
1034 util_strscpyl(filename, sizeof(filename), udev_get_dev_path(udev), "/.udev/db", NULL);
1035 if (access(filename, F_OK) < 0)
1036 return 0;
1037
1038 f = fopen("/dev/kmsg", "w");
1039 if (f != NULL) {
1040 fprintf(f, "<30>udevd[%u]: converting old udev database\n", getpid());
1041 fclose(f);
1042 }
1043
1044 udev_enumerate = udev_enumerate_new(udev);
1045 if (udev_enumerate == NULL)
1046 return -1;
1047 udev_enumerate_scan_devices(udev_enumerate);
1048 udev_list_entry_foreach(list_entry, udev_enumerate_get_list_entry(udev_enumerate)) {
1049 struct udev_device *device;
1050
1051 device = udev_device_new_from_syspath(udev, udev_list_entry_get_name(list_entry));
1052 if (device == NULL)
1053 continue;
1054
1055 /* try to find the old database for devices without a current one */
1056 if (udev_device_read_db(device, NULL) < 0) {
1057 bool have_db;
1058 const char *id;
1059 struct stat stats;
1060 char devpath[UTIL_PATH_SIZE];
1061 char from[UTIL_PATH_SIZE];
1062
1063 have_db = false;
1064
1065 /* find database in old location */
1066 id = udev_device_get_id_filename(device);
1067 util_strscpyl(from, sizeof(from), udev_get_dev_path(udev), "/.udev/db/", id, NULL);
1068 if (lstat(from, &stats) == 0) {
1069 if (!have_db) {
1070 udev_device_read_db(device, from);
1071 have_db = true;
1072 }
1073 unlink(from);
1074 }
1075
1076 /* find old database with $subsys:$sysname name */
1077 util_strscpyl(from, sizeof(from), udev_get_dev_path(udev),
1078 "/.udev/db/", udev_device_get_subsystem(device), ":",
1079 udev_device_get_sysname(device), NULL);
1080 if (lstat(from, &stats) == 0) {
1081 if (!have_db) {
1082 udev_device_read_db(device, from);
1083 have_db = true;
1084 }
1085 unlink(from);
1086 }
1087
1088 /* find old database with the encoded devpath name */
1089 util_path_encode(udev_device_get_devpath(device), devpath, sizeof(devpath));
1090 util_strscpyl(from, sizeof(from), udev_get_dev_path(udev), "/.udev/db/", devpath, NULL);
1091 if (lstat(from, &stats) == 0) {
1092 if (!have_db) {
1093 udev_device_read_db(device, from);
1094 have_db = true;
1095 }
1096 unlink(from);
1097 }
1098
1099 /* write out new database */
1100 if (have_db)
1101 udev_device_update_db(device);
1102 }
1103 udev_device_unref(device);
1104 }
1105 udev_enumerate_unref(udev_enumerate);
1106 return 0;
1107 }
1108
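/* pick up the control and netlink sockets passed in by systemd socket
 * activation; returns -1 unless we got exactly one of each */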
1109 static int systemd_fds(struct udev *udev, int *rctrl, int *rnetlink)
1110 {
1111 int ctrl = -1, netlink = -1;
1112 int fd, n;
1113
1114 n = sd_listen_fds(true);
1115 if (n <= 0)
1116 return -1;
1117
1118 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1119 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
1120 if (ctrl >= 0)
1121 return -1;
1122 ctrl = fd;
1123 continue;
1124 }
1125
1126 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
1127 if (netlink >= 0)
1128 return -1;
1129 netlink = fd;
1130 continue;
1131 }
1132
1133 return -1;
1134 }
1135
1136 if (ctrl < 0 || netlink < 0)
1137 return -1;
1138
1139 log_debug("ctrl=%i netlink=%i\n", ctrl, netlink);
1140 *rctrl = ctrl;
1141 *rnetlink = netlink;
1142 return 0;
1143 }
1144
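/* return true if the modification time of any rules directory changed
 * since it was last recorded */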
1145 static bool check_rules_timestamp(struct udev *udev)
1146 {
1147 char **p;
1148 unsigned long long *stamp_usec;
1149 int i, n;
1150 bool changed = false;
1151
1152 n = udev_get_rules_path(udev, &p, &stamp_usec);
1153 for (i = 0; i < n; i++) {
1154 struct stat stats;
1155
1156 if (stat(p[i], &stats) < 0)
1157 continue;
1158
1159 if (stamp_usec[i] == ts_usec(&stats.st_mtim))
1160 continue;
1161
1162 /* first check */
1163 if (stamp_usec[i] != 0) {
1164 log_debug("reload - timestamp of '%s' changed\n", p[i]);
1165 changed = true;
1166 }
1167
1168 /* update timestamp */
1169 stamp_usec[i] = ts_usec(&stats.st_mtim);
1170 }
1171
1172 return changed;
1173 }
1174
1175 int main(int argc, char *argv[])
1176 {
1177 struct udev *udev;
1178 FILE *f;
1179 sigset_t mask;
1180 int daemonize = false;
1181 int resolve_names = 1;
1182 static const struct option options[] = {
1183 { "daemon", no_argument, NULL, 'd' },
1184 { "debug", no_argument, NULL, 'D' },
1185 { "children-max", required_argument, NULL, 'c' },
1186 { "exec-delay", required_argument, NULL, 'e' },
1187 { "resolve-names", required_argument, NULL, 'N' },
1188 { "help", no_argument, NULL, 'h' },
1189 { "version", no_argument, NULL, 'V' },
1190 {}
1191 };
1192 int fd_ctrl = -1;
1193 int fd_netlink = -1;
1194 int fd_worker = -1;
1195 struct epoll_event ep_ctrl, ep_inotify, ep_signal, ep_netlink, ep_worker;
1196 struct udev_ctrl_connection *ctrl_conn = NULL;
1197 char **s;
1198 int rc = 1;
1199
1200 udev = udev_new();
1201 if (udev == NULL)
1202 goto exit;
1203
1204 log_open();
1205 log_parse_environment();
1206 udev_set_log_fn(udev, udev_main_log);
1207 log_debug("version %s\n", VERSION);
1208 udev_selinux_init(udev);
1209
1210 for (;;) {
1211 int option;
1212
1213 option = getopt_long(argc, argv, "c:de:DtN:hV", options, NULL);
1214 if (option == -1)
1215 break;
1216
1217 switch (option) {
1218 case 'd':
1219 daemonize = true;
1220 break;
1221 case 'c':
1222 children_max = strtoul(optarg, NULL, 0);
1223 break;
1224 case 'e':
1225 exec_delay = strtoul(optarg, NULL, 0);
1226 break;
1227 case 'D':
1228 debug = true;
1229 if (udev_get_log_priority(udev) < LOG_INFO)
1230 udev_set_log_priority(udev, LOG_INFO);
1231 break;
1232 case 'N':
1233 if (strcmp (optarg, "early") == 0) {
1234 resolve_names = 1;
1235 } else if (strcmp (optarg, "late") == 0) {
1236 resolve_names = 0;
1237 } else if (strcmp (optarg, "never") == 0) {
1238 resolve_names = -1;
1239 } else {
1240 fprintf(stderr, "resolve-names must be early, late or never\n");
1241 log_error("resolve-names must be early, late or never\n");
1242 goto exit;
1243 }
1244 break;
1245 case 'h':
1246 printf("Usage: udevd OPTIONS\n"
1247 " --daemon\n"
1248 " --debug\n"
1249 " --children-max=<maximum number of workers>\n"
1250 " --exec-delay=<seconds to wait before executing RUN=>\n"
1251 " --resolve-names=early|late|never\n"
1252 " --version\n"
1253 " --help\n"
1254 "\n");
1255 goto exit;
1256 case 'V':
1257 printf("%s\n", VERSION);
1258 goto exit;
1259 default:
1260 goto exit;
1261 }
1262 }
1263
1264 /*
1265 * read the kernel commandline, in case we need to get into debug mode
1266 * udev.log-priority=<level> syslog priority
1267 * udev.children-max=<number of workers> events are fully serialized if set to 1
1268 * udev.exec-delay=<seconds> delay execution of RUN= programs
1269 */
1270 f = fopen("/proc/cmdline", "r");
1271 if (f != NULL) {
1272 char cmdline[4096];
1273
1274 if (fgets(cmdline, sizeof(cmdline), f) != NULL) {
1275 char *pos;
1276
1277 pos = strstr(cmdline, "udev.log-priority=");
1278 if (pos != NULL) {
1279 pos += strlen("udev.log-priority=");
1280 udev_set_log_priority(udev, util_log_priority(pos));
1281 }
1282
1283 pos = strstr(cmdline, "udev.children-max=");
1284 if (pos != NULL) {
1285 pos += strlen("udev.children-max=");
1286 children_max = strtoul(pos, NULL, 0);
1287 }
1288
1289 pos = strstr(cmdline, "udev.exec-delay=");
1290 if (pos != NULL) {
1291 pos += strlen("udev.exec-delay=");
1292 exec_delay = strtoul(pos, NULL, 0);
1293 }
1294 }
1295 fclose(f);
1296 }
1297
1298 if (getuid() != 0) {
1299 fprintf(stderr, "root privileges required\n");
1300 log_error("root privileges required\n");
1301 goto exit;
1302 }
1303
1304 /* set umask before creating any file/directory */
1305 chdir("/");
1306 umask(022);
1307
1308 /* /run/udev */
1309 mkdir(udev_get_run_path(udev), 0755);
1310
1311 /* create standard links, copy static nodes, create nodes from modules */
1312 static_dev_create(udev);
1313 static_dev_create_from_modules(udev);
1314
1315 /* before opening new files, make sure std{in,out,err} fds are in a sane state */
1316 if (daemonize) {
1317 int fd;
1318
1319 fd = open("/dev/null", O_RDWR);
1320 if (fd >= 0) {
1321 if (write(STDOUT_FILENO, 0, 0) < 0)
1322 dup2(fd, STDOUT_FILENO);
1323 if (write(STDERR_FILENO, 0, 0) < 0)
1324 dup2(fd, STDERR_FILENO);
1325 if (fd > STDERR_FILENO)
1326 close(fd);
1327 } else {
1328 fprintf(stderr, "cannot open /dev/null\n");
1329 log_error("cannot open /dev/null\n");
1330 }
1331 }
1332
1333 if (systemd_fds(udev, &fd_ctrl, &fd_netlink) >= 0) {
1334 /* get control and netlink socket from systemd */
1335 udev_ctrl = udev_ctrl_new_from_fd(udev, fd_ctrl);
1336 if (udev_ctrl == NULL) {
1337 log_error("error taking over udev control socket");
1338 rc = 1;
1339 goto exit;
1340 }
1341
1342 monitor = udev_monitor_new_from_netlink_fd(udev, "kernel", fd_netlink);
1343 if (monitor == NULL) {
1344 log_error("error taking over netlink socket\n");
1345 rc = 3;
1346 goto exit;
1347 }
1348 } else {
1349 /* open control and netlink socket */
1350 udev_ctrl = udev_ctrl_new(udev);
1351 if (udev_ctrl == NULL) {
1352 fprintf(stderr, "error initializing udev control socket");
1353 log_error("error initializing udev control socket");
1354 rc = 1;
1355 goto exit;
1356 }
1357 fd_ctrl = udev_ctrl_get_fd(udev_ctrl);
1358
1359 monitor = udev_monitor_new_from_netlink(udev, "kernel");
1360 if (monitor == NULL) {
1361 fprintf(stderr, "error initializing netlink socket\n");
1362 log_error("error initializing netlink socket\n");
1363 rc = 3;
1364 goto exit;
1365 }
1366 fd_netlink = udev_monitor_get_fd(monitor);
1367 }
1368
1369 if (udev_monitor_enable_receiving(monitor) < 0) {
1370 fprintf(stderr, "error binding netlink socket\n");
1371 log_error("error binding netlink socket\n");
1372 rc = 3;
1373 goto exit;
1374 }
1375
1376 if (udev_ctrl_enable_receiving(udev_ctrl) < 0) {
1377 fprintf(stderr, "error binding udev control socket\n");
1378 log_error("error binding udev control socket\n");
1379 rc = 1;
1380 goto exit;
1381 }
1382
1383 udev_monitor_set_receive_buffer_size(monitor, 128*1024*1024);
1384
1385 /* create queue file before signalling 'ready', to make sure we block 'settle' */
1386 udev_queue_export = udev_queue_export_new(udev);
1387 if (udev_queue_export == NULL) {
1388 log_error("error creating queue file\n");
1389 goto exit;
1390 }
1391
1392 if (daemonize) {
1393 pid_t pid;
1394 int fd;
1395
1396 pid = fork();
1397 switch (pid) {
1398 case 0:
1399 break;
1400 case -1:
1401 log_error("fork of daemon failed: %m\n");
1402 rc = 4;
1403 goto exit;
1404 default:
1405 rc = EXIT_SUCCESS;
1406 goto exit_daemonize;
1407 }
1408
1409 setsid();
1410
1411 fd = open("/proc/self/oom_score_adj", O_RDWR);
1412 if (fd < 0) {
1413 /* Fallback to old interface */
1414 fd = open("/proc/self/oom_adj", O_RDWR);
1415 if (fd < 0) {
1416 log_error("error disabling OOM: %m\n");
1417 } else {
1418 /* OOM_DISABLE == -17 */
1419 write(fd, "-17", 3);
1420 close(fd);
1421 }
1422 } else {
1423 write(fd, "-1000", 5);
1424 close(fd);
1425 }
1426 } else {
1427 sd_notify(1, "READY=1");
1428 }
1429
1430 f = fopen("/dev/kmsg", "w");
1431 if (f != NULL) {
1432 fprintf(f, "<30>udevd[%u]: starting version " VERSION "\n", getpid());
1433 fclose(f);
1434 }
1435
1436 if (!debug) {
1437 int fd;
1438
1439 fd = open("/dev/null", O_RDWR);
1440 if (fd >= 0) {
1441 dup2(fd, STDIN_FILENO);
1442 dup2(fd, STDOUT_FILENO);
1443 dup2(fd, STDERR_FILENO);
1444 close(fd);
1445 }
1446 }
1447
1448 fd_inotify = udev_watch_init(udev);
1449 if (fd_inotify < 0) {
1450 fprintf(stderr, "error initializing inotify\n");
1451 log_error("error initializing inotify\n");
1452 rc = 4;
1453 goto exit;
1454 }
1455 udev_watch_restore(udev);
1456
1457 /* block and listen to all signals on signalfd */
1458 sigfillset(&mask);
1459 sigprocmask(SIG_SETMASK, &mask, &sigmask_orig);
1460 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
1461 if (fd_signal < 0) {
1462 fprintf(stderr, "error creating signalfd\n");
1463 log_error("error creating signalfd\n");
1464 rc = 5;
1465 goto exit;
1466 }
1467
1468 /* unnamed socket from workers to the main daemon */
1469 if (socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, worker_watch) < 0) {
1470 fprintf(stderr, "error creating socketpair\n");
1471 log_error("error creating socketpair\n");
1472 rc = 6;
1473 goto exit;
1474 }
1475 fd_worker = worker_watch[READ_END];
1476
1477 udev_builtin_init(udev);
1478
1479 rules = udev_rules_new(udev, resolve_names);
1480 if (rules == NULL) {
1481 log_error("error reading rules\n");
1482 goto exit;
1483 }
1484
1485 memset(&ep_ctrl, 0, sizeof(struct epoll_event));
1486 ep_ctrl.events = EPOLLIN;
1487 ep_ctrl.data.fd = fd_ctrl;
1488
1489 memset(&ep_inotify, 0, sizeof(struct epoll_event));
1490 ep_inotify.events = EPOLLIN;
1491 ep_inotify.data.fd = fd_inotify;
1492
1493 memset(&ep_signal, 0, sizeof(struct epoll_event));
1494 ep_signal.events = EPOLLIN;
1495 ep_signal.data.fd = fd_signal;
1496
1497 memset(&ep_netlink, 0, sizeof(struct epoll_event));
1498 ep_netlink.events = EPOLLIN;
1499 ep_netlink.data.fd = fd_netlink;
1500
1501 memset(&ep_worker, 0, sizeof(struct epoll_event));
1502 ep_worker.events = EPOLLIN;
1503 ep_worker.data.fd = fd_worker;
1504
1505 fd_ep = epoll_create1(EPOLL_CLOEXEC);
1506 if (fd_ep < 0) {
1507 log_error("error creating epoll fd: %m\n");
1508 goto exit;
1509 }
1510 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_ctrl, &ep_ctrl) < 0 ||
1511 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_inotify, &ep_inotify) < 0 ||
1512 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
1513 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_netlink, &ep_netlink) < 0 ||
1514 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_worker, &ep_worker) < 0) {
1515 log_error("fail to add fds to epoll: %m\n");
1516 goto exit;
1517 }
1518
1519 /* if needed, convert old database from earlier udev version */
1520 convert_db(udev);
1521
1522 if (children_max <= 0) {
1523 int memsize = mem_size_mb();
1524
1525 /* set value depending on the amount of RAM */
1526 if (memsize > 0)
1527 children_max = 128 + (memsize / 8);
1528 else
1529 children_max = 128;
1530 }
1531 log_debug("set children_max to %u\n", children_max);
1532
1533 udev_rules_apply_static_dev_perms(rules);
1534
1535 udev_list_node_init(&event_list);
1536 udev_list_node_init(&worker_list);
1537
1538 for (;;) {
1539 static unsigned long long last_usec;
1540 struct epoll_event ev[8];
1541 int fdcount;
1542 int timeout;
1543 bool is_worker, is_signal, is_inotify, is_netlink, is_ctrl;
1544 int i;
1545
1546 if (udev_exit) {
1547 /* close sources of new events and discard buffered events */
1548 if (fd_ctrl >= 0) {
1549 epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_ctrl, NULL);
1550 fd_ctrl = -1;
1551 }
1552 if (monitor != NULL) {
1553 epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_netlink, NULL);
1554 udev_monitor_unref(monitor);
1555 monitor = NULL;
1556 }
1557 if (fd_inotify >= 0) {
1558 epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_inotify, NULL);
1559 close(fd_inotify);
1560 fd_inotify = -1;
1561 }
1562
1563 /* discard queued events and kill workers */
1564 event_queue_cleanup(udev, EVENT_QUEUED);
1565 worker_kill(udev, 0);
1566
1567 /* exit after all has cleaned up */
1568 if (udev_list_node_is_empty(&event_list) && udev_list_node_is_empty(&worker_list))
1569 break;
1570
1571 /* timeout at exit for workers to finish */
1572 timeout = 30 * 1000;
1573 } else if (udev_list_node_is_empty(&event_list) && children <= 2) {
1574 /* we are idle */
1575 timeout = -1;
1576 } else {
1577 /* kill idle or hanging workers */
1578 timeout = 3 * 1000;
1579 }
1580 fdcount = epoll_wait(fd_ep, ev, ARRAY_SIZE(ev), timeout);
1581 if (fdcount < 0)
1582 continue;
1583
1584 if (fdcount == 0) {
1585 struct udev_list_node *loop;
1586
1587 /* timeout */
1588 if (udev_exit) {
1589 log_error("timeout, giving up waiting for workers to finish\n");
1590 break;
1591 }
1592
1593 /* kill idle workers */
1594 if (udev_list_node_is_empty(&event_list)) {
1595 log_debug("cleanup idle workers\n");
1596 worker_kill(udev, 2);
1597 }
1598
1599 /* check for hanging events */
1600 udev_list_node_foreach(loop, &worker_list) {
1601 struct worker *worker = node_to_worker(loop);
1602
1603 if (worker->state != WORKER_RUNNING)
1604 continue;
1605
1606 if ((now_usec() - worker->event_start_usec) > 30 * 1000 * 1000) {
1607 log_error("worker [%u] timeout, kill it\n", worker->pid,
1608 worker->event ? worker->event->devpath : "<idle>");
1609 kill(worker->pid, SIGKILL);
1610 worker->state = WORKER_KILLED;
1611 /* drop reference taken for state 'running' */
1612 worker_unref(worker);
1613 if (worker->event) {
1614 log_error("seq %llu '%s' killed\n",
1615 udev_device_get_seqnum(worker->event->dev), worker->event->devpath);
1616 worker->event->exitcode = -64;
1617 event_queue_delete(worker->event, true);
1618 worker->event = NULL;
1619 }
1620 }
1621 }
1622
1623 }
1624
1625 is_worker = is_signal = is_inotify = is_netlink = is_ctrl = false;
1626 for (i = 0; i < fdcount; i++) {
1627 if (ev[i].data.fd == fd_worker && ev[i].events & EPOLLIN)
1628 is_worker = true;
1629 else if (ev[i].data.fd == fd_netlink && ev[i].events & EPOLLIN)
1630 is_netlink = true;
1631 else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN)
1632 is_signal = true;
1633 else if (ev[i].data.fd == fd_inotify && ev[i].events & EPOLLIN)
1634 is_inotify = true;
1635 else if (ev[i].data.fd == fd_ctrl && ev[i].events & EPOLLIN)
1636 is_ctrl = true;
1637 }
1638
1639 /* check for changed config, every 3 seconds at most */
1640 if ((now_usec() - last_usec) > 3 * 1000 * 1000) {
1641 if (check_rules_timestamp(udev))
1642 reload = true;
1643 if (udev_builtin_validate(udev))
1644 reload = true;
1645
1646 last_usec = now_usec();
1647 }
1648
1649 /* reload requested, HUP signal received, rules changed, builtin changed */
1650 if (reload) {
1651 worker_kill(udev, 0);
1652 rules = udev_rules_unref(rules);
1653 udev_builtin_exit(udev);
1654 reload = 0;
1655 }
1656
1657 /* event has finished */
1658 if (is_worker)
1659 worker_returned(fd_worker);
1660
1661 if (is_netlink) {
1662 struct udev_device *dev;
1663
1664 dev = udev_monitor_receive_device(monitor);
1665 if (dev != NULL) {
1666 udev_device_set_usec_initialized(dev, now_usec());
1667 if (event_queue_insert(dev) < 0)
1668 udev_device_unref(dev);
1669 }
1670 }
1671
1672 /* start new events */
1673 if (!udev_list_node_is_empty(&event_list) && !udev_exit && !stop_exec_queue) {
1674 if (rules == NULL)
1675 rules = udev_rules_new(udev, resolve_names);
1676 if (rules != NULL)
1677 event_queue_start(udev);
1678 }
1679
1680 if (is_signal) {
1681 struct signalfd_siginfo fdsi;
1682 ssize_t size;
1683
1684 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
1685 if (size == sizeof(struct signalfd_siginfo))
1686 handle_signal(udev, fdsi.ssi_signo);
1687 }
1688
1689 /* we are shutting down, the events below are not handled anymore */
1690 if (udev_exit)
1691 continue;
1692
1693 /* device node watch */
1694 if (is_inotify)
1695 handle_inotify(udev);
1696
1697 /*
1698 * This needs to happen after the inotify handling, to make sure
1699 * that the ping is sent back after any "change" events possibly
1700 * generated by the inotify device node watch.
1701 *
1702 * A single time we may receive a client connection which we need to
1703 * keep open to block the client. It will be closed right before we
1704 * exit.
1705 */
1706 if (is_ctrl)
1707 ctrl_conn = handle_ctrl_msg(udev_ctrl);
1708 }
1709
1710 rc = EXIT_SUCCESS;
1711 exit:
1712 udev_queue_export_cleanup(udev_queue_export);
1713 udev_ctrl_cleanup(udev_ctrl);
1714 exit_daemonize:
1715 if (fd_ep >= 0)
1716 close(fd_ep);
1717 worker_list_cleanup(udev);
1718 event_queue_cleanup(udev, EVENT_UNDEF);
1719 udev_rules_unref(rules);
1720 udev_builtin_exit(udev);
1721 if (fd_signal >= 0)
1722 close(fd_signal);
1723 if (worker_watch[READ_END] >= 0)
1724 close(worker_watch[READ_END]);
1725 if (worker_watch[WRITE_END] >= 0)
1726 close(worker_watch[WRITE_END]);
1727 udev_monitor_unref(monitor);
1728 udev_queue_export_unref(udev_queue_export);
1729 udev_ctrl_connection_unref(ctrl_conn);
1730 udev_ctrl_unref(udev_ctrl);
1731 udev_selinux_exit(udev);
1732 udev_unref(udev);
1733 log_close();
1734 return rc;
1735 }