src/udev/udevd.c
1 /*
2 * Copyright (C) 2004-2012 Kay Sievers <kay.sievers@vrfy.org>
3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <stddef.h>
22 #include <signal.h>
23 #include <unistd.h>
24 #include <errno.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <stdbool.h>
28 #include <string.h>
29 #include <ctype.h>
30 #include <fcntl.h>
31 #include <time.h>
32 #include <getopt.h>
33 #include <dirent.h>
34 #include <sys/time.h>
35 #include <sys/prctl.h>
36 #include <sys/socket.h>
37 #include <sys/un.h>
38 #include <sys/signalfd.h>
39 #include <sys/epoll.h>
40 #include <sys/poll.h>
41 #include <sys/wait.h>
42 #include <sys/stat.h>
43 #include <sys/ioctl.h>
44 #include <sys/inotify.h>
45 #include <sys/utsname.h>
46
47 #include "udev.h"
48 #include "sd-daemon.h"
49 #include "cgroup-util.h"
50 #include "dev-setup.h"
51
52 static bool debug;
53
54 void udev_main_log(struct udev *udev, int priority,
55 const char *file, int line, const char *fn,
56 const char *format, va_list args)
57 {
58 log_metav(priority, file, line, fn, format, args);
59 }
60
61 static struct udev_rules *rules;
62 static struct udev_queue_export *udev_queue_export;
63 static struct udev_ctrl *udev_ctrl;
64 static struct udev_monitor *monitor;
65 static int worker_watch[2] = { -1, -1 };
66 static int fd_signal = -1;
67 static int fd_ep = -1;
68 static int fd_inotify = -1;
69 static bool stop_exec_queue;
70 static bool reload;
71 static int children;
72 static int children_max;
73 static int exec_delay;
74 static sigset_t sigmask_orig;
75 static UDEV_LIST(event_list);
76 static UDEV_LIST(worker_list);
77 char *udev_cgroup;
78 static bool udev_exit;
79
80 enum event_state {
81 EVENT_UNDEF,
82 EVENT_QUEUED,
83 EVENT_RUNNING,
84 };
85
86 struct event {
87 struct udev_list_node node;
88 struct udev *udev;
89 struct udev_device *dev;
90 enum event_state state;
91 int exitcode;
92 unsigned long long int delaying_seqnum;
93 unsigned long long int seqnum;
94 const char *devpath;
95 size_t devpath_len;
96 const char *devpath_old;
97 dev_t devnum;
98 bool is_block;
99 int ifindex;
100 };
101
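/* container_of-style helper: map a list node back to the struct event that embeds it */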
102 static struct event *node_to_event(struct udev_list_node *node)
103 {
104 char *event;
105
106 event = (char *)node;
107 event -= offsetof(struct event, node);
108 return (struct event *)event;
109 }
110
111 static void event_queue_cleanup(struct udev *udev, enum event_state type);
112
113 enum worker_state {
114 WORKER_UNDEF,
115 WORKER_RUNNING,
116 WORKER_IDLE,
117 WORKER_KILLED,
118 };
119
120 struct worker {
121 struct udev_list_node node;
122 struct udev *udev;
123 int refcount;
124 pid_t pid;
125 struct udev_monitor *monitor;
126 enum worker_state state;
127 struct event *event;
128 unsigned long long event_start_usec;
129 };
130
131 /* passed from worker to main process */
132 struct worker_message {
133 pid_t pid;
134 int exitcode;
135 };
136
137 static struct worker *node_to_worker(struct udev_list_node *node)
138 {
139 char *worker;
140
141 worker = (char *)node;
142 worker -= offsetof(struct worker, node);
143 return (struct worker *)worker;
144 }
145
146 static void event_queue_delete(struct event *event, bool export)
147 {
148 udev_list_node_remove(&event->node);
149
150 if (export) {
151 udev_queue_export_device_finished(udev_queue_export, event->dev);
152 log_debug("seq %llu done with %i\n", udev_device_get_seqnum(event->dev), event->exitcode);
153 }
154 udev_device_unref(event->dev);
155 free(event);
156 }
157
158 static struct worker *worker_ref(struct worker *worker)
159 {
160 worker->refcount++;
161 return worker;
162 }
163
164 static void worker_cleanup(struct worker *worker)
165 {
166 udev_list_node_remove(&worker->node);
167 udev_monitor_unref(worker->monitor);
168 children--;
169 free(worker);
170 }
171
172 static void worker_unref(struct worker *worker)
173 {
174 worker->refcount--;
175 if (worker->refcount > 0)
176 return;
177 log_debug("worker [%u] cleaned up\n", worker->pid);
178 worker_cleanup(worker);
179 }
180
181 static void worker_list_cleanup(struct udev *udev)
182 {
183 struct udev_list_node *loop, *tmp;
184
185 udev_list_node_foreach_safe(loop, tmp, &worker_list) {
186 struct worker *worker = node_to_worker(loop);
187
188 worker_cleanup(worker);
189 }
190 }
191
192 static void worker_new(struct event *event)
193 {
194 struct udev *udev = event->udev;
195 struct worker *worker;
196 struct udev_monitor *worker_monitor;
197 pid_t pid;
198
199 /* listen for new events */
200 worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
201 if (worker_monitor == NULL)
202 return;
203 /* allow the main daemon netlink address to send devices to the worker */
204 udev_monitor_allow_unicast_sender(worker_monitor, monitor);
205 udev_monitor_enable_receiving(worker_monitor);
206
207 worker = calloc(1, sizeof(struct worker));
208 if (worker == NULL) {
209 udev_monitor_unref(worker_monitor);
210 return;
211 }
212 /* worker + event reference */
213 worker->refcount = 2;
214 worker->udev = udev;
215
216 pid = fork();
217 switch (pid) {
218 case 0: {
219 struct udev_device *dev = NULL;
220 int fd_monitor;
221 struct epoll_event ep_signal, ep_monitor;
222 sigset_t mask;
223 int rc = EXIT_SUCCESS;
224
225 /* take initial device from queue */
226 dev = event->dev;
227 event->dev = NULL;
228
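/* in the child: drop the main daemon's state inherited across fork() and close descriptors the worker does not need */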
229 free(worker);
230 worker_list_cleanup(udev);
231 event_queue_cleanup(udev, EVENT_UNDEF);
232 udev_queue_export_unref(udev_queue_export);
233 udev_monitor_unref(monitor);
234 udev_ctrl_unref(udev_ctrl);
235 close(fd_signal);
236 close(fd_ep);
237 close(worker_watch[READ_END]);
238
239 sigfillset(&mask);
240 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
241 if (fd_signal < 0) {
242 log_error("error creating signalfd %m\n");
243 rc = 2;
244 goto out;
245 }
246
247 fd_ep = epoll_create1(EPOLL_CLOEXEC);
248 if (fd_ep < 0) {
249 log_error("error creating epoll fd: %m\n");
250 rc = 3;
251 goto out;
252 }
253
254 memset(&ep_signal, 0, sizeof(struct epoll_event));
255 ep_signal.events = EPOLLIN;
256 ep_signal.data.fd = fd_signal;
257
258 fd_monitor = udev_monitor_get_fd(worker_monitor);
259 memset(&ep_monitor, 0, sizeof(struct epoll_event));
260 ep_monitor.events = EPOLLIN;
261 ep_monitor.data.fd = fd_monitor;
262
263 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
264 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
265 log_error("fail to add fds to epoll: %m\n");
266 rc = 4;
267 goto out;
268 }
269
270 /* request TERM signal if parent exits */
271 prctl(PR_SET_PDEATHSIG, SIGTERM);
272
273 for (;;) {
274 struct udev_event *udev_event;
275 struct worker_message msg;
276 int err;
277
278 log_debug("seq %llu running\n", udev_device_get_seqnum(dev));
279 udev_event = udev_event_new(dev);
280 if (udev_event == NULL) {
281 rc = 5;
282 goto out;
283 }
284
285 /* needed for SIGCHLD/SIGTERM in spawn() */
286 udev_event->fd_signal = fd_signal;
287
288 if (exec_delay > 0)
289 udev_event->exec_delay = exec_delay;
290
291 /* apply rules, create node, symlinks */
292 err = udev_event_execute_rules(udev_event, rules, &sigmask_orig);
293
294 if (err == 0)
295 udev_event_execute_run(udev_event, &sigmask_orig);
296
297 /* apply/restore inotify watch */
298 if (err == 0 && udev_event->inotify_watch) {
299 udev_watch_begin(udev, dev);
300 udev_device_update_db(dev);
301 }
302
303 /* send processed event back to libudev listeners */
304 udev_monitor_send_device(worker_monitor, NULL, dev);
305
306 /* send udevd the result of the event execution */
307 memset(&msg, 0, sizeof(struct worker_message));
308 if (err != 0)
309 msg.exitcode = err;
310 msg.pid = getpid();
311 send(worker_watch[WRITE_END], &msg, sizeof(struct worker_message), 0);
312
313 log_debug("seq %llu processed with %i\n", udev_device_get_seqnum(dev), err);
314
315 udev_device_unref(dev);
316 dev = NULL;
317
318 if (udev_event->sigterm) {
319 udev_event_unref(udev_event);
320 goto out;
321 }
322
323 udev_event_unref(udev_event);
324
325 /* wait for more device messages from main udevd, or term signal */
326 while (dev == NULL) {
327 struct epoll_event ev[4];
328 int fdcount;
329 int i;
330
331 fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
332 if (fdcount < 0) {
333 if (errno == EINTR)
334 continue;
335 log_error("failed to poll: %m\n");
336 goto out;
337 }
338
339 for (i = 0; i < fdcount; i++) {
340 if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
341 dev = udev_monitor_receive_device(worker_monitor);
342 break;
343 } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
344 struct signalfd_siginfo fdsi;
345 ssize_t size;
346
347 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
348 if (size != sizeof(struct signalfd_siginfo))
349 continue;
350 switch (fdsi.ssi_signo) {
351 case SIGTERM:
352 goto out;
353 }
354 }
355 }
356 }
357 }
358 out:
359 udev_device_unref(dev);
360 if (fd_signal >= 0)
361 close(fd_signal);
362 if (fd_ep >= 0)
363 close(fd_ep);
364 close(fd_inotify);
365 close(worker_watch[WRITE_END]);
366 udev_rules_unref(rules);
367 udev_builtin_exit(udev);
368 udev_monitor_unref(worker_monitor);
369 udev_unref(udev);
370 log_close();
371 exit(rc);
372 }
373 case -1:
374 udev_monitor_unref(worker_monitor);
375 event->state = EVENT_QUEUED;
376 free(worker);
377 log_error("fork of child failed: %m\n");
378 break;
379 default:
380 /* close monitor, but keep address around */
381 udev_monitor_disconnect(worker_monitor);
382 worker->monitor = worker_monitor;
383 worker->pid = pid;
384 worker->state = WORKER_RUNNING;
385 worker->event_start_usec = now_usec();
386 worker->event = event;
387 event->state = EVENT_RUNNING;
388 udev_list_node_append(&worker->node, &worker_list);
389 children++;
390 log_debug("seq %llu forked new worker [%u]\n", udev_device_get_seqnum(event->dev), pid);
391 break;
392 }
393 }
394
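/* dispatch an event to an idle worker; fork a new worker if none is idle and the children limit is not reached */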
395 static void event_run(struct event *event)
396 {
397 struct udev_list_node *loop;
398
399 udev_list_node_foreach(loop, &worker_list) {
400 struct worker *worker = node_to_worker(loop);
401 ssize_t count;
402
403 if (worker->state != WORKER_IDLE)
404 continue;
405
406 count = udev_monitor_send_device(monitor, worker->monitor, event->dev);
407 if (count < 0) {
408 log_error("worker [%u] did not accept message %zi (%m), kill it\n", worker->pid, count);
409 kill(worker->pid, SIGKILL);
410 worker->state = WORKER_KILLED;
411 continue;
412 }
413 worker_ref(worker);
414 worker->event = event;
415 worker->state = WORKER_RUNNING;
416 worker->event_start_usec = now_usec();
417 event->state = EVENT_RUNNING;
418 return;
419 }
420
421 if (children >= children_max) {
422 if (children_max > 1)
423 log_debug("maximum number (%i) of children reached\n", children);
424 return;
425 }
426
427 /* start new worker and pass initial device */
428 worker_new(event);
429 }
430
431 static int event_queue_insert(struct udev_device *dev)
432 {
433 struct event *event;
434
435 event = calloc(1, sizeof(struct event));
436 if (event == NULL)
437 return -1;
438
439 event->udev = udev_device_get_udev(dev);
440 event->dev = dev;
441 event->seqnum = udev_device_get_seqnum(dev);
442 event->devpath = udev_device_get_devpath(dev);
443 event->devpath_len = strlen(event->devpath);
444 event->devpath_old = udev_device_get_devpath_old(dev);
445 event->devnum = udev_device_get_devnum(dev);
446 event->is_block = (strcmp("block", udev_device_get_subsystem(dev)) == 0);
447 event->ifindex = udev_device_get_ifindex(dev);
448
449 udev_queue_export_device_queued(udev_queue_export, dev);
450 log_debug("seq %llu queued, '%s' '%s'\n", udev_device_get_seqnum(dev),
451 udev_device_get_action(dev), udev_device_get_subsystem(dev));
452
453 event->state = EVENT_QUEUED;
454 udev_list_node_append(&event->node, &event_list);
455 return 0;
456 }
457
458 static void worker_kill(struct udev *udev)
459 {
460 struct udev_list_node *loop;
461
462 udev_list_node_foreach(loop, &worker_list) {
463 struct worker *worker = node_to_worker(loop);
464
465 if (worker->state == WORKER_KILLED)
466 continue;
467
468 worker->state = WORKER_KILLED;
469 kill(worker->pid, SIGTERM);
470 }
471 }
472
473 /* lookup event for identical, parent, child device */
474 static bool is_devpath_busy(struct event *event)
475 {
476 struct udev_list_node *loop;
477 size_t common;
478
479 /* check if queue contains events we depend on */
480 udev_list_node_foreach(loop, &event_list) {
481 struct event *loop_event = node_to_event(loop);
482
483 /* we already found a later event, earlier can not block us, no need to check again */
484 if (loop_event->seqnum < event->delaying_seqnum)
485 continue;
486
487 /* event we checked earlier still exists, no need to check again */
488 if (loop_event->seqnum == event->delaying_seqnum)
489 return true;
490
491 /* found ourself, no later event can block us */
492 if (loop_event->seqnum >= event->seqnum)
493 break;
494
495 /* check major/minor */
496 if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
497 return true;
498
499 /* check network device ifindex */
500 if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
501 return true;
502
503 /* check our old name */
504 if (event->devpath_old != NULL && strcmp(loop_event->devpath, event->devpath_old) == 0) {
505 event->delaying_seqnum = loop_event->seqnum;
506 return true;
507 }
508
509 /* compare devpath */
510 common = MIN(loop_event->devpath_len, event->devpath_len);
511
512 /* one devpath is contained in the other? */
513 if (memcmp(loop_event->devpath, event->devpath, common) != 0)
514 continue;
515
516 /* identical device event found */
517 if (loop_event->devpath_len == event->devpath_len) {
518 /* devices names might have changed/swapped in the meantime */
519 if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
520 continue;
521 if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
522 continue;
523 event->delaying_seqnum = loop_event->seqnum;
524 return true;
525 }
526
527 /* parent device event found */
528 if (event->devpath[common] == '/') {
529 event->delaying_seqnum = loop_event->seqnum;
530 return true;
531 }
532
533 /* child device event found */
534 if (loop_event->devpath[common] == '/') {
535 event->delaying_seqnum = loop_event->seqnum;
536 return true;
537 }
538
539 /* no matching device */
540 continue;
541 }
542
543 return false;
544 }
545
546 static void event_queue_start(struct udev *udev)
547 {
548 struct udev_list_node *loop;
549
550 udev_list_node_foreach(loop, &event_list) {
551 struct event *event = node_to_event(loop);
552
553 if (event->state != EVENT_QUEUED)
554 continue;
555
556 /* do not start event if parent or child event is still running */
557 if (is_devpath_busy(event))
558 continue;
559
560 event_run(event);
561 }
562 }
563
564 static void event_queue_cleanup(struct udev *udev, enum event_state match_type)
565 {
566 struct udev_list_node *loop, *tmp;
567
568 udev_list_node_foreach_safe(loop, tmp, &event_list) {
569 struct event *event = node_to_event(loop);
570
571 if (match_type != EVENT_UNDEF && match_type != event->state)
572 continue;
573
574 event_queue_delete(event, false);
575 }
576 }
577
578 static void worker_returned(int fd_worker)
579 {
580 for (;;) {
581 struct worker_message msg;
582 ssize_t size;
583 struct udev_list_node *loop;
584
585 size = recv(fd_worker, &msg, sizeof(struct worker_message), MSG_DONTWAIT);
586 if (size != sizeof(struct worker_message))
587 break;
588
589 /* lookup the worker that sent the message */
590 udev_list_node_foreach(loop, &worker_list) {
591 struct worker *worker = node_to_worker(loop);
592
593 if (worker->pid != msg.pid)
594 continue;
595
596 /* worker returned */
597 if (worker->event) {
598 worker->event->exitcode = msg.exitcode;
599 event_queue_delete(worker->event, true);
600 worker->event = NULL;
601 }
602 if (worker->state != WORKER_KILLED)
603 worker->state = WORKER_IDLE;
604 worker_unref(worker);
605 break;
606 }
607 }
608 }
609
610 /* receive the udevd message from userspace */
611 static struct udev_ctrl_connection *handle_ctrl_msg(struct udev_ctrl *uctrl)
612 {
613 struct udev *udev = udev_ctrl_get_udev(uctrl);
614 struct udev_ctrl_connection *ctrl_conn;
615 struct udev_ctrl_msg *ctrl_msg = NULL;
616 const char *str;
617 int i;
618
619 ctrl_conn = udev_ctrl_get_connection(uctrl);
620 if (ctrl_conn == NULL)
621 goto out;
622
623 ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
624 if (ctrl_msg == NULL)
625 goto out;
626
627 i = udev_ctrl_get_set_log_level(ctrl_msg);
628 if (i >= 0) {
629 log_debug("udevd message (SET_LOG_PRIORITY) received, log_priority=%i\n", i);
630 log_set_max_level(i);
631 udev_set_log_priority(udev, i);
632 worker_kill(udev);
633 }
634
635 if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
636 log_debug("udevd message (STOP_EXEC_QUEUE) received\n");
637 stop_exec_queue = true;
638 }
639
640 if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
641 log_debug("udevd message (START_EXEC_QUEUE) received\n");
642 stop_exec_queue = false;
643 }
644
645 if (udev_ctrl_get_reload(ctrl_msg) > 0) {
646 log_debug("udevd message (RELOAD) received\n");
647 reload = true;
648 }
649
650 str = udev_ctrl_get_set_env(ctrl_msg);
651 if (str != NULL) {
652 char *key;
653
654 key = strdup(str);
655 if (key != NULL) {
656 char *val;
657
658 val = strchr(key, '=');
659 if (val != NULL) {
660 val[0] = '\0';
661 val = &val[1];
662 if (val[0] == '\0') {
663 log_debug("udevd message (ENV) received, unset '%s'\n", key);
664 udev_add_property(udev, key, NULL);
665 } else {
666 log_debug("udevd message (ENV) received, set '%s=%s'\n", key, val);
667 udev_add_property(udev, key, val);
668 }
669 } else {
670 log_error("wrong key format '%s'\n", key);
671 }
672 free(key);
673 }
674 worker_kill(udev);
675 }
676
677 i = udev_ctrl_get_set_children_max(ctrl_msg);
678 if (i >= 0) {
679 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i\n", i);
680 children_max = i;
681 }
682
683 if (udev_ctrl_get_ping(ctrl_msg) > 0)
684 log_debug("udevd message (SYNC) received\n");
685
686 if (udev_ctrl_get_exit(ctrl_msg) > 0) {
687 log_debug("udevd message (EXIT) received\n");
688 udev_exit = true;
689 /* keep reference to block the client until we exit */
690 udev_ctrl_connection_ref(ctrl_conn);
691 }
692 out:
693 udev_ctrl_msg_unref(ctrl_msg);
694 return udev_ctrl_connection_unref(ctrl_conn);
695 }
696
697 /* read inotify messages */
698 static int handle_inotify(struct udev *udev)
699 {
700 int nbytes, pos;
701 char *buf;
702 struct inotify_event *ev;
703
704 if ((ioctl(fd_inotify, FIONREAD, &nbytes) < 0) || (nbytes <= 0))
705 return 0;
706
707 buf = malloc(nbytes);
708 if (buf == NULL) {
709 log_error("error getting buffer for inotify\n");
710 return -1;
711 }
712
713 nbytes = read(fd_inotify, buf, nbytes);
714
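/* the buffer can contain multiple events; each record is sizeof(struct inotify_event) plus ev->len bytes of name */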
715 for (pos = 0; pos < nbytes; pos += sizeof(struct inotify_event) + ev->len) {
716 struct udev_device *dev;
717
718 ev = (struct inotify_event *)(buf + pos);
719 dev = udev_watch_lookup(udev, ev->wd);
720 if (dev != NULL) {
721 log_debug("inotify event: %x for %s\n", ev->mask, udev_device_get_devnode(dev));
722 if (ev->mask & IN_CLOSE_WRITE) {
723 char filename[UTIL_PATH_SIZE];
724 int fd;
725
726 log_debug("device %s closed, synthesising 'change'\n", udev_device_get_devnode(dev));
727 util_strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
728 fd = open(filename, O_WRONLY);
729 if (fd >= 0) {
730 if (write(fd, "change", 6) < 0)
731 log_debug("error writing uevent: %m\n");
732 close(fd);
733 }
734 }
735 if (ev->mask & IN_IGNORED)
736 udev_watch_end(udev, dev);
737
738 udev_device_unref(dev);
739 }
740
741 }
742
743 free(buf);
744 return 0;
745 }
746
747 static void handle_signal(struct udev *udev, int signo)
748 {
749 switch (signo) {
750 case SIGINT:
751 case SIGTERM:
752 udev_exit = true;
753 break;
754 case SIGCHLD:
755 for (;;) {
756 pid_t pid;
757 int status;
758 struct udev_list_node *loop, *tmp;
759
760 pid = waitpid(-1, &status, WNOHANG);
761 if (pid <= 0)
762 break;
763
764 udev_list_node_foreach_safe(loop, tmp, &worker_list) {
765 struct worker *worker = node_to_worker(loop);
766
767 if (worker->pid != pid)
768 continue;
769 log_debug("worker [%u] exit\n", pid);
770
771 if (WIFEXITED(status)) {
772 if (WEXITSTATUS(status) != 0)
773 log_error("worker [%u] exit with return code %i\n", pid, WEXITSTATUS(status));
774 } else if (WIFSIGNALED(status)) {
775 log_error("worker [%u] terminated by signal %i (%s)\n",
776 pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
777 } else if (WIFSTOPPED(status)) {
778 log_error("worker [%u] stopped\n", pid);
779 } else if (WIFCONTINUED(status)) {
780 log_error("worker [%u] continued\n", pid);
781 } else {
782 log_error("worker [%u] exit with status 0x%04x\n", pid, status);
783 }
784
785 if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
786 if (worker->event) {
787 log_error("worker [%u] failed while handling '%s'\n",
788 pid, worker->event->devpath);
789 worker->event->exitcode = -32;
790 event_queue_delete(worker->event, true);
791 /* drop reference taken for state 'running' */
792 worker_unref(worker);
793 }
794 }
795 worker_unref(worker);
796 break;
797 }
798 }
799 break;
800 case SIGHUP:
801 reload = true;
802 break;
803 }
804 }
805
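/* create static device nodes from the kernel's /lib/modules/<release>/modules.devname list */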
806 static void static_dev_create_from_modules(struct udev *udev)
807 {
808 struct utsname kernel;
809 char modules[UTIL_PATH_SIZE];
810 char buf[4096];
811 FILE *f;
812
813 uname(&kernel);
814 util_strscpyl(modules, sizeof(modules), "/lib/modules/", kernel.release, "/modules.devname", NULL);
815 f = fopen(modules, "r");
816 if (f == NULL)
817 return;
818
819 while (fgets(buf, sizeof(buf), f) != NULL) {
820 char *s;
821 const char *modname;
822 const char *devname;
823 const char *devno;
824 int maj, min;
825 char type;
826 mode_t mode;
827 char filename[UTIL_PATH_SIZE];
828
829 if (buf[0] == '#')
830 continue;
831
832 modname = buf;
833 s = strchr(modname, ' ');
834 if (s == NULL)
835 continue;
836 s[0] = '\0';
837
838 devname = &s[1];
839 s = strchr(devname, ' ');
840 if (s == NULL)
841 continue;
842 s[0] = '\0';
843
844 devno = &s[1];
845 s = strchr(devno, ' ');
846 if (s == NULL)
847 s = strchr(devno, '\n');
848 if (s != NULL)
849 s[0] = '\0';
850 if (sscanf(devno, "%c%u:%u", &type, &maj, &min) != 3)
851 continue;
852
853 if (type == 'c')
854 mode = S_IFCHR;
855 else if (type == 'b')
856 mode = S_IFBLK;
857 else
858 continue;
859
860 util_strscpyl(filename, sizeof(filename), "/dev/", devname, NULL);
861 mkdir_parents(filename, 0755);
862 label_context_set(filename, mode);
863 log_debug("mknod '%s' %c%u:%u\n", filename, type, maj, min);
864 if (mknod(filename, mode, makedev(maj, min)) < 0 && errno == EEXIST)
865 utimensat(AT_FDCWD, filename, NULL, 0);
866 label_context_clear();
867 }
868
869 fclose(f);
870 }
871
872 static int mem_size_mb(void)
873 {
874 FILE *f;
875 char buf[4096];
876 long int memsize = -1;
877
878 f = fopen("/proc/meminfo", "r");
879 if (f == NULL)
880 return -1;
881
882 while (fgets(buf, sizeof(buf), f) != NULL) {
883 long int value;
884
885 if (sscanf(buf, "MemTotal: %ld kB", &value) == 1) {
886 memsize = value / 1024;
887 break;
888 }
889 }
890
891 fclose(f);
892 return memsize;
893 }
894
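/* one-time migration: read databases from the old /dev/.udev/db location and write them out to /run/udev/data */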
895 static int convert_db(struct udev *udev)
896 {
897 char filename[UTIL_PATH_SIZE];
898 FILE *f;
899 struct udev_enumerate *udev_enumerate;
900 struct udev_list_entry *list_entry;
901
902 /* current database */
903 if (access("/run/udev/data", F_OK) >= 0)
904 return 0;
905
906 /* make sure we do not get here again */
907 mkdir_parents("/run/udev/data", 0755);
908 mkdir("/run/udev/data", 0755);
909
910 /* old database */
911 util_strscpyl(filename, sizeof(filename), "/dev/.udev/db", NULL);
912 if (access(filename, F_OK) < 0)
913 return 0;
914
915 f = fopen("/dev/kmsg", "w");
916 if (f != NULL) {
917 fprintf(f, "<30>udevd[%u]: converting old udev database\n", getpid());
918 fclose(f);
919 }
920
921 udev_enumerate = udev_enumerate_new(udev);
922 if (udev_enumerate == NULL)
923 return -1;
924 udev_enumerate_scan_devices(udev_enumerate);
925 udev_list_entry_foreach(list_entry, udev_enumerate_get_list_entry(udev_enumerate)) {
926 struct udev_device *device;
927
928 device = udev_device_new_from_syspath(udev, udev_list_entry_get_name(list_entry));
929 if (device == NULL)
930 continue;
931
932 /* try to find the old database for devices without a current one */
933 if (udev_device_read_db(device, NULL) < 0) {
934 bool have_db;
935 const char *id;
936 struct stat stats;
937 char devpath[UTIL_PATH_SIZE];
938 char from[UTIL_PATH_SIZE];
939
940 have_db = false;
941
942 /* find database in old location */
943 id = udev_device_get_id_filename(device);
944 util_strscpyl(from, sizeof(from), "/dev/.udev/db/", id, NULL);
945 if (lstat(from, &stats) == 0) {
946 if (!have_db) {
947 udev_device_read_db(device, from);
948 have_db = true;
949 }
950 unlink(from);
951 }
952
953 /* find old database with $subsys:$sysname name */
954 util_strscpyl(from, sizeof(from), "/dev/.udev/db/",
955 udev_device_get_subsystem(device), ":", udev_device_get_sysname(device), NULL);
956 if (lstat(from, &stats) == 0) {
957 if (!have_db) {
958 udev_device_read_db(device, from);
959 have_db = true;
960 }
961 unlink(from);
962 }
963
964 /* find old database with the encoded devpath name */
965 util_path_encode(udev_device_get_devpath(device), devpath, sizeof(devpath));
966 util_strscpyl(from, sizeof(from), "/dev/.udev/db/", devpath, NULL);
967 if (lstat(from, &stats) == 0) {
968 if (!have_db) {
969 udev_device_read_db(device, from);
970 have_db = true;
971 }
972 unlink(from);
973 }
974
975 /* write out new database */
976 if (have_db)
977 udev_device_update_db(device);
978 }
979 udev_device_unref(device);
980 }
981 udev_enumerate_unref(udev_enumerate);
982 return 0;
983 }
984
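/* socket activation: expect exactly one AF_LOCAL control socket and one AF_NETLINK uevent socket passed in by systemd */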
985 static int systemd_fds(struct udev *udev, int *rctrl, int *rnetlink)
986 {
987 int ctrl = -1, netlink = -1;
988 int fd, n;
989
990 n = sd_listen_fds(true);
991 if (n <= 0)
992 return -1;
993
994 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
995 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
996 if (ctrl >= 0)
997 return -1;
998 ctrl = fd;
999 continue;
1000 }
1001
1002 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
1003 if (netlink >= 0)
1004 return -1;
1005 netlink = fd;
1006 continue;
1007 }
1008
1009 return -1;
1010 }
1011
1012 if (ctrl < 0 || netlink < 0)
1013 return -1;
1014
1015 log_debug("ctrl=%i netlink=%i\n", ctrl, netlink);
1016 *rctrl = ctrl;
1017 *rnetlink = netlink;
1018 return 0;
1019 }
1020
1021 int main(int argc, char *argv[])
1022 {
1023 struct udev *udev;
1024 FILE *f;
1025 sigset_t mask;
1026 int daemonize = false;
1027 int resolve_names = 1;
1028 static const struct option options[] = {
1029 { "daemon", no_argument, NULL, 'd' },
1030 { "debug", no_argument, NULL, 'D' },
1031 { "children-max", required_argument, NULL, 'c' },
1032 { "exec-delay", required_argument, NULL, 'e' },
1033 { "resolve-names", required_argument, NULL, 'N' },
1034 { "help", no_argument, NULL, 'h' },
1035 { "version", no_argument, NULL, 'V' },
1036 {}
1037 };
1038 int fd_ctrl = -1;
1039 int fd_netlink = -1;
1040 int fd_worker = -1;
1041 struct epoll_event ep_ctrl, ep_inotify, ep_signal, ep_netlink, ep_worker;
1042 struct udev_ctrl_connection *ctrl_conn = NULL;
1043 int rc = 1;
1044
1045 udev = udev_new();
1046 if (udev == NULL)
1047 goto exit;
1048
1049 log_open();
1050 log_parse_environment();
1051 udev_set_log_fn(udev, udev_main_log);
1052 log_debug("version %s\n", VERSION);
1053 label_init("/dev");
1054
1055 for (;;) {
1056 int option;
1057
1058 option = getopt_long(argc, argv, "c:deDtN:hV", options, NULL);
1059 if (option == -1)
1060 break;
1061
1062 switch (option) {
1063 case 'd':
1064 daemonize = true;
1065 break;
1066 case 'c':
1067 children_max = strtoul(optarg, NULL, 0);
1068 break;
1069 case 'e':
1070 exec_delay = strtoul(optarg, NULL, 0);
1071 break;
1072 case 'D':
1073 debug = true;
1074 log_set_max_level(LOG_DEBUG);
1075 udev_set_log_priority(udev, LOG_INFO);
1076 break;
1077 case 'N':
1078 if (strcmp (optarg, "early") == 0) {
1079 resolve_names = 1;
1080 } else if (strcmp (optarg, "late") == 0) {
1081 resolve_names = 0;
1082 } else if (strcmp (optarg, "never") == 0) {
1083 resolve_names = -1;
1084 } else {
1085 fprintf(stderr, "resolve-names must be early, late or never\n");
1086 log_error("resolve-names must be early, late or never\n");
1087 goto exit;
1088 }
1089 break;
1090 case 'h':
1091 printf("Usage: udevd OPTIONS\n"
1092 " --daemon\n"
1093 " --debug\n"
1094 " --children-max=<maximum number of workers>\n"
1095 " --exec-delay=<seconds to wait before executing RUN=>\n"
1096 " --resolve-names=early|late|never\n"
1097 " --version\n"
1098 " --help\n"
1099 "\n");
1100 goto exit;
1101 case 'V':
1102 printf("%s\n", VERSION);
1103 goto exit;
1104 default:
1105 goto exit;
1106 }
1107 }
1108
1109 /*
1110 * read the kernel commandline, in case we need to get into debug mode
1111 * udev.log-priority=<level> syslog priority
1112 * udev.children-max=<number of workers> events are fully serialized if set to 1
1113 *
1114 */
1115 f = fopen("/proc/cmdline", "r");
1116 if (f != NULL) {
1117 char cmdline[4096];
1118
1119 if (fgets(cmdline, sizeof(cmdline), f) != NULL) {
1120 char *pos;
1121
1122 pos = strstr(cmdline, "udev.log-priority=");
1123 if (pos != NULL) {
1124 pos += strlen("udev.log-priority=");
1125 udev_set_log_priority(udev, util_log_priority(pos));
1126 }
1127
1128 pos = strstr(cmdline, "udev.children-max=");
1129 if (pos != NULL) {
1130 pos += strlen("udev.children-max=");
1131 children_max = strtoul(pos, NULL, 0);
1132 }
1133
1134 pos = strstr(cmdline, "udev.exec-delay=");
1135 if (pos != NULL) {
1136 pos += strlen("udev.exec-delay=");
1137 exec_delay = strtoul(pos, NULL, 0);
1138 }
1139 }
1140 fclose(f);
1141 }
1142
1143 if (getuid() != 0) {
1144 fprintf(stderr, "root privileges required\n");
1145 log_error("root privileges required\n");
1146 goto exit;
1147 }
1148
1149 /* set umask before creating any file/directory */
1150 chdir("/");
1151 umask(022);
1152
1153 mkdir("/run/udev", 0755);
1154
1155 dev_setup();
1156 static_dev_create_from_modules(udev);
1157
1158 /* before opening new files, make sure std{in,out,err} fds are in a sane state */
1159 if (daemonize) {
1160 int fd;
1161
1162 fd = open("/dev/null", O_RDWR);
1163 if (fd >= 0) {
1164 if (write(STDOUT_FILENO, 0, 0) < 0)
1165 dup2(fd, STDOUT_FILENO);
1166 if (write(STDERR_FILENO, 0, 0) < 0)
1167 dup2(fd, STDERR_FILENO);
1168 if (fd > STDERR_FILENO)
1169 close(fd);
1170 } else {
1171 fprintf(stderr, "cannot open /dev/null\n");
1172 log_error("cannot open /dev/null\n");
1173 }
1174 }
1175
1176 if (systemd_fds(udev, &fd_ctrl, &fd_netlink) >= 0) {
1177 /* get control and netlink socket from systemd */
1178 udev_ctrl = udev_ctrl_new_from_fd(udev, fd_ctrl);
1179 if (udev_ctrl == NULL) {
1180 log_error("error taking over udev control socket");
1181 rc = 1;
1182 goto exit;
1183 }
1184
1185 monitor = udev_monitor_new_from_netlink_fd(udev, "kernel", fd_netlink);
1186 if (monitor == NULL) {
1187 log_error("error taking over netlink socket\n");
1188 rc = 3;
1189 goto exit;
1190 }
1191
1192 /* get our own cgroup, we regularly kill everything udev has left behind */
1193 if (cg_get_by_pid(SYSTEMD_CGROUP_CONTROLLER, 0, &udev_cgroup) < 0)
1194 udev_cgroup = NULL;
1195 } else {
1196 /* open control and netlink socket */
1197 udev_ctrl = udev_ctrl_new(udev);
1198 if (udev_ctrl == NULL) {
1199 fprintf(stderr, "error initializing udev control socket");
1200 log_error("error initializing udev control socket");
1201 rc = 1;
1202 goto exit;
1203 }
1204 fd_ctrl = udev_ctrl_get_fd(udev_ctrl);
1205
1206 monitor = udev_monitor_new_from_netlink(udev, "kernel");
1207 if (monitor == NULL) {
1208 fprintf(stderr, "error initializing netlink socket\n");
1209 log_error("error initializing netlink socket\n");
1210 rc = 3;
1211 goto exit;
1212 }
1213 fd_netlink = udev_monitor_get_fd(monitor);
1214 }
1215
1216 if (udev_monitor_enable_receiving(monitor) < 0) {
1217 fprintf(stderr, "error binding netlink socket\n");
1218 log_error("error binding netlink socket\n");
1219 rc = 3;
1220 goto exit;
1221 }
1222
1223 if (udev_ctrl_enable_receiving(udev_ctrl) < 0) {
1224 fprintf(stderr, "error binding udev control socket\n");
1225 log_error("error binding udev control socket\n");
1226 rc = 1;
1227 goto exit;
1228 }
1229
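/* use a large receive buffer so a burst of kernel uevents does not overflow the netlink socket */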
1230 udev_monitor_set_receive_buffer_size(monitor, 128*1024*1024);
1231
1232 /* create queue file before signalling 'ready', to make sure we block 'settle' */
1233 udev_queue_export = udev_queue_export_new(udev);
1234 if (udev_queue_export == NULL) {
1235 log_error("error creating queue file\n");
1236 goto exit;
1237 }
1238
1239 if (daemonize) {
1240 pid_t pid;
1241 int fd;
1242
1243 pid = fork();
1244 switch (pid) {
1245 case 0:
1246 break;
1247 case -1:
1248 log_error("fork of daemon failed: %m\n");
1249 rc = 4;
1250 goto exit;
1251 default:
1252 rc = EXIT_SUCCESS;
1253 goto exit_daemonize;
1254 }
1255
1256 setsid();
1257
1258 fd = open("/proc/self/oom_score_adj", O_RDWR);
1259 if (fd < 0) {
1260 /* Fallback to old interface */
1261 fd = open("/proc/self/oom_adj", O_RDWR);
1262 if (fd < 0) {
1263 log_error("error disabling OOM: %m\n");
1264 } else {
1265 /* OOM_DISABLE == -17 */
1266 write(fd, "-17", 3);
1267 close(fd);
1268 }
1269 } else {
1270 write(fd, "-1000", 5);
1271 close(fd);
1272 }
1273 } else {
1274 sd_notify(1, "READY=1");
1275 }
1276
1277 f = fopen("/dev/kmsg", "w");
1278 if (f != NULL) {
1279 fprintf(f, "<30>udevd[%u]: starting version " VERSION "\n", getpid());
1280 fclose(f);
1281 }
1282
1283 if (!debug) {
1284 int fd;
1285
1286 fd = open("/dev/null", O_RDWR);
1287 if (fd >= 0) {
1288 dup2(fd, STDIN_FILENO);
1289 dup2(fd, STDOUT_FILENO);
1290 dup2(fd, STDERR_FILENO);
1291 close(fd);
1292 }
1293 }
1294
1295 fd_inotify = udev_watch_init(udev);
1296 if (fd_inotify < 0) {
1297 fprintf(stderr, "error initializing inotify\n");
1298 log_error("error initializing inotify\n");
1299 rc = 4;
1300 goto exit;
1301 }
1302 udev_watch_restore(udev);
1303
1304 /* block and listen to all signals on signalfd */
1305 sigfillset(&mask);
1306 sigprocmask(SIG_SETMASK, &mask, &sigmask_orig);
1307 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
1308 if (fd_signal < 0) {
1309 fprintf(stderr, "error creating signalfd\n");
1310 log_error("error creating signalfd\n");
1311 rc = 5;
1312 goto exit;
1313 }
1314
1315 /* unnamed socket from workers to the main daemon */
1316 if (socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, worker_watch) < 0) {
1317 fprintf(stderr, "error creating socketpair\n");
1318 log_error("error creating socketpair\n");
1319 rc = 6;
1320 goto exit;
1321 }
1322 fd_worker = worker_watch[READ_END];
1323
1324 udev_builtin_init(udev);
1325
1326 rules = udev_rules_new(udev, resolve_names);
1327 if (rules == NULL) {
1328 log_error("error reading rules\n");
1329 goto exit;
1330 }
1331
1332 memset(&ep_ctrl, 0, sizeof(struct epoll_event));
1333 ep_ctrl.events = EPOLLIN;
1334 ep_ctrl.data.fd = fd_ctrl;
1335
1336 memset(&ep_inotify, 0, sizeof(struct epoll_event));
1337 ep_inotify.events = EPOLLIN;
1338 ep_inotify.data.fd = fd_inotify;
1339
1340 memset(&ep_signal, 0, sizeof(struct epoll_event));
1341 ep_signal.events = EPOLLIN;
1342 ep_signal.data.fd = fd_signal;
1343
1344 memset(&ep_netlink, 0, sizeof(struct epoll_event));
1345 ep_netlink.events = EPOLLIN;
1346 ep_netlink.data.fd = fd_netlink;
1347
1348 memset(&ep_worker, 0, sizeof(struct epoll_event));
1349 ep_worker.events = EPOLLIN;
1350 ep_worker.data.fd = fd_worker;
1351
1352 fd_ep = epoll_create1(EPOLL_CLOEXEC);
1353 if (fd_ep < 0) {
1354 log_error("error creating epoll fd: %m\n");
1355 goto exit;
1356 }
1357 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_ctrl, &ep_ctrl) < 0 ||
1358 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_inotify, &ep_inotify) < 0 ||
1359 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
1360 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_netlink, &ep_netlink) < 0 ||
1361 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_worker, &ep_worker) < 0) {
1362 log_error("fail to add fds to epoll: %m\n");
1363 goto exit;
1364 }
1365
1366 /* if needed, convert old database from earlier udev version */
1367 convert_db(udev);
1368
1369 if (children_max <= 0) {
1370 int memsize = mem_size_mb();
1371
1372 /* set value depending on the amount of RAM */
1373 if (memsize > 0)
1374 children_max = 128 + (memsize / 8);
1375 else
1376 children_max = 128;
1377 }
1378 log_debug("set children_max to %u\n", children_max);
1379
1380 udev_rules_apply_static_dev_perms(rules);
1381
1382 udev_list_node_init(&event_list);
1383 udev_list_node_init(&worker_list);
1384
1385 for (;;) {
1386 static unsigned long long last_usec;
1387 struct epoll_event ev[8];
1388 int fdcount;
1389 int timeout;
1390 bool is_worker, is_signal, is_inotify, is_netlink, is_ctrl;
1391 int i;
1392
1393 if (udev_exit) {
1394 /* close sources of new events and discard buffered events */
1395 if (fd_ctrl >= 0) {
1396 epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_ctrl, NULL);
1397 fd_ctrl = -1;
1398 }
1399 if (monitor != NULL) {
1400 epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_netlink, NULL);
1401 udev_monitor_unref(monitor);
1402 monitor = NULL;
1403 }
1404 if (fd_inotify >= 0) {
1405 epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_inotify, NULL);
1406 close(fd_inotify);
1407 fd_inotify = -1;
1408 }
1409
1410 /* discard queued events and kill workers */
1411 event_queue_cleanup(udev, EVENT_QUEUED);
1412 worker_kill(udev);
1413
1414 /* exit after all has cleaned up */
1415 if (udev_list_node_is_empty(&event_list) && udev_list_node_is_empty(&worker_list))
1416 break;
1417
1418 /* timeout at exit for workers to finish */
1419 timeout = 30 * 1000;
1420 } else if (udev_list_node_is_empty(&event_list) && !children) {
1421 /* we are idle */
1422 timeout = -1;
1423
1424 /* cleanup possible left-over processes in our cgroup */
1425 if (udev_cgroup)
1426 cg_kill(SYSTEMD_CGROUP_CONTROLLER, udev_cgroup, SIGKILL, false, true, NULL);
1427 } else {
1428 /* kill idle or hanging workers */
1429 timeout = 3 * 1000;
1430 }
1431 fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), timeout);
1432 if (fdcount < 0)
1433 continue;
1434
1435 if (fdcount == 0) {
1436 struct udev_list_node *loop;
1437
1438 /* timeout */
1439 if (udev_exit) {
1440 log_error("timeout, giving up waiting for workers to finish\n");
1441 break;
1442 }
1443
1444 /* kill idle workers */
1445 if (udev_list_node_is_empty(&event_list)) {
1446 log_debug("cleanup idle workers\n");
1447 worker_kill(udev);
1448 }
1449
1450 /* check for hanging events */
1451 udev_list_node_foreach(loop, &worker_list) {
1452 struct worker *worker = node_to_worker(loop);
1453
1454 if (worker->state != WORKER_RUNNING)
1455 continue;
1456
1457 if ((now_usec() - worker->event_start_usec) > 30 * 1000 * 1000) {
1458 log_error("worker [%u] %s timeout; kill it\n", worker->pid,
1459 worker->event ? worker->event->devpath : "<idle>");
1460 kill(worker->pid, SIGKILL);
1461 worker->state = WORKER_KILLED;
1462 /* drop reference taken for state 'running' */
1463 worker_unref(worker);
1464 if (worker->event) {
1465 log_error("seq %llu '%s' killed\n",
1466 udev_device_get_seqnum(worker->event->dev), worker->event->devpath);
1467 worker->event->exitcode = -64;
1468 event_queue_delete(worker->event, true);
1469 worker->event = NULL;
1470 }
1471 }
1472 }
1473
1474 }
1475
1476 is_worker = is_signal = is_inotify = is_netlink = is_ctrl = false;
1477 for (i = 0; i < fdcount; i++) {
1478 if (ev[i].data.fd == fd_worker && ev[i].events & EPOLLIN)
1479 is_worker = true;
1480 else if (ev[i].data.fd == fd_netlink && ev[i].events & EPOLLIN)
1481 is_netlink = true;
1482 else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN)
1483 is_signal = true;
1484 else if (ev[i].data.fd == fd_inotify && ev[i].events & EPOLLIN)
1485 is_inotify = true;
1486 else if (ev[i].data.fd == fd_ctrl && ev[i].events & EPOLLIN)
1487 is_ctrl = true;
1488 }
1489
1490 /* check for changed config, every 3 seconds at most */
1491 if ((now_usec() - last_usec) > 3 * 1000 * 1000) {
1492 if (udev_rules_check_timestamp(rules))
1493 reload = true;
1494 if (udev_builtin_validate(udev))
1495 reload = true;
1496
1497 last_usec = now_usec();
1498 }
1499
1500 /* reload requested, HUP signal received, rules changed, builtin changed */
1501 if (reload) {
1502 worker_kill(udev);
1503 rules = udev_rules_unref(rules);
1504 udev_builtin_exit(udev);
1505 reload = 0;
1506 }
1507
1508 /* event has finished */
1509 if (is_worker)
1510 worker_returned(fd_worker);
1511
1512 if (is_netlink) {
1513 struct udev_device *dev;
1514
1515 dev = udev_monitor_receive_device(monitor);
1516 if (dev != NULL) {
1517 udev_device_set_usec_initialized(dev, now_usec());
1518 if (event_queue_insert(dev) < 0)
1519 udev_device_unref(dev);
1520 }
1521 }
1522
1523 /* start new events */
1524 if (!udev_list_node_is_empty(&event_list) && !udev_exit && !stop_exec_queue) {
1525 if (rules == NULL)
1526 rules = udev_rules_new(udev, resolve_names);
1527 if (rules != NULL)
1528 event_queue_start(udev);
1529 }
1530
1531 if (is_signal) {
1532 struct signalfd_siginfo fdsi;
1533 ssize_t size;
1534
1535 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
1536 if (size == sizeof(struct signalfd_siginfo))
1537 handle_signal(udev, fdsi.ssi_signo);
1538 }
1539
1540 /* we are shutting down, the events below are not handled anymore */
1541 if (udev_exit)
1542 continue;
1543
1544 /* device node watch */
1545 if (is_inotify)
1546 handle_inotify(udev);
1547
1548 /*
1549 * This needs to come after the inotify handling, to make sure
1550 * that the ping is sent back only after any "change" events
1551 * generated by the inotify device node watch.
1552 *
1553 * At most once we may receive a client connection which we need to
1554 * keep open to block the client. It will be closed right before we
1555 * exit.
1556 */
1557 if (is_ctrl)
1558 ctrl_conn = handle_ctrl_msg(udev_ctrl);
1559 }
1560
1561 rc = EXIT_SUCCESS;
1562 exit:
1563 udev_queue_export_cleanup(udev_queue_export);
1564 udev_ctrl_cleanup(udev_ctrl);
1565 exit_daemonize:
1566 if (fd_ep >= 0)
1567 close(fd_ep);
1568 worker_list_cleanup(udev);
1569 event_queue_cleanup(udev, EVENT_UNDEF);
1570 udev_rules_unref(rules);
1571 udev_builtin_exit(udev);
1572 if (fd_signal >= 0)
1573 close(fd_signal);
1574 if (worker_watch[READ_END] >= 0)
1575 close(worker_watch[READ_END]);
1576 if (worker_watch[WRITE_END] >= 0)
1577 close(worker_watch[WRITE_END]);
1578 udev_monitor_unref(monitor);
1579 udev_queue_export_unref(udev_queue_export);
1580 udev_ctrl_connection_unref(ctrl_conn);
1581 udev_ctrl_unref(udev_ctrl);
1582 label_finish();
1583 udev_unref(udev);
1584 log_close();
1585 return rc;
1586 }