]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
sd-event: make sure sd_event_now() cannot fail
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
7fafc032 1/*
1298001e 2 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
2f6cbd19 3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
bb38678e
SJR
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
7fafc032 6 *
55e9959b
KS
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
7fafc032 11 *
55e9959b
KS
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
7fafc032 16 *
55e9959b
KS
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
7fafc032
KS
19 */
20
a695feae 21#include <stddef.h>
7fafc032
KS
22#include <signal.h>
23#include <unistd.h>
24#include <errno.h>
25#include <stdio.h>
26#include <stdlib.h>
c3804728 27#include <stdbool.h>
7fafc032 28#include <string.h>
085cce37 29#include <fcntl.h>
b52a01ee 30#include <getopt.h>
3ebdb81e 31#include <sys/file.h>
820fc48f 32#include <sys/time.h>
1e03b754
KS
33#include <sys/prctl.h>
34#include <sys/socket.h>
35#include <sys/signalfd.h>
ff2c503d 36#include <sys/epoll.h>
ede34445 37#include <sys/mount.h>
138068d6 38#include <sys/wait.h>
dc117daa 39#include <sys/stat.h>
c895fd00 40#include <sys/ioctl.h>
01618658 41#include <sys/inotify.h>
7fafc032 42
392ef7a2 43#include "sd-daemon.h"
693d371d 44#include "sd-event.h"
8314de1d 45
40e749b5 46#include "terminal-util.h"
8314de1d 47#include "signal-util.h"
693d371d 48#include "event-util.h"
1c4baffc 49#include "netlink-util.h"
194bbe33 50#include "cgroup-util.h"
4fa4d885 51#include "process-util.h"
5ba2dc25 52#include "dev-setup.h"
a5c32cff 53#include "fileio.h"
d7b8eec7
LP
54#include "selinux-util.h"
55#include "udev.h"
56#include "udev-util.h"
6482f626 57#include "formats-util.h"
a505965d 58#include "hashmap.h"
7fafc032 59
bba7a484
TG
60static bool arg_debug = false;
61static int arg_daemonize = false;
62static int arg_resolve_names = 1;
020328e1 63static unsigned arg_children_max;
bba7a484
TG
64static int arg_exec_delay;
65static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
66static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
c0c6806b
TG
67
68typedef struct Manager {
69 struct udev *udev;
693d371d 70 sd_event *event;
c0c6806b 71 Hashmap *workers;
ecb17862 72 struct udev_list_node events;
c26d1879 73 const char *cgroup;
cb49a4f2 74 pid_t pid; /* the process that originally allocated the manager object */
c0c6806b 75
ecb17862 76 struct udev_rules *rules;
c0c6806b
TG
77 struct udev_list properties;
78
79 struct udev_monitor *monitor;
80 struct udev_ctrl *ctrl;
81 struct udev_ctrl_connection *ctrl_conn_blocking;
e237d8cb 82 int fd_inotify;
e237d8cb
TG
83 int worker_watch[2];
84
693d371d
TG
85 sd_event_source *ctrl_event;
86 sd_event_source *uevent_event;
87 sd_event_source *inotify_event;
88
7c4c7e89
TG
89 usec_t last_usec;
90
c0c6806b 91 bool stop_exec_queue:1;
c0c6806b
TG
92 bool exit:1;
93} Manager;
1e03b754 94
1e03b754 95enum event_state {
912541b0
KS
96 EVENT_UNDEF,
97 EVENT_QUEUED,
98 EVENT_RUNNING,
1e03b754
KS
99};
100
101struct event {
912541b0 102 struct udev_list_node node;
cb49a4f2 103 Manager *manager;
912541b0
KS
104 struct udev *udev;
105 struct udev_device *dev;
6969c349 106 struct udev_device *dev_kernel;
c6aa11f2 107 struct worker *worker;
912541b0 108 enum event_state state;
912541b0
KS
109 unsigned long long int delaying_seqnum;
110 unsigned long long int seqnum;
111 const char *devpath;
112 size_t devpath_len;
113 const char *devpath_old;
114 dev_t devnum;
912541b0 115 int ifindex;
ea6039a3 116 bool is_block;
693d371d
TG
117 sd_event_source *timeout_warning;
118 sd_event_source *timeout;
1e03b754
KS
119};
120
/* Recover the containing struct event from its embedded queue link
 * (events are chained through event->node on manager->events). */
static inline struct event *node_to_event(struct udev_list_node *node) {
        return container_of(node, struct event, node);
}
124
ecb17862 125static void event_queue_cleanup(Manager *manager, enum event_state type);
ff2c503d 126
1e03b754 127enum worker_state {
912541b0
KS
128 WORKER_UNDEF,
129 WORKER_RUNNING,
130 WORKER_IDLE,
131 WORKER_KILLED,
1e03b754
KS
132};
133
134struct worker {
c0c6806b 135 Manager *manager;
912541b0 136 struct udev_list_node node;
912541b0
KS
137 int refcount;
138 pid_t pid;
139 struct udev_monitor *monitor;
140 enum worker_state state;
141 struct event *event;
1e03b754
KS
142};
143
144/* passed from worker to main process */
145struct worker_message {
1e03b754
KS
146};
147
/* Release one queued event: unlink it from the queue, drop its device
 * references and timers, and detach it from its worker. When this was
 * the last event in the queue, the owning process also removes the
 * /run/udev/queue flag file (presumably consumed by clients polling for
 * an empty queue — confirm against libudev's queue API). NULL is a no-op. */
static void event_free(struct event *event) {
        int r;

        if (!event)
                return;

        /* must come before the emptiness check below */
        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        /* break the worker -> event back-pointer */
        if (event->worker)
                event->worker->event = NULL;

        assert(event->manager);

        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}
7a770250 177
/* Destroy a worker record: remove it from the manager's pid-keyed hashmap,
 * drop its monitor, free any event it was still attached to, then free the
 * struct itself. NULL is a no-op. Does NOT signal or reap the child process. */
static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, UINT_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        /* frees the in-flight event, which also clears worker->event */
        event_free(worker->event);

        free(worker);
}
190
/* Free every tracked worker and dispose of the workers hashmap itself.
 * Safe because worker_free() removes entries via hashmap_remove() while
 * HASHMAP_FOREACH iterates — assumes the project hashmap tolerates removal
 * of the current entry during iteration (as used throughout this file). */
static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}
202
c0c6806b 203static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
a505965d
TG
204 _cleanup_free_ struct worker *worker = NULL;
205 int r;
3a19b32a
TG
206
207 assert(ret);
c0c6806b 208 assert(manager);
3a19b32a
TG
209 assert(worker_monitor);
210 assert(pid > 1);
211
212 worker = new0(struct worker, 1);
213 if (!worker)
214 return -ENOMEM;
215
39c19cf1 216 worker->refcount = 1;
c0c6806b 217 worker->manager = manager;
3a19b32a
TG
218 /* close monitor, but keep address around */
219 udev_monitor_disconnect(worker_monitor);
220 worker->monitor = udev_monitor_ref(worker_monitor);
221 worker->pid = pid;
a505965d 222
c0c6806b 223 r = hashmap_ensure_allocated(&manager->workers, NULL);
a505965d
TG
224 if (r < 0)
225 return r;
226
c0c6806b 227 r = hashmap_put(manager->workers, UINT_TO_PTR(pid), worker);
a505965d
TG
228 if (r < 0)
229 return r;
230
3a19b32a 231 *ret = worker;
a505965d 232 worker = NULL;
3a19b32a
TG
233
234 return 0;
235}
236
4fa4d885
TG
237static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
238 struct event *event = userdata;
239
240 assert(event);
241 assert(event->worker);
242
243 kill_and_sigcont(event->worker->pid, SIGKILL);
244 event->worker->state = WORKER_KILLED;
245
246 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
247
248 return 1;
249}
250
251static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
252 struct event *event = userdata;
253
254 assert(event);
255
256 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
257
258 return 1;
259}
260
39c19cf1 261static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
262 sd_event *e;
263 uint64_t usec;
693d371d 264
c6aa11f2 265 assert(worker);
693d371d 266 assert(worker->manager);
c6aa11f2
TG
267 assert(event);
268 assert(!event->worker);
269 assert(!worker->event);
270
39c19cf1 271 worker->state = WORKER_RUNNING;
39c19cf1
TG
272 worker->event = event;
273 event->state = EVENT_RUNNING;
c6aa11f2 274 event->worker = worker;
693d371d
TG
275
276 e = worker->manager->event;
277
38a03f06 278 assert_se(sd_event_now(e, clock_boottime_or_monotonic(), &usec) >= 0);
693d371d
TG
279
280 (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
281 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
282
283 (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
284 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
285}
286
/* Tear down the whole manager: event sources first, then the udev context
 * and event loop, then workers and queued events, then the control/monitor
 * channels, rules, and finally the raw fds. NULL is a no-op. Also called in
 * the forked worker (see worker_spawn) to drop inherited state. */
static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        /* detach our callbacks from the event loop before dropping the loop */
        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        /* safe_close/safe_close_pair tolerate already-closed (-1) fds */
        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}
314
315DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
316
9a73bd7c
TG
317static int worker_send_message(int fd) {
318 struct worker_message message = {};
319
320 return loop_write(fd, &message, sizeof(message), false);
321}
322
c0c6806b 323static void worker_spawn(Manager *manager, struct event *event) {
912541b0 324 struct udev *udev = event->udev;
3a19b32a 325 _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
912541b0 326 pid_t pid;
b6aab8ef 327 int r = 0;
912541b0
KS
328
329 /* listen for new events */
330 worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
331 if (worker_monitor == NULL)
332 return;
333 /* allow the main daemon netlink address to send devices to the worker */
c0c6806b 334 udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
b6aab8ef
TG
335 r = udev_monitor_enable_receiving(worker_monitor);
336 if (r < 0)
337 log_error_errno(r, "worker: could not enable receiving of device: %m");
912541b0 338
912541b0
KS
339 pid = fork();
340 switch (pid) {
341 case 0: {
342 struct udev_device *dev = NULL;
1c4baffc 343 _cleanup_netlink_unref_ sd_netlink *rtnl = NULL;
912541b0 344 int fd_monitor;
e237d8cb 345 _cleanup_close_ int fd_signal = -1, fd_ep = -1;
2dd9f98d
TG
346 struct epoll_event ep_signal = { .events = EPOLLIN };
347 struct epoll_event ep_monitor = { .events = EPOLLIN };
912541b0 348 sigset_t mask;
912541b0 349
43095991 350 /* take initial device from queue */
912541b0
KS
351 dev = event->dev;
352 event->dev = NULL;
353
39fd2ca1
TG
354 unsetenv("NOTIFY_SOCKET");
355
c0c6806b 356 manager_workers_free(manager);
ecb17862 357 event_queue_cleanup(manager, EVENT_UNDEF);
6d1b1e0b 358
e237d8cb 359 manager->monitor = udev_monitor_unref(manager->monitor);
6d1b1e0b 360 manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
e237d8cb 361 manager->ctrl = udev_ctrl_unref(manager->ctrl);
693d371d 362 manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
e237d8cb 363 manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
912541b0 364
693d371d
TG
365 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
366 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
367 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
368
369 manager->event = sd_event_unref(manager->event);
370
912541b0
KS
371 sigfillset(&mask);
372 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
373 if (fd_signal < 0) {
6af5e6a4 374 r = log_error_errno(errno, "error creating signalfd %m");
912541b0
KS
375 goto out;
376 }
2dd9f98d
TG
377 ep_signal.data.fd = fd_signal;
378
379 fd_monitor = udev_monitor_get_fd(worker_monitor);
380 ep_monitor.data.fd = fd_monitor;
912541b0
KS
381
382 fd_ep = epoll_create1(EPOLL_CLOEXEC);
383 if (fd_ep < 0) {
6af5e6a4 384 r = log_error_errno(errno, "error creating epoll fd: %m");
912541b0
KS
385 goto out;
386 }
387
912541b0
KS
388 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
389 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
6af5e6a4 390 r = log_error_errno(errno, "fail to add fds to epoll: %m");
912541b0
KS
391 goto out;
392 }
393
394 /* request TERM signal if parent exits */
395 prctl(PR_SET_PDEATHSIG, SIGTERM);
396
145dae7e 397 /* reset OOM score, we only protect the main daemon */
ad118bda 398 write_string_file("/proc/self/oom_score_adj", "0", 0);
145dae7e 399
912541b0
KS
400 for (;;) {
401 struct udev_event *udev_event;
6af5e6a4 402 int fd_lock = -1;
912541b0 403
3b64e4d4
TG
404 assert(dev);
405
9f6445e3 406 log_debug("seq %llu running", udev_device_get_seqnum(dev));
912541b0
KS
407 udev_event = udev_event_new(dev);
408 if (udev_event == NULL) {
6af5e6a4 409 r = -ENOMEM;
912541b0
KS
410 goto out;
411 }
412
bba7a484
TG
413 if (arg_exec_delay > 0)
414 udev_event->exec_delay = arg_exec_delay;
912541b0 415
3ebdb81e 416 /*
2e5b17d0 417 * Take a shared lock on the device node; this establishes
3ebdb81e 418 * a concept of device "ownership" to serialize device
2e5b17d0 419 * access. External processes holding an exclusive lock will
3ebdb81e 420 * cause udev to skip the event handling; in the case udev
2e5b17d0 421 * acquired the lock, the external process can block until
3ebdb81e
KS
422 * udev has finished its event handling.
423 */
2e5b17d0
KS
424 if (!streq_ptr(udev_device_get_action(dev), "remove") &&
425 streq_ptr("block", udev_device_get_subsystem(dev)) &&
426 !startswith(udev_device_get_sysname(dev), "dm-") &&
427 !startswith(udev_device_get_sysname(dev), "md")) {
3ebdb81e
KS
428 struct udev_device *d = dev;
429
430 if (streq_ptr("partition", udev_device_get_devtype(d)))
431 d = udev_device_get_parent(d);
432
433 if (d) {
434 fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
435 if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
56f64d95 436 log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
3d06f418 437 fd_lock = safe_close(fd_lock);
3ebdb81e
KS
438 goto skip;
439 }
440 }
441 }
442
4c83d994
TG
443 /* needed for renaming netifs */
444 udev_event->rtnl = rtnl;
445
912541b0 446 /* apply rules, create node, symlinks */
adeba500
KS
447 udev_event_execute_rules(udev_event,
448 arg_event_timeout_usec, arg_event_timeout_warn_usec,
c0c6806b 449 &manager->properties,
8314de1d 450 manager->rules);
adeba500
KS
451
452 udev_event_execute_run(udev_event,
8314de1d 453 arg_event_timeout_usec, arg_event_timeout_warn_usec);
912541b0 454
523c620b
TG
455 if (udev_event->rtnl)
456 /* in case rtnl was initialized */
1c4baffc 457 rtnl = sd_netlink_ref(udev_event->rtnl);
4c83d994 458
912541b0 459 /* apply/restore inotify watch */
bf9bead1 460 if (udev_event->inotify_watch) {
912541b0
KS
461 udev_watch_begin(udev, dev);
462 udev_device_update_db(dev);
463 }
464
3d06f418 465 safe_close(fd_lock);
3ebdb81e 466
912541b0
KS
467 /* send processed event back to libudev listeners */
468 udev_monitor_send_device(worker_monitor, NULL, dev);
469
3ebdb81e 470skip:
4914cb2d 471 log_debug("seq %llu processed", udev_device_get_seqnum(dev));
b66f29a1 472
912541b0 473 /* send udevd the result of the event execution */
e237d8cb 474 r = worker_send_message(manager->worker_watch[WRITE_END]);
b66f29a1 475 if (r < 0)
9a73bd7c 476 log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
b66f29a1 477 udev_device_get_seqnum(dev));
912541b0
KS
478
479 udev_device_unref(dev);
480 dev = NULL;
481
73814ca2 482 udev_event_unref(udev_event);
47e737dc 483
912541b0
KS
484 /* wait for more device messages from main udevd, or term signal */
485 while (dev == NULL) {
486 struct epoll_event ev[4];
487 int fdcount;
488 int i;
489
8fef0ff2 490 fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
912541b0
KS
491 if (fdcount < 0) {
492 if (errno == EINTR)
493 continue;
6af5e6a4 494 r = log_error_errno(errno, "failed to poll: %m");
912541b0
KS
495 goto out;
496 }
497
498 for (i = 0; i < fdcount; i++) {
499 if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
500 dev = udev_monitor_receive_device(worker_monitor);
501 break;
502 } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
503 struct signalfd_siginfo fdsi;
504 ssize_t size;
505
506 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
507 if (size != sizeof(struct signalfd_siginfo))
508 continue;
509 switch (fdsi.ssi_signo) {
510 case SIGTERM:
511 goto out;
512 }
513 }
514 }
515 }
516 }
82063a88 517out:
912541b0 518 udev_device_unref(dev);
e237d8cb 519 manager_free(manager);
baa30fbc 520 log_close();
8b46c3fc 521 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
912541b0
KS
522 }
523 case -1:
912541b0 524 event->state = EVENT_QUEUED;
56f64d95 525 log_error_errno(errno, "fork of child failed: %m");
912541b0
KS
526 break;
527 default:
e03c7cc2
TG
528 {
529 struct worker *worker;
530
c0c6806b 531 r = worker_new(&worker, manager, worker_monitor, pid);
3a19b32a 532 if (r < 0)
e03c7cc2 533 return;
e03c7cc2 534
39c19cf1
TG
535 worker_attach_event(worker, event);
536
1fa2f38f 537 log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
912541b0
KS
538 break;
539 }
e03c7cc2 540 }
7fafc032
KS
541}
542
/* Dispatch one queued event: hand it to the first idle worker, or fork a new
 * worker if none is idle and the arg_children_max limit is not yet reached.
 * If every worker is busy and the limit is hit, the event simply stays
 * queued until on_worker() frees a slot and calls event_queue_start(). */
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                /* unicast the device to the idle worker's monitor */
                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        /* worker is unresponsive; kill it and try the next one */
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}
577
/* Append a freshly received uevent to the manager's event queue.
 *
 * Takes ownership of 'dev' on success (the caller unrefs it on failure —
 * see on_uevent()). Caches seqnum/devpath/devnum/ifindex so dependency
 * checks in is_devpath_busy() don't need libudev calls per comparison.
 * Creates the /run/udev/queue flag file when the queue becomes non-empty.
 * Returns 0 or -ENOMEM. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid();

        assert(manager->pid == getpid());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        /* keep a pristine copy of the kernel event, before rules run */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        /* queue transitions from empty: publish the flag file for clients */
        if (udev_list_node_is_empty(&manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        udev_list_node_append(&event->node, &manager->events);

        return 0;
}
623
c0c6806b 624static void manager_kill_workers(Manager *manager) {
a505965d
TG
625 struct worker *worker;
626 Iterator i;
1e03b754 627
c0c6806b
TG
628 assert(manager);
629
630 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
631 if (worker->state == WORKER_KILLED)
632 continue;
1e03b754 633
912541b0
KS
634 worker->state = WORKER_KILLED;
635 kill(worker->pid, SIGTERM);
636 }
1e03b754
KS
637}
638
/* lookup event for identical, parent, child device */
/* Return true if 'event' must wait: an earlier event in the queue touches
 * the same device (by devnum, ifindex, old devpath, or identical devpath)
 * or a parent/child devpath. Caches the blocking seqnum in
 * event->delaying_seqnum so later calls can short-circuit. The queue is
 * ordered by seqnum, which the early-exit checks rely on. */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct udev_list_node *loop;
        size_t common;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &manager->events) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event, earlier can not block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}
710
693d371d
TG
711static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
712 Manager *manager = userdata;
713
714 assert(manager);
715
716 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
717
718 sd_event_exit(manager->event, -ETIMEDOUT);
719
720 return 1;
721}
722
/* Begin orderly shutdown: stop accepting new input, flush the queue, ask
 * workers to terminate, and arm a 30s watchdog (on_exit_timeout) so a hung
 * worker cannot block exit forever. The event loop itself keeps running
 * until the workers are gone or the watchdog fires. */
static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);

        /* give workers 30 seconds before giving up on them */
        r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}
756
757/* reload requested, HUP signal received, rules changed, builtin changed */
758static void manager_reload(Manager *manager) {
759
760 assert(manager);
761
b79aacbf
TG
762 sd_notify(false,
763 "RELOADING=1\n"
764 "STATUS=Flushing configuration...");
765
62d43dac
TG
766 manager_kill_workers(manager);
767 manager->rules = udev_rules_unref(manager->rules);
768 udev_builtin_exit(manager->udev);
b79aacbf
TG
769
770 sd_notify(false,
771 "READY=1\n"
772 "STATUS=Processing...");
62d43dac
TG
773}
774
/* Walk the queue and dispatch every event that is still EVENT_QUEUED and
 * not blocked by a related in-flight event (is_devpath_busy). Also the
 * lazy (re)load point for the rules database, rate-limited to one
 * timestamp check per 3 seconds. No-op while exiting or when the exec
 * queue is stopped via udevadm control. */
static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;
        usec_t usec;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}
817
ecb17862 818static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
912541b0 819 struct udev_list_node *loop, *tmp;
ff2c503d 820
ecb17862 821 udev_list_node_foreach_safe(loop, tmp, &manager->events) {
912541b0 822 struct event *event = node_to_event(loop);
ff2c503d 823
912541b0
KS
824 if (match_type != EVENT_UNDEF && match_type != event->state)
825 continue;
ff2c503d 826
c6aa11f2 827 event_free(event);
912541b0 828 }
ff2c503d
KS
829}
830
/* Event-loop callback for the worker_watch socketpair: drain all pending
 * completion messages from workers. The empty worker_message payload is
 * ignored; the worker is identified by the kernel-verified SCM_CREDENTIALS
 * pid attached to the datagram. Each message flips the sender back to
 * WORKER_IDLE (unless already killed) and frees its finished event, then
 * the queue is re-scanned since slots just opened up. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                /* pick the credentials ancillary data out of the datagram */
                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, UINT_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
902
903static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 904 Manager *manager = userdata;
e82e8fa5
TG
905 struct udev_device *dev;
906 int r;
907
c0c6806b 908 assert(manager);
e82e8fa5 909
c0c6806b 910 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
911 if (dev) {
912 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 913 r = event_queue_insert(manager, dev);
e82e8fa5
TG
914 if (r < 0)
915 udev_device_unref(dev);
8302fe5a
TG
916 else
917 /* we have fresh events, try to schedule them */
918 event_queue_start(manager);
e82e8fa5
TG
919 }
920
921 return 1;
88f4b648
KS
922}
923
/* receive the udevd message from userspace */
/* Event-loop callback for the udev control socket (udevadm control ...).
 * A single message may carry several requests; each recognized field is
 * handled in turn: log level, stop/start of the exec queue, reload, ENV
 * set/unset, children_max, ping, and exit. Workers are killed after
 * changes that alter their inherited state (log level, properties) so
 * replacements pick up the new configuration. */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        /* "KEY=value" sets, "KEY=" unsets the property */
                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}
4a231017 1009
/* Synthesize "change" uevents for a device whose node was just closed
 * after being open for writing.
 *
 * For whole-disk block devices (except device-mapper "dm-*" nodes) we
 * first try to make the kernel re-read the partition table; if that
 * works and partitions exist, the kernel itself emits the necessary
 * events and we are done. Otherwise we write "change" into the uevent
 * file of the disk and of each partition ourselves. For any other
 * device, just synthesize a single "change" event.
 *
 * Returns 0 on success, negative errno-style code on allocation or
 * enumeration failure. */
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        /* take the BSD lock first so we do not race other
                         * programs currently holding the device */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        /* not a whole disk: a single synthetic "change" event is enough */
        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}
1113
e82e8fa5 1114static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 1115 Manager *manager = userdata;
0254e944 1116 union inotify_event_buffer buffer;
f7c1ad4f
LP
1117 struct inotify_event *e;
1118 ssize_t l;
912541b0 1119
c0c6806b 1120 assert(manager);
e82e8fa5
TG
1121
1122 l = read(fd, &buffer, sizeof(buffer));
f7c1ad4f
LP
1123 if (l < 0) {
1124 if (errno == EAGAIN || errno == EINTR)
e82e8fa5 1125 return 1;
912541b0 1126
f7c1ad4f 1127 return log_error_errno(errno, "Failed to read inotify fd: %m");
912541b0
KS
1128 }
1129
f7c1ad4f 1130 FOREACH_INOTIFY_EVENT(e, buffer, l) {
e82e8fa5 1131 _cleanup_udev_device_unref_ struct udev_device *dev = NULL;
912541b0 1132
c0c6806b 1133 dev = udev_watch_lookup(manager->udev, e->wd);
edd32000
KS
1134 if (!dev)
1135 continue;
912541b0 1136
f7c1ad4f 1137 log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
a8389097 1138 if (e->mask & IN_CLOSE_WRITE) {
edd32000 1139 synthesize_change(dev);
a8389097
TG
1140
1141 /* settle might be waiting on us to determine the queue
1142 * state. If we just handled an inotify event, we might have
1143 * generated a "change" event, but we won't have queued up
1144 * the resultant uevent yet. Do that.
1145 */
c0c6806b 1146 on_uevent(NULL, -1, 0, manager);
a8389097 1147 } else if (e->mask & IN_IGNORED)
c0c6806b 1148 udev_watch_end(manager->udev, dev);
912541b0
KS
1149 }
1150
e82e8fa5 1151 return 1;
bd284db1
SJR
1152}
1153
0561329d 1154static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1155 Manager *manager = userdata;
1156
1157 assert(manager);
1158
62d43dac 1159 manager_exit(manager);
912541b0 1160
e82e8fa5
TG
1161 return 1;
1162}
912541b0 1163
0561329d 1164static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1165 Manager *manager = userdata;
1166
1167 assert(manager);
1168
62d43dac 1169 manager_reload(manager);
912541b0 1170
e82e8fa5
TG
1171 return 1;
1172}
912541b0 1173
e82e8fa5 1174static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1175 Manager *manager = userdata;
1176
1177 assert(manager);
1178
e82e8fa5
TG
1179 for (;;) {
1180 pid_t pid;
1181 int status;
1182 struct worker *worker;
d1317d02 1183
e82e8fa5
TG
1184 pid = waitpid(-1, &status, WNOHANG);
1185 if (pid <= 0)
f29328d6 1186 break;
e82e8fa5 1187
c0c6806b 1188 worker = hashmap_get(manager->workers, UINT_TO_PTR(pid));
e82e8fa5
TG
1189 if (!worker) {
1190 log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
f29328d6 1191 continue;
912541b0 1192 }
e82e8fa5
TG
1193
1194 if (WIFEXITED(status)) {
1195 if (WEXITSTATUS(status) == 0)
1196 log_debug("worker ["PID_FMT"] exited", pid);
1197 else
1198 log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
1199 } else if (WIFSIGNALED(status)) {
1200 log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
1201 } else if (WIFSTOPPED(status)) {
1202 log_info("worker ["PID_FMT"] stopped", pid);
f29328d6 1203 continue;
e82e8fa5
TG
1204 } else if (WIFCONTINUED(status)) {
1205 log_info("worker ["PID_FMT"] continued", pid);
f29328d6 1206 continue;
e82e8fa5
TG
1207 } else
1208 log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);
1209
1210 if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
1211 if (worker->event) {
1212 log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
1213 /* delete state from disk */
1214 udev_device_delete_db(worker->event->dev);
1215 udev_device_tag_index(worker->event->dev, NULL, false);
1216 /* forward kernel event without amending it */
c0c6806b 1217 udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
e82e8fa5
TG
1218 }
1219 }
1220
1221 worker_free(worker);
912541b0 1222 }
e82e8fa5 1223
8302fe5a
TG
1224 /* we can start new workers, try to schedule events */
1225 event_queue_start(manager);
1226
e82e8fa5 1227 return 1;
f27125f9 1228}
1229
693d371d
TG
1230static int on_post(sd_event_source *s, void *userdata) {
1231 Manager *manager = userdata;
1232 int r;
1233
1234 assert(manager);
1235
1236 if (udev_list_node_is_empty(&manager->events)) {
1237 /* no pending events */
1238 if (!hashmap_isempty(manager->workers)) {
1239 /* there are idle workers */
1240 log_debug("cleanup idle workers");
1241 manager_kill_workers(manager);
1242 } else {
1243 /* we are idle */
1244 if (manager->exit) {
1245 r = sd_event_exit(manager->event, 0);
1246 if (r < 0)
1247 return r;
1248 } else if (manager->cgroup)
1249 /* cleanup possible left-over processes in our cgroup */
1250 cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
1251 }
1252 }
1253
1254 return 1;
1255}
1256
fcff1e72 1257static int listen_fds(int *rctrl, int *rnetlink) {
f59118ec 1258 _cleanup_udev_unref_ struct udev *udev = NULL;
fcff1e72 1259 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1260 int fd, n, r;
912541b0 1261
fcff1e72
TG
1262 assert(rctrl);
1263 assert(rnetlink);
1264
912541b0 1265 n = sd_listen_fds(true);
fcff1e72
TG
1266 if (n < 0)
1267 return n;
912541b0
KS
1268
1269 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1270 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1271 if (ctrl_fd >= 0)
1272 return -EINVAL;
1273 ctrl_fd = fd;
912541b0
KS
1274 continue;
1275 }
1276
1277 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1278 if (netlink_fd >= 0)
1279 return -EINVAL;
1280 netlink_fd = fd;
912541b0
KS
1281 continue;
1282 }
1283
fcff1e72 1284 return -EINVAL;
912541b0
KS
1285 }
1286
f59118ec
TG
1287 if (ctrl_fd < 0) {
1288 _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;
1289
1290 udev = udev_new();
1291 if (!udev)
1292 return -ENOMEM;
1293
1294 ctrl = udev_ctrl_new(udev);
1295 if (!ctrl)
1296 return log_error_errno(EINVAL, "error initializing udev control socket");
1297
1298 r = udev_ctrl_enable_receiving(ctrl);
1299 if (r < 0)
1300 return log_error_errno(EINVAL, "error binding udev control socket");
1301
1302 fd = udev_ctrl_get_fd(ctrl);
1303 if (fd < 0)
1304 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1305
f59118ec
TG
1306 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1307 if (ctrl_fd < 0)
1308 return log_error_errno(errno, "could not dup ctrl fd: %m");
1309 }
1310
1311 if (netlink_fd < 0) {
1312 _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;
1313
1314 if (!udev) {
1315 udev = udev_new();
1316 if (!udev)
1317 return -ENOMEM;
1318 }
1319
1320 monitor = udev_monitor_new_from_netlink(udev, "kernel");
1321 if (!monitor)
1322 return log_error_errno(EINVAL, "error initializing netlink socket");
1323
1324 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1325
1326 r = udev_monitor_enable_receiving(monitor);
1327 if (r < 0)
1328 return log_error_errno(EINVAL, "error binding netlink socket");
1329
1330 fd = udev_monitor_get_fd(monitor);
1331 if (fd < 0)
1332 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1333
1334 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1335 if (ctrl_fd < 0)
1336 return log_error_errno(errno, "could not dup netlink fd: %m");
1337 }
fcff1e72
TG
1338
1339 *rctrl = ctrl_fd;
1340 *rnetlink = netlink_fd;
912541b0 1341
912541b0 1342 return 0;
7459bcdc
KS
1343}
1344
/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log-priority=<level>              syslog priority
 *   udev.children-max=<number of workers>  events are fully serialized if set to 1
 *   udev.exec-delay=<number of seconds>    delay execution of every executed program
 *   udev.event-timeout=<number of seconds> seconds to wait before terminating an event
 */
/* Callback for parse_proc_cmdline(): handles one key=value pair.
 * Keys may carry an "rd." prefix (initrd variants are treated the same).
 * Invalid values only log a warning; the return value is always 0 so a
 * bad kernel command line never aborts startup. */
static int parse_proc_cmdline_item(const char *key, const char *value) {
        const char *full_key = key;   /* original key, kept for the warning message */
        int r;

        assert(key);

        /* only key=value items are interesting here */
        if (!value)
                return 0;

        if (startswith(key, "rd."))
                key += strlen("rd.");

        if (startswith(key, "udev."))
                key += strlen("udev.");
        else
                return 0;

        if (streq(key, "log-priority")) {
                int prio;

                prio = util_log_priority(value);
                if (prio < 0)
                        goto invalid;
                log_set_max_level(prio);
        } else if (streq(key, "children-max")) {
                r = safe_atou(value, &arg_children_max);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "exec-delay")) {
                r = safe_atoi(value, &arg_exec_delay);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "event-timeout")) {
                /* value is given in seconds, stored internally in microseconds */
                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r < 0)
                        goto invalid;
                arg_event_timeout_usec *= USEC_PER_SEC;
                /* warn after a third of the timeout, but never at 0 */
                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
        }

        return 0;
invalid:
        log_warning("invalid %s ignored: %s", full_key, value);
        return 0;
}
1397
ed216e1f
TG
/* Print command-line usage for --help to stdout. */
static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "     --version                Print version of the program\n"
               "     --daemon                 Detach and run in the background\n"
               "     --debug                  Enable debug output\n"
               "     --children-max=INT       Set maximum number of workers\n"
               "     --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "     --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "     --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}
1412
/* Parse command-line options into the arg_* globals.
 *
 * Returns 1 when the daemon should keep running, 0 when it should exit
 * successfully (--help, --version), and -EINVAL on an unknown option.
 * Invalid numeric values are logged and ignored rather than fatal. */
static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd' },
                { "debug",              no_argument,            NULL, 'D' },
                { "children-max",       required_argument,      NULL, 'c' },
                { "exec-delay",         required_argument,      NULL, 'e' },
                { "event-timeout",      required_argument,      NULL, 't' },
                { "resolve-names",      required_argument,      NULL, 'N' },
                { "help",               no_argument,            NULL, 'h' },
                { "version",            no_argument,            NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        /* given in seconds, stored in microseconds */
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                /* warn after a third of the timeout, but never at 0 */
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                /* NOTE(review): this logs an error but returns 0, i.e.
                                 * the process exits with success — confirm intended */
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        help();
                        return 0;
                case 'V':
                        printf("%s\n", VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}
1489
b7f74dd4 1490static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1491 _cleanup_(manager_freep) Manager *manager = NULL;
11b1dd8c 1492 int r, fd_worker, one = 1;
c0c6806b
TG
1493
1494 assert(ret);
11b1dd8c
TG
1495 assert(fd_ctrl >= 0);
1496 assert(fd_uevent >= 0);
c0c6806b
TG
1497
1498 manager = new0(Manager, 1);
1499 if (!manager)
1500 return log_oom();
1501
e237d8cb
TG
1502 manager->fd_inotify = -1;
1503 manager->worker_watch[WRITE_END] = -1;
1504 manager->worker_watch[READ_END] = -1;
1505
c0c6806b
TG
1506 manager->udev = udev_new();
1507 if (!manager->udev)
1508 return log_error_errno(errno, "could not allocate udev context: %m");
1509
b2d21d93
TG
1510 udev_builtin_init(manager->udev);
1511
ecb17862
TG
1512 manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
1513 if (!manager->rules)
1514 return log_error_errno(ENOMEM, "error reading rules");
1515
1516 udev_list_node_init(&manager->events);
1517 udev_list_init(manager->udev, &manager->properties, true);
1518
c26d1879
TG
1519 manager->cgroup = cgroup;
1520
f59118ec
TG
1521 manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
1522 if (!manager->ctrl)
1523 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1524
f59118ec
TG
1525 manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
1526 if (!manager->monitor)
1527 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1528
1529 /* unnamed socket from workers to the main daemon */
1530 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1531 if (r < 0)
1532 return log_error_errno(errno, "error creating socketpair: %m");
1533
693d371d 1534 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1535
693d371d 1536 r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
e237d8cb
TG
1537 if (r < 0)
1538 return log_error_errno(errno, "could not enable SO_PASSCRED: %m");
1539
1540 manager->fd_inotify = udev_watch_init(manager->udev);
1541 if (manager->fd_inotify < 0)
1542 return log_error_errno(ENOMEM, "error initializing inotify");
1543
1544 udev_watch_restore(manager->udev);
1545
1546 /* block and listen to all signals on signalfd */
72c0a2c2 1547 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1548
49f997f3
TG
1549 r = sd_event_default(&manager->event);
1550 if (r < 0)
1551 return log_error_errno(errno, "could not allocate event loop: %m");
1552
693d371d
TG
1553 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1554 if (r < 0)
1555 return log_error_errno(r, "error creating sigint event source: %m");
1556
1557 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1558 if (r < 0)
1559 return log_error_errno(r, "error creating sigterm event source: %m");
1560
1561 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1562 if (r < 0)
1563 return log_error_errno(r, "error creating sighup event source: %m");
1564
1565 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1566 if (r < 0)
1567 return log_error_errno(r, "error creating sigchld event source: %m");
1568
1569 r = sd_event_set_watchdog(manager->event, true);
1570 if (r < 0)
1571 return log_error_errno(r, "error creating watchdog event source: %m");
1572
11b1dd8c 1573 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1574 if (r < 0)
1575 return log_error_errno(r, "error creating ctrl event source: %m");
1576
1577 /* This needs to be after the inotify and uevent handling, to make sure
1578 * that the ping is send back after fully processing the pending uevents
1579 * (including the synthetic ones we may create due to inotify events).
1580 */
1581 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1582 if (r < 0)
1583 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1584
1585 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1586 if (r < 0)
1587 return log_error_errno(r, "error creating inotify event source: %m");
1588
11b1dd8c 1589 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1590 if (r < 0)
1591 return log_error_errno(r, "error creating uevent event source: %m");
1592
1593 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1594 if (r < 0)
1595 return log_error_errno(r, "error creating worker event source: %m");
1596
1597 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1598 if (r < 0)
1599 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1600
11b1dd8c
TG
1601 *ret = manager;
1602 manager = NULL;
1603
86c3bece 1604 return 0;
c0c6806b
TG
1605}
1606
077fc5e2 1607static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1608 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1609 int r;
1610
1611 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1612 if (r < 0) {
1613 r = log_error_errno(r, "failed to allocate manager object: %m");
1614 goto exit;
1615 }
1616
1617 r = udev_rules_apply_static_dev_perms(manager->rules);
1618 if (r < 0)
1619 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1620
1621 (void) sd_notify(false,
1622 "READY=1\n"
1623 "STATUS=Processing...");
1624
1625 r = sd_event_loop(manager->event);
1626 if (r < 0) {
1627 log_error_errno(r, "event loop failed: %m");
1628 goto exit;
1629 }
1630
1631 sd_event_get_exit_code(manager->event, &r);
1632
1633exit:
1634 sd_notify(false,
1635 "STOPPING=1\n"
1636 "STATUS=Shutting down...");
1637 if (manager)
1638 udev_ctrl_cleanup(manager->ctrl);
1639 return r;
1640}
1641
/* udevd entry point: parse configuration (argv, then kernel command
 * line), perform privileged one-time setup (/dev, /run/udev, SELinux
 * labelling), pick up or create the listening sockets, optionally
 * daemonize, then hand control to run(). */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int r, fd_ctrl, fd_uevent;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        /* r <= 0 covers both --help/--version (0) and parse errors (<0) */
        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = parse_proc_cmdline(parse_proc_cmdline_item);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        /* default worker limit: 8 plus two per available CPU */
        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof (cpu_set), &cpu_set) == 0) {
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;
                }

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init("/dev");
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        /* socket activation, or create and bind the sockets ourselves */
        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG)
                        (void) make_null_stdio();

                pid = fork();
                switch (pid) {
                case 0:
                        /* child: continues as the daemon */
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* parent: exits immediately, leaving the child detached */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* protect the daemon from the OOM killer (best effort) */
                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}