/*
 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "event-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "formats-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-util.h"
#include "udev.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

typedef struct Manager {
        struct udev *udev;
        sd_event *event;
        Hashmap *workers;
        struct udev_list_node events;
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        struct udev_list_node node;
        Manager *manager;
        struct udev *udev;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static inline struct event *node_to_event(struct udev_list_node *node) {
        return container_of(node, struct event, node);
}

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        struct udev_list_node node;
        int refcount;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

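/* free an event and unlink it from the queue; /run/udev/queue is removed once the queue runs empty */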
static void event_free(struct event *event) {
        int r;

        if (!event)
                return;

        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        assert(event->manager);

        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, UINT_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

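/* wrap an already-forked worker process in a struct worker and track it in the manager's workers hashmap, keyed by PID */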
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, UINT_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = worker;
        worker = NULL;

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

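/* mark the worker as busy with this event and arm the per-event warning and kill timeouts on the manager's event loop */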
static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, clock_boottime_or_monotonic(), &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

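/* fork a worker for the given event; the child detaches from the manager's sockets and event sources, then
 * processes devices received on its own netlink monitor in an epoll loop until it receives SIGTERM */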
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_netlink_unref_ sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = event->dev;
                event->dev = NULL;

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* request TERM signal if parent exits */
                prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* reset OOM score, we only protect the main daemon */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            streq_ptr("block", udev_device_get_subsystem(dev)) &&
                            !startswith(udev_device_get_sysname(dev), "dm-") &&
                            !startswith(udev_device_get_sysname(dev), "md")) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

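/* hand the event to an idle worker if one exists, otherwise fork a new worker unless the children limit has been reached */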
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

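/* queue a freshly received uevent; /run/udev/queue is created when the queue transitions from empty to non-empty */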
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid();

        assert(manager->pid == getpid());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (udev_list_node_is_empty(&manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        udev_list_node_append(&event->node, &manager->events);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct udev_list_node *loop;
        size_t common;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &manager->events) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event, earlier can not block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notify(false,
                  "READY=1\n"
                  "STATUS=Processing...");
}

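/* walk the queue and start every event that is not blocked by a related device; rules and builtin
 * timestamps are re-checked at most every 3 seconds and trigger a reload when they changed */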
static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;
        usec_t usec;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &manager->events) {
                struct event *event = node_to_event(loop);

                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

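/* handle completion messages from workers arriving on the shared socketpair; the sender is identified via
 * SCM_CREDENTIALS, marked idle again and its finished event is freed */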
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, UINT_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

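/* called when a watched device node was closed after writing: for whole disks try to re-read the partition
 * table, otherwise synthesize "change" uevents via the devices' /sys/.../uevent files */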
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}

static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (errno == EAGAIN || errno == EINTR)
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, UINT_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
                }
        }

        return 1;
}

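/* pick up the control and netlink sockets passed in by systemd socket activation, or create and bind them
 * here if they were not passed */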
static int listen_fds(int *rctrl, int *rnetlink) {
        _cleanup_udev_unref_ struct udev *udev = NULL;
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;

                udev = udev_new();
                if (!udev)
                        return -ENOMEM;

                ctrl = udev_ctrl_new(udev);
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;

                if (!udev) {
                        udev = udev_new();
                        if (!udev)
                                return -ENOMEM;
                }

                monitor = udev_monitor_new_from_netlink(udev, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(netlink_fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log-priority=<level>               syslog priority
 *   udev.children-max=<number of workers>   events are fully serialized if set to 1
 *   udev.exec-delay=<number of seconds>     delay execution of every executed program
 *   udev.event-timeout=<number of seconds>  seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value) {
        const char *full_key = key;
        int r;

        assert(key);

        if (!value)
                return 0;

        if (startswith(key, "rd."))
                key += strlen("rd.");

        if (startswith(key, "udev."))
                key += strlen("udev.");
        else
                return 0;

        if (streq(key, "log-priority")) {
                int prio;

                prio = util_log_priority(value);
                if (prio < 0)
                        goto invalid;
                log_set_max_level(prio);
        } else if (streq(key, "children-max")) {
                r = safe_atou(value, &arg_children_max);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "exec-delay")) {
                r = safe_atoi(value, &arg_exec_delay);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "event-timeout")) {
                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r < 0)
                        goto invalid;
                arg_event_timeout_usec *= USEC_PER_SEC;
                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
        }

        return 0;
invalid:
        log_warning("invalid %s ignored: %s", full_key, value);
        return 0;
}

static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "     --version                Print version of the program\n"
               "     --daemon                 Detach and run in the background\n"
               "     --debug                  Enable debug output\n"
               "     --children-max=INT       Set maximum number of workers\n"
               "     --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "     --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "     --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}

static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",        no_argument,       NULL, 'd' },
                { "debug",         no_argument,       NULL, 'D' },
                { "children-max",  required_argument, NULL, 'c' },
                { "exec-delay",    required_argument, NULL, 'e' },
                { "event-timeout", required_argument, NULL, 't' },
                { "resolve-names", required_argument, NULL, 'N' },
                { "help",          no_argument,       NULL, 'h' },
                { "version",       no_argument,       NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        help();
                        return 0;
                case 'V':
                        printf("%s\n", VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

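/* allocate the Manager, take over the passed-in sockets, and register all sd-event sources
 * (signals, control socket, inotify, uevents, worker socketpair, post callback) */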
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker, one = 1;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        manager->udev = udev_new();
        if (!manager->udev)
                return log_error_errno(errno, "could not allocate udev context: %m");

        udev_builtin_init(manager->udev);

        manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        udev_list_node_init(&manager->events);
        udev_list_init(manager->udev, &manager->properties, true);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
        if (r < 0)
                return log_error_errno(errno, "could not enable SO_PASSCRED: %m");

        manager->fd_inotify = udev_watch_init(manager->udev);
        if (manager->fd_inotify < 0)
                return log_error_errno(ENOMEM, "error initializing inotify");

        udev_watch_restore(manager->udev);

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(errno, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = manager;
        manager = NULL;

        return 0;
}

static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notify(false,
                         "READY=1\n"
                         "STATUS=Processing...");

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int r, fd_ctrl, fd_uevent;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = parse_proc_cmdline(parse_proc_cmdline_item);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init("/dev");
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT || r == -ENOEXEC)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG)
                        (void) make_null_stdio();

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}