/*
 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "event-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "formats-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "netlink-util.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-util.h"
#include "udev.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

typedef struct Manager {
        struct udev *udev;
        sd_event *event;
        Hashmap *workers;
        struct udev_list_node events;
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        struct udev_list_node node;
        Manager *manager;
        struct udev *udev;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static inline struct event *node_to_event(struct udev_list_node *node) {
        return container_of(node, struct event, node);
}

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        struct udev_list_node node;
        int refcount;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

static void event_free(struct event *event) {
        int r;

        if (!event)
                return;

        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        assert(event->manager);

        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, UINT_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, UINT_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = worker;
        worker = NULL;

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, clock_boottime_or_monotonic(), &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}
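
/* Note: the message body is intentionally empty; the main daemon identifies
 * which worker reported back via the sender credentials (SO_PASSCRED /
 * SCM_CREDENTIALS) attached to the datagram, see on_worker() below. */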

static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_netlink_unref_ sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = event->dev;
                event->dev = NULL;

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* request TERM signal if parent exits */
                prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* reset OOM score, we only protect the main daemon */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
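                        /* Illustrative example: a partitioning or mkfs tool can take
                         * flock(LOCK_EX) on the whole-disk node while it rewrites the
                         * device, and the corresponding event is skipped here until
                         * that exclusive lock is released again. */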
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            streq_ptr("block", udev_device_get_subsystem(dev)) &&
                            !startswith(udev_device_get_sysname(dev), "dm-") &&
                            !startswith(udev_device_get_sysname(dev), "md")) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid();

        assert(manager->pid == getpid());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (udev_list_node_is_empty(&manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        udev_list_node_append(&event->node, &manager->events);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct udev_list_node *loop;
        size_t common;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &manager->events) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event, earlier can not block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourselves, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}
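
/* Illustrative example: while an event for a devpath like ".../block/sda" is
 * still queued, a later event for ".../block/sda/sda1" shares the parent's
 * devpath as a prefix up to a '/', so the partition event is held back until
 * the whole-disk event has finished. */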

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notify(false,
                  "READY=1\n"
                  "STATUS=Processing...");
}

static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;
        usec_t usec;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &manager->events) {
                struct event *event = node_to_event(loop);

                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, UINT_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}

static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (errno == EAGAIN || errno == EINTR)
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, UINT_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
                }
        }

        return 1;
}
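
/* Note: registered via sd_event_add_post() below, so this handler runs after
 * each event-loop iteration in which some other source was dispatched; that is
 * what lets the daemon notice the "queue empty" state right after workers
 * report back or events are freed. */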

static int listen_fds(int *rctrl, int *rnetlink) {
        _cleanup_udev_unref_ struct udev *udev = NULL;
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;

                udev = udev_new();
                if (!udev)
                        return -ENOMEM;

                ctrl = udev_ctrl_new(udev);
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;

                if (!udev) {
                        udev = udev_new();
                        if (!udev)
                                return -ENOMEM;
                }

                monitor = udev_monitor_new_from_netlink(udev, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(netlink_fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}
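
/* Note: under systemd the two sockets are normally handed in via socket
 * activation (presumably the systemd-udevd-control.socket and
 * systemd-udevd-kernel.socket units); the fallback branches above create and
 * bind them directly when the daemon is started on its own. */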

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log-priority=<level>              syslog priority
 *   udev.children-max=<number of workers>  events are fully serialized if set to 1
 *   udev.exec-delay=<number of seconds>    delay execution of every executed program
 *   udev.event-timeout=<number of seconds> seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value) {
        const char *full_key = key;
        int r;

        assert(key);

        if (!value)
                return 0;

        if (startswith(key, "rd."))
                key += strlen("rd.");

        if (startswith(key, "udev."))
                key += strlen("udev.");
        else
                return 0;

        if (streq(key, "log-priority")) {
                int prio;

                prio = util_log_priority(value);
                if (prio < 0)
                        goto invalid;
                log_set_max_level(prio);
        } else if (streq(key, "children-max")) {
                r = safe_atou(value, &arg_children_max);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "exec-delay")) {
                r = safe_atoi(value, &arg_exec_delay);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "event-timeout")) {
                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r < 0)
                        goto invalid;
                arg_event_timeout_usec *= USEC_PER_SEC;
                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
        }

        return 0;
invalid:
        log_warning("invalid %s ignored: %s", full_key, value);
        return 0;
}
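
/* Example (illustrative): booting with "udev.children-max=2 udev.event-timeout=300"
 * limits udevd to two concurrent workers and raises the per-event timeout to
 * 300 s (converted to microseconds above); unparsable values are logged as
 * "invalid ... ignored" and otherwise have no effect. */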

static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "     --version                Print version of the program\n"
               "     --daemon                 Detach and run in the background\n"
               "     --debug                  Enable debug output\n"
               "     --children-max=INT       Set maximum number of workers\n"
               "     --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "     --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "     --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}

static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",        no_argument,       NULL, 'd' },
                { "debug",         no_argument,       NULL, 'D' },
                { "children-max",  required_argument, NULL, 'c' },
                { "exec-delay",    required_argument, NULL, 'e' },
                { "event-timeout", required_argument, NULL, 't' },
                { "resolve-names", required_argument, NULL, 'N' },
                { "help",          no_argument,       NULL, 'h' },
                { "version",       no_argument,       NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        help();
                        return 0;
                case 'V':
                        printf("%s\n", VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker, one = 1;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        manager->udev = udev_new();
        if (!manager->udev)
                return log_error_errno(errno, "could not allocate udev context: %m");

        udev_builtin_init(manager->udev);

        manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        udev_list_node_init(&manager->events);
        udev_list_init(manager->udev, &manager->properties, true);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
        if (r < 0)
                return log_error_errno(errno, "could not enable SO_PASSCRED: %m");

        manager->fd_inotify = udev_watch_init(manager->udev);
        if (manager->fd_inotify < 0)
                return log_error_errno(ENOMEM, "error initializing inotify");

        udev_watch_restore(manager->udev);

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(errno, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = manager;
        manager = NULL;

        return 0;
}

static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notify(false,
                         "READY=1\n"
                         "STATUS=Processing...");

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int r, fd_ctrl, fd_uevent;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = parse_proc_cmdline(parse_proc_cmdline_item);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init("/dev");
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT || r == -ENOEXEC)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG)
                        (void) make_null_stdio();

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}