/*
 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "event-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "formats-util.h"
#include "hashmap.h"
#include "netlink-util.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-util.h"
#include "udev.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

typedef struct Manager {
        struct udev *udev;
        sd_event *event;
        Hashmap *workers;
        struct udev_list_node events;
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

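/* Each queued uevent is tracked as a struct event: it starts out EVENT_QUEUED,
 * becomes EVENT_RUNNING once handed to a worker process, and is freed when the
 * worker reports completion (or when the event is killed on timeout). */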
enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        struct udev_list_node node;
        Manager *manager;
        struct udev *udev;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static inline struct event *node_to_event(struct udev_list_node *node) {
        return container_of(node, struct event, node);
}

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        struct udev_list_node node;
        int refcount;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

static void event_free(struct event *event) {
        int r;

        if (!event)
                return;

        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        assert(event->manager);

        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, UINT_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, UINT_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = worker;
        worker = NULL;

        return 0;
}

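/* Two timers guard every running event: on_event_timeout_warning() only logs
 * after arg_event_timeout_warn_usec, while on_event_timeout() kills the worker
 * with SIGKILL after arg_event_timeout_usec. Both are armed in
 * worker_attach_event() below. */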
static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, clock_boottime_or_monotonic(), &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

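/* Fork a new worker process. The child takes over the event, processes it, and
 * then waits on its own netlink monitor for further devices sent to it by the
 * main daemon, until it receives SIGTERM or the parent exits. */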
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_netlink_unref_ sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = event->dev;
                event->dev = NULL;

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* request TERM signal if parent exits */
                prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* reset OOM score, we only protect the main daemon */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            streq_ptr("block", udev_device_get_subsystem(dev)) &&
                            !startswith(udev_device_get_sysname(dev), "dm-") &&
                            !startswith(udev_device_get_sysname(dev), "md")) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid();

        assert(manager->pid == getpid());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (udev_list_node_is_empty(&manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        udev_list_node_append(&event->node, &manager->events);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
        }
}

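/* Events for the same device, its parent or its children must be processed in
 * sequence; the check below walks the queue of earlier, still pending or running
 * events and returns true if one of them touches the same device node, the same
 * network interface, our old devpath, or a parent/child devpath. */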
/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct udev_list_node *loop;
        size_t common;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &manager->events) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event, earlier can not block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notify(false,
                  "READY=1\n"
                  "STATUS=Processing...");
}

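/* Dispatch all queued events that are not blocked by an earlier event for a
 * related device. Called whenever new events arrive, a worker becomes free, or
 * the exec queue is re-enabled. */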
static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;
        usec_t usec;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &manager->events) {
                struct event *event = node_to_event(loop);

                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

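/* Workers report completion by sending an empty datagram on the shared
 * socketpair; SO_PASSCRED is enabled on the receiving end, so the attached
 * SCM_CREDENTIALS control message tells us which worker PID sent it. */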
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, UINT_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}

static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (errno == EAGAIN || errno == EINTR)
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, UINT_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

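/* Runs after each event loop iteration: once the event queue is empty, idle
 * workers are cleaned up, a pending exit request is honoured, and stray
 * processes left behind in our cgroup are killed. */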
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
                }
        }

        return 1;
}

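/* The control socket and the kernel uevent netlink socket are normally passed
 * in by systemd via socket activation; if either is missing, it is created and
 * bound here instead. */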
static int listen_fds(int *rctrl, int *rnetlink) {
        _cleanup_udev_unref_ struct udev *udev = NULL;
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;

                udev = udev_new();
                if (!udev)
                        return -ENOMEM;

                ctrl = udev_ctrl_new(udev);
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;

                if (!udev) {
                        udev = udev_new();
                        if (!udev)
                                return -ENOMEM;
                }

                monitor = udev_monitor_new_from_netlink(udev, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (netlink_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log-priority=<level>               syslog priority
 *   udev.children-max=<number of workers>   events are fully serialized if set to 1
 *   udev.exec-delay=<number of seconds>     delay execution of every executed program
 *   udev.event-timeout=<number of seconds>  seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value) {
        const char *full_key = key;
        int r;

        assert(key);

        if (!value)
                return 0;

        if (startswith(key, "rd."))
                key += strlen("rd.");

        if (startswith(key, "udev."))
                key += strlen("udev.");
        else
                return 0;

        if (streq(key, "log-priority")) {
                int prio;

                prio = util_log_priority(value);
                if (prio < 0)
                        goto invalid;
                log_set_max_level(prio);
        } else if (streq(key, "children-max")) {
                r = safe_atou(value, &arg_children_max);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "exec-delay")) {
                r = safe_atoi(value, &arg_exec_delay);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "event-timeout")) {
                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r < 0)
                        goto invalid;
                arg_event_timeout_usec *= USEC_PER_SEC;
                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
        }

        return 0;
invalid:
        log_warning("invalid %s ignored: %s", full_key, value);
        return 0;
}

static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "     --version                Print version of the program\n"
               "     --daemon                 Detach and run in the background\n"
               "     --debug                  Enable debug output\n"
               "     --children-max=INT       Set maximum number of workers\n"
               "     --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "     --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "     --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}

static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",        no_argument,       NULL, 'd' },
                { "debug",         no_argument,       NULL, 'D' },
                { "children-max",  required_argument, NULL, 'c' },
                { "exec-delay",    required_argument, NULL, 'e' },
                { "event-timeout", required_argument, NULL, 't' },
                { "resolve-names", required_argument, NULL, 'N' },
                { "help",          no_argument,       NULL, 'h' },
                { "version",       no_argument,       NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        help();
                        return 0;
                case 'V':
                        printf("%s\n", VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker, one = 1;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        manager->udev = udev_new();
        if (!manager->udev)
                return log_error_errno(errno, "could not allocate udev context: %m");

        udev_builtin_init(manager->udev);

        manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        udev_list_node_init(&manager->events);
        udev_list_init(manager->udev, &manager->properties, true);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
        if (r < 0)
                return log_error_errno(errno, "could not enable SO_PASSCRED: %m");

        manager->fd_inotify = udev_watch_init(manager->udev);
        if (manager->fd_inotify < 0)
                return log_error_errno(ENOMEM, "error initializing inotify");

        udev_watch_restore(manager->udev);

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = manager;
        manager = NULL;

        return 0;
}

static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notify(false,
                         "READY=1\n"
                         "STATUS=Processing...");

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int r, fd_ctrl, fd_uevent;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = parse_proc_cmdline(parse_proc_cmdline_item);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init("/dev");
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT || r == -ENOEXEC)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG)
                        (void) make_null_stdio();

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}