/*
 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "event-util.h"
#include "fileio.h"
#include "formats-util.h"
#include "hashmap.h"
#include "netlink-util.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-util.h"
#include "udev.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

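/* The Manager bundles the daemon's global state: the control and uevent
 * sockets, the inotify watch, the queue of pending events and the hashmap
 * of forked worker processes, all driven by a single sd-event loop. */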
typedef struct Manager {
        struct udev *udev;
        sd_event *event;
        Hashmap *workers;
        struct udev_list_node events;
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        struct udev_list_node node;
        Manager *manager;
        struct udev *udev;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static inline struct event *node_to_event(struct udev_list_node *node) {
        return container_of(node, struct event, node);
}

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        struct udev_list_node node;
        int refcount;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

static void event_free(struct event *event) {
        int r;

        if (!event)
                return;

        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        assert(event->manager);

        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, UINT_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, UINT_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = worker;
        worker = NULL;

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, clock_boottime_or_monotonic(), &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

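/* Fork a worker for the given event. The child keeps its copy of the
 * worker monitor, processes the event, then waits on its netlink socket
 * for further devices from the main daemon; completion is reported back
 * through the worker_watch socketpair. */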
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_netlink_unref_ sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = event->dev;
                event->dev = NULL;

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* request TERM signal if parent exits */
                prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* reset OOM score, we only protect the main daemon */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            streq_ptr("block", udev_device_get_subsystem(dev)) &&
                            !startswith(udev_device_get_sysname(dev), "dm-") &&
                            !startswith(udev_device_get_sysname(dev), "md")) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

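/* Queue a freshly received uevent. While the queue is non-empty the flag
 * file /run/udev/queue exists; it is removed again in event_free() once
 * the last queued event has been processed. */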
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid();

        assert(manager->pid == getpid());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (udev_list_node_is_empty(&manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        udev_list_node_append(&event->node, &manager->events);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
        }
}

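/* Events touching the same or related devices must be serialized: the
 * checks below compare major/minor numbers, network ifindex, old devpath
 * names and devpath prefixes (parent/child relations) against every
 * earlier event still sitting in the queue. */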
/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct udev_list_node *loop;
        size_t common;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &manager->events) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event, earlier ones cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourselves, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* device names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notify(false,
                  "READY=1\n"
                  "STATUS=Processing...");
}

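/* Walk the queue and dispatch every event that is not blocked by an
 * earlier event on a related device; rules are re-read first if their
 * timestamps changed (checked at most every 3 seconds). */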
static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;
        usec_t usec;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &manager->events) {
                struct event *event = node_to_event(loop);

                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, UINT_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}

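/* Handle inotify events for watched device nodes: IN_CLOSE_WRITE triggers
 * a synthesized "change" event, IN_IGNORED drops the watch. */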
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (errno == EAGAIN || errno == EINTR)
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, UINT_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exited with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

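/* Runs after each event-loop iteration: once the queue is empty, idle
 * workers are cleaned up, stray processes in our cgroup are killed, and
 * the loop is exited if an exit was requested. */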
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
                }
        }

        return 1;
}

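/* Take the control and netlink sockets from systemd socket activation if
 * they were passed in, otherwise create and bind them ourselves. */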
static int listen_fds(int *rctrl, int *rnetlink) {
        _cleanup_udev_unref_ struct udev *udev = NULL;
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;

                udev = udev_new();
                if (!udev)
                        return -ENOMEM;

                ctrl = udev_ctrl_new(udev);
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;

                if (!udev) {
                        udev = udev_new();
                        if (!udev)
                                return -ENOMEM;
                }

                monitor = udev_monitor_new_from_netlink(udev, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get uevent fd");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (netlink_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log-priority=<level>              syslog priority
 *   udev.children-max=<number of workers>  events are fully serialized if set to 1
 *   udev.exec-delay=<number of seconds>    delay execution of every executed program
 *   udev.event-timeout=<number of seconds> seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value) {
        const char *full_key = key;
        int r;

        assert(key);

        if (!value)
                return 0;

        if (startswith(key, "rd."))
                key += strlen("rd.");

        if (startswith(key, "udev."))
                key += strlen("udev.");
        else
                return 0;

        if (streq(key, "log-priority")) {
                int prio;

                prio = util_log_priority(value);
                if (prio < 0)
                        goto invalid;
                log_set_max_level(prio);
        } else if (streq(key, "children-max")) {
                r = safe_atou(value, &arg_children_max);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "exec-delay")) {
                r = safe_atoi(value, &arg_exec_delay);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "event-timeout")) {
                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r < 0)
                        goto invalid;
                arg_event_timeout_usec *= USEC_PER_SEC;
                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
        }

        return 0;
invalid:
        log_warning("invalid %s ignored: %s", full_key, value);
        return 0;
}

static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "     --version                Print version of the program\n"
               "     --daemon                 Detach and run in the background\n"
               "     --debug                  Enable debug output\n"
               "     --children-max=INT       Set maximum number of workers\n"
               "     --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "     --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "     --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}

static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",        no_argument,       NULL, 'd' },
                { "debug",         no_argument,       NULL, 'D' },
                { "children-max",  required_argument, NULL, 'c' },
                { "exec-delay",    required_argument, NULL, 'e' },
                { "event-timeout", required_argument, NULL, 't' },
                { "resolve-names", required_argument, NULL, 'N' },
                { "help",          no_argument,       NULL, 'h' },
                { "version",       no_argument,       NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        help();
                        return 0;
                case 'V':
                        printf("%s\n", VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

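/* Allocate the Manager, take over the passed-in sockets, set up the rules,
 * the inotify watch and signal handling, and register all sources with the
 * sd-event loop. */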
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker, one = 1;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        manager->udev = udev_new();
        if (!manager->udev)
                return log_error_errno(errno, "could not allocate udev context: %m");

        udev_builtin_init(manager->udev);

        manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        udev_list_node_init(&manager->events);
        udev_list_init(manager->udev, &manager->properties, true);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
        if (r < 0)
                return log_error_errno(errno, "could not enable SO_PASSCRED: %m");

        manager->fd_inotify = udev_watch_init(manager->udev);
        if (manager->fd_inotify < 0)
                return log_error_errno(ENOMEM, "error initializing inotify");

        udev_watch_restore(manager->udev);

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = manager;
        manager = NULL;

        return 0;
}

c0c6806b 1610 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1611 int r;
1612
1613 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1614 if (r < 0) {
1615 r = log_error_errno(r, "failed to allocate manager object: %m");
1616 goto exit;
1617 }
1618
1619 r = udev_rules_apply_static_dev_perms(manager->rules);
1620 if (r < 0)
1621 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1622
1623 (void) sd_notify(false,
1624 "READY=1\n"
1625 "STATUS=Processing...");
1626
1627 r = sd_event_loop(manager->event);
1628 if (r < 0) {
1629 log_error_errno(r, "event loop failed: %m");
1630 goto exit;
1631 }
1632
1633 sd_event_get_exit_code(manager->event, &r);
1634
1635exit:
1636 sd_notify(false,
1637 "STOPPING=1\n"
1638 "STATUS=Shutting down...");
1639 if (manager)
1640 udev_ctrl_cleanup(manager->ctrl);
1641 return r;
1642}
1643
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int r, fd_ctrl, fd_uevent;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = parse_proc_cmdline(parse_proc_cmdline_item);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init("/dev");
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind;
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1, otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT || r == -ENOEXEC)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG)
                        (void) make_null_stdio();

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}