]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
tree-wide: make parse_proc_cmdline() strip "rd." prefix automatically
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
7fafc032 1/*
1298001e 2 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
2f6cbd19 3 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
bb38678e
SJR
4 * Copyright (C) 2009 Canonical Ltd.
5 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
7fafc032 6 *
55e9959b
KS
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
7fafc032 11 *
55e9959b
KS
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
7fafc032 16 *
55e9959b
KS
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
7fafc032
KS
19 */
20
7fafc032 21#include <errno.h>
618234a5
LP
22#include <fcntl.h>
23#include <getopt.h>
24#include <signal.h>
25#include <stdbool.h>
26#include <stddef.h>
7fafc032
KS
27#include <stdio.h>
28#include <stdlib.h>
29#include <string.h>
618234a5 30#include <sys/epoll.h>
3ebdb81e 31#include <sys/file.h>
618234a5
LP
32#include <sys/inotify.h>
33#include <sys/ioctl.h>
34#include <sys/mount.h>
1e03b754 35#include <sys/prctl.h>
1e03b754 36#include <sys/signalfd.h>
618234a5 37#include <sys/socket.h>
dc117daa 38#include <sys/stat.h>
618234a5
LP
39#include <sys/time.h>
40#include <sys/wait.h>
41#include <unistd.h>
7fafc032 42
392ef7a2 43#include "sd-daemon.h"
693d371d 44#include "sd-event.h"
8314de1d 45
b5efdb8a 46#include "alloc-util.h"
194bbe33 47#include "cgroup-util.h"
618234a5 48#include "cpu-set-util.h"
5ba2dc25 49#include "dev-setup.h"
3ffd4af2 50#include "fd-util.h"
a5c32cff 51#include "fileio.h"
6482f626 52#include "formats-util.h"
f4f15635 53#include "fs-util.h"
a505965d 54#include "hashmap.h"
c004493c 55#include "io-util.h"
618234a5 56#include "netlink-util.h"
6bedfcbb 57#include "parse-util.h"
4e731273 58#include "proc-cmdline.h"
618234a5
LP
59#include "process-util.h"
60#include "selinux-util.h"
61#include "signal-util.h"
8f328d36 62#include "socket-util.h"
07630cea 63#include "string-util.h"
618234a5
LP
64#include "terminal-util.h"
65#include "udev-util.h"
66#include "udev.h"
ee104e11 67#include "user-util.h"
7fafc032 68
bba7a484
TG
69static bool arg_debug = false;
70static int arg_daemonize = false;
71static int arg_resolve_names = 1;
020328e1 72static unsigned arg_children_max;
bba7a484
TG
73static int arg_exec_delay;
74static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
75static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
c0c6806b
TG
76
77typedef struct Manager {
78 struct udev *udev;
693d371d 79 sd_event *event;
c0c6806b 80 Hashmap *workers;
ecb17862 81 struct udev_list_node events;
c26d1879 82 const char *cgroup;
cb49a4f2 83 pid_t pid; /* the process that originally allocated the manager object */
c0c6806b 84
ecb17862 85 struct udev_rules *rules;
c0c6806b
TG
86 struct udev_list properties;
87
88 struct udev_monitor *monitor;
89 struct udev_ctrl *ctrl;
90 struct udev_ctrl_connection *ctrl_conn_blocking;
e237d8cb 91 int fd_inotify;
e237d8cb
TG
92 int worker_watch[2];
93
693d371d
TG
94 sd_event_source *ctrl_event;
95 sd_event_source *uevent_event;
96 sd_event_source *inotify_event;
97
7c4c7e89
TG
98 usec_t last_usec;
99
c0c6806b 100 bool stop_exec_queue:1;
c0c6806b
TG
101 bool exit:1;
102} Manager;
1e03b754 103
1e03b754 104enum event_state {
912541b0
KS
105 EVENT_UNDEF,
106 EVENT_QUEUED,
107 EVENT_RUNNING,
1e03b754
KS
108};
109
110struct event {
912541b0 111 struct udev_list_node node;
cb49a4f2 112 Manager *manager;
912541b0
KS
113 struct udev *udev;
114 struct udev_device *dev;
6969c349 115 struct udev_device *dev_kernel;
c6aa11f2 116 struct worker *worker;
912541b0 117 enum event_state state;
912541b0
KS
118 unsigned long long int delaying_seqnum;
119 unsigned long long int seqnum;
120 const char *devpath;
121 size_t devpath_len;
122 const char *devpath_old;
123 dev_t devnum;
912541b0 124 int ifindex;
ea6039a3 125 bool is_block;
693d371d
TG
126 sd_event_source *timeout_warning;
127 sd_event_source *timeout;
1e03b754
KS
128};
129
9ec6e95b 130static inline struct event *node_to_event(struct udev_list_node *node) {
b27ee00b 131 return container_of(node, struct event, node);
1e03b754
KS
132}
133
ecb17862 134static void event_queue_cleanup(Manager *manager, enum event_state type);
ff2c503d 135
1e03b754 136enum worker_state {
912541b0
KS
137 WORKER_UNDEF,
138 WORKER_RUNNING,
139 WORKER_IDLE,
140 WORKER_KILLED,
1e03b754
KS
141};
142
143struct worker {
c0c6806b 144 Manager *manager;
912541b0 145 struct udev_list_node node;
912541b0
KS
146 int refcount;
147 pid_t pid;
148 struct udev_monitor *monitor;
149 enum worker_state state;
150 struct event *event;
1e03b754
KS
151};
152
/* passed from worker to main process; empty — the sender's SCM_CREDENTIALS
 * identify which worker finished */
struct worker_message {
};
156
c6aa11f2 157static void event_free(struct event *event) {
cb49a4f2
TG
158 int r;
159
c6aa11f2
TG
160 if (!event)
161 return;
162
912541b0 163 udev_list_node_remove(&event->node);
912541b0 164 udev_device_unref(event->dev);
6969c349 165 udev_device_unref(event->dev_kernel);
c6aa11f2 166
693d371d
TG
167 sd_event_source_unref(event->timeout_warning);
168 sd_event_source_unref(event->timeout);
169
c6aa11f2
TG
170 if (event->worker)
171 event->worker->event = NULL;
172
cb49a4f2
TG
173 assert(event->manager);
174
175 if (udev_list_node_is_empty(&event->manager->events)) {
176 /* only clean up the queue from the process that created it */
177 if (event->manager->pid == getpid()) {
178 r = unlink("/run/udev/queue");
179 if (r < 0)
180 log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
181 }
182 }
183
912541b0 184 free(event);
aa8734ff 185}
7a770250 186
c6aa11f2
TG
187static void worker_free(struct worker *worker) {
188 if (!worker)
189 return;
bc113de9 190
c0c6806b
TG
191 assert(worker->manager);
192
4a0b58c4 193 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
912541b0 194 udev_monitor_unref(worker->monitor);
c6aa11f2
TG
195 event_free(worker->event);
196
c6aa11f2 197 free(worker);
ff2c503d
KS
198}
199
c0c6806b 200static void manager_workers_free(Manager *manager) {
a505965d
TG
201 struct worker *worker;
202 Iterator i;
ff2c503d 203
c0c6806b
TG
204 assert(manager);
205
206 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 207 worker_free(worker);
a505965d 208
c0c6806b 209 manager->workers = hashmap_free(manager->workers);
fc465079
KS
210}
211
c0c6806b 212static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
a505965d
TG
213 _cleanup_free_ struct worker *worker = NULL;
214 int r;
3a19b32a
TG
215
216 assert(ret);
c0c6806b 217 assert(manager);
3a19b32a
TG
218 assert(worker_monitor);
219 assert(pid > 1);
220
221 worker = new0(struct worker, 1);
222 if (!worker)
223 return -ENOMEM;
224
39c19cf1 225 worker->refcount = 1;
c0c6806b 226 worker->manager = manager;
3a19b32a
TG
227 /* close monitor, but keep address around */
228 udev_monitor_disconnect(worker_monitor);
229 worker->monitor = udev_monitor_ref(worker_monitor);
230 worker->pid = pid;
a505965d 231
c0c6806b 232 r = hashmap_ensure_allocated(&manager->workers, NULL);
a505965d
TG
233 if (r < 0)
234 return r;
235
4a0b58c4 236 r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
a505965d
TG
237 if (r < 0)
238 return r;
239
3a19b32a 240 *ret = worker;
a505965d 241 worker = NULL;
3a19b32a
TG
242
243 return 0;
244}
245
4fa4d885
TG
246static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
247 struct event *event = userdata;
248
249 assert(event);
250 assert(event->worker);
251
252 kill_and_sigcont(event->worker->pid, SIGKILL);
253 event->worker->state = WORKER_KILLED;
254
255 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
256
257 return 1;
258}
259
260static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
261 struct event *event = userdata;
262
263 assert(event);
264
265 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
266
267 return 1;
268}
269
39c19cf1 270static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
271 sd_event *e;
272 uint64_t usec;
693d371d 273
c6aa11f2 274 assert(worker);
693d371d 275 assert(worker->manager);
c6aa11f2
TG
276 assert(event);
277 assert(!event->worker);
278 assert(!worker->event);
279
39c19cf1 280 worker->state = WORKER_RUNNING;
39c19cf1
TG
281 worker->event = event;
282 event->state = EVENT_RUNNING;
c6aa11f2 283 event->worker = worker;
693d371d
TG
284
285 e = worker->manager->event;
286
38a03f06 287 assert_se(sd_event_now(e, clock_boottime_or_monotonic(), &usec) >= 0);
693d371d
TG
288
289 (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
290 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
291
292 (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
293 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
294}
295
e237d8cb
TG
296static void manager_free(Manager *manager) {
297 if (!manager)
298 return;
299
b2d21d93
TG
300 udev_builtin_exit(manager->udev);
301
693d371d
TG
302 sd_event_source_unref(manager->ctrl_event);
303 sd_event_source_unref(manager->uevent_event);
304 sd_event_source_unref(manager->inotify_event);
305
e237d8cb 306 udev_unref(manager->udev);
693d371d 307 sd_event_unref(manager->event);
e237d8cb
TG
308 manager_workers_free(manager);
309 event_queue_cleanup(manager, EVENT_UNDEF);
310
311 udev_monitor_unref(manager->monitor);
312 udev_ctrl_unref(manager->ctrl);
313 udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
314
315 udev_list_cleanup(&manager->properties);
316 udev_rules_unref(manager->rules);
e237d8cb 317
e237d8cb
TG
318 safe_close(manager->fd_inotify);
319 safe_close_pair(manager->worker_watch);
320
321 free(manager);
322}
323
324DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
325
9a73bd7c
TG
326static int worker_send_message(int fd) {
327 struct worker_message message = {};
328
329 return loop_write(fd, &message, sizeof(message), false);
330}
331
c0c6806b 332static void worker_spawn(Manager *manager, struct event *event) {
912541b0 333 struct udev *udev = event->udev;
3a19b32a 334 _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
912541b0 335 pid_t pid;
b6aab8ef 336 int r = 0;
912541b0
KS
337
338 /* listen for new events */
339 worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
340 if (worker_monitor == NULL)
341 return;
342 /* allow the main daemon netlink address to send devices to the worker */
c0c6806b 343 udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
b6aab8ef
TG
344 r = udev_monitor_enable_receiving(worker_monitor);
345 if (r < 0)
346 log_error_errno(r, "worker: could not enable receiving of device: %m");
912541b0 347
912541b0
KS
348 pid = fork();
349 switch (pid) {
350 case 0: {
351 struct udev_device *dev = NULL;
4afd3348 352 _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
912541b0 353 int fd_monitor;
e237d8cb 354 _cleanup_close_ int fd_signal = -1, fd_ep = -1;
2dd9f98d
TG
355 struct epoll_event ep_signal = { .events = EPOLLIN };
356 struct epoll_event ep_monitor = { .events = EPOLLIN };
912541b0 357 sigset_t mask;
912541b0 358
43095991 359 /* take initial device from queue */
912541b0
KS
360 dev = event->dev;
361 event->dev = NULL;
362
39fd2ca1
TG
363 unsetenv("NOTIFY_SOCKET");
364
c0c6806b 365 manager_workers_free(manager);
ecb17862 366 event_queue_cleanup(manager, EVENT_UNDEF);
6d1b1e0b 367
e237d8cb 368 manager->monitor = udev_monitor_unref(manager->monitor);
6d1b1e0b 369 manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
e237d8cb 370 manager->ctrl = udev_ctrl_unref(manager->ctrl);
e237d8cb 371 manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
912541b0 372
693d371d
TG
373 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
374 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
375 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
376
377 manager->event = sd_event_unref(manager->event);
378
912541b0
KS
379 sigfillset(&mask);
380 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
381 if (fd_signal < 0) {
6af5e6a4 382 r = log_error_errno(errno, "error creating signalfd %m");
912541b0
KS
383 goto out;
384 }
2dd9f98d
TG
385 ep_signal.data.fd = fd_signal;
386
387 fd_monitor = udev_monitor_get_fd(worker_monitor);
388 ep_monitor.data.fd = fd_monitor;
912541b0
KS
389
390 fd_ep = epoll_create1(EPOLL_CLOEXEC);
391 if (fd_ep < 0) {
6af5e6a4 392 r = log_error_errno(errno, "error creating epoll fd: %m");
912541b0
KS
393 goto out;
394 }
395
912541b0
KS
396 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
397 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
6af5e6a4 398 r = log_error_errno(errno, "fail to add fds to epoll: %m");
912541b0
KS
399 goto out;
400 }
401
045e00cf
ZJS
402 /* Request TERM signal if parent exits.
403 Ignore error, not much we can do in that case. */
404 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
912541b0 405
045e00cf 406 /* Reset OOM score, we only protect the main daemon. */
ad118bda 407 write_string_file("/proc/self/oom_score_adj", "0", 0);
145dae7e 408
912541b0
KS
409 for (;;) {
410 struct udev_event *udev_event;
6af5e6a4 411 int fd_lock = -1;
912541b0 412
3b64e4d4
TG
413 assert(dev);
414
9f6445e3 415 log_debug("seq %llu running", udev_device_get_seqnum(dev));
912541b0
KS
416 udev_event = udev_event_new(dev);
417 if (udev_event == NULL) {
6af5e6a4 418 r = -ENOMEM;
912541b0
KS
419 goto out;
420 }
421
bba7a484
TG
422 if (arg_exec_delay > 0)
423 udev_event->exec_delay = arg_exec_delay;
912541b0 424
3ebdb81e 425 /*
2e5b17d0 426 * Take a shared lock on the device node; this establishes
3ebdb81e 427 * a concept of device "ownership" to serialize device
2e5b17d0 428 * access. External processes holding an exclusive lock will
3ebdb81e 429 * cause udev to skip the event handling; in the case udev
2e5b17d0 430 * acquired the lock, the external process can block until
3ebdb81e
KS
431 * udev has finished its event handling.
432 */
2e5b17d0
KS
433 if (!streq_ptr(udev_device_get_action(dev), "remove") &&
434 streq_ptr("block", udev_device_get_subsystem(dev)) &&
435 !startswith(udev_device_get_sysname(dev), "dm-") &&
436 !startswith(udev_device_get_sysname(dev), "md")) {
3ebdb81e
KS
437 struct udev_device *d = dev;
438
439 if (streq_ptr("partition", udev_device_get_devtype(d)))
440 d = udev_device_get_parent(d);
441
442 if (d) {
443 fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
444 if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
56f64d95 445 log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
3d06f418 446 fd_lock = safe_close(fd_lock);
3ebdb81e
KS
447 goto skip;
448 }
449 }
450 }
451
4c83d994
TG
452 /* needed for renaming netifs */
453 udev_event->rtnl = rtnl;
454
912541b0 455 /* apply rules, create node, symlinks */
adeba500
KS
456 udev_event_execute_rules(udev_event,
457 arg_event_timeout_usec, arg_event_timeout_warn_usec,
c0c6806b 458 &manager->properties,
8314de1d 459 manager->rules);
adeba500
KS
460
461 udev_event_execute_run(udev_event,
8314de1d 462 arg_event_timeout_usec, arg_event_timeout_warn_usec);
912541b0 463
523c620b
TG
464 if (udev_event->rtnl)
465 /* in case rtnl was initialized */
1c4baffc 466 rtnl = sd_netlink_ref(udev_event->rtnl);
4c83d994 467
912541b0 468 /* apply/restore inotify watch */
bf9bead1 469 if (udev_event->inotify_watch) {
912541b0
KS
470 udev_watch_begin(udev, dev);
471 udev_device_update_db(dev);
472 }
473
3d06f418 474 safe_close(fd_lock);
3ebdb81e 475
912541b0
KS
476 /* send processed event back to libudev listeners */
477 udev_monitor_send_device(worker_monitor, NULL, dev);
478
3ebdb81e 479skip:
4914cb2d 480 log_debug("seq %llu processed", udev_device_get_seqnum(dev));
b66f29a1 481
912541b0 482 /* send udevd the result of the event execution */
e237d8cb 483 r = worker_send_message(manager->worker_watch[WRITE_END]);
b66f29a1 484 if (r < 0)
9a73bd7c 485 log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
b66f29a1 486 udev_device_get_seqnum(dev));
912541b0
KS
487
488 udev_device_unref(dev);
489 dev = NULL;
490
73814ca2 491 udev_event_unref(udev_event);
47e737dc 492
912541b0
KS
493 /* wait for more device messages from main udevd, or term signal */
494 while (dev == NULL) {
495 struct epoll_event ev[4];
496 int fdcount;
497 int i;
498
8fef0ff2 499 fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
912541b0
KS
500 if (fdcount < 0) {
501 if (errno == EINTR)
502 continue;
6af5e6a4 503 r = log_error_errno(errno, "failed to poll: %m");
912541b0
KS
504 goto out;
505 }
506
507 for (i = 0; i < fdcount; i++) {
508 if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
509 dev = udev_monitor_receive_device(worker_monitor);
510 break;
511 } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
512 struct signalfd_siginfo fdsi;
513 ssize_t size;
514
515 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
516 if (size != sizeof(struct signalfd_siginfo))
517 continue;
518 switch (fdsi.ssi_signo) {
519 case SIGTERM:
520 goto out;
521 }
522 }
523 }
524 }
525 }
82063a88 526out:
912541b0 527 udev_device_unref(dev);
e237d8cb 528 manager_free(manager);
baa30fbc 529 log_close();
8b46c3fc 530 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
912541b0
KS
531 }
532 case -1:
912541b0 533 event->state = EVENT_QUEUED;
56f64d95 534 log_error_errno(errno, "fork of child failed: %m");
912541b0
KS
535 break;
536 default:
e03c7cc2
TG
537 {
538 struct worker *worker;
539
c0c6806b 540 r = worker_new(&worker, manager, worker_monitor, pid);
3a19b32a 541 if (r < 0)
e03c7cc2 542 return;
e03c7cc2 543
39c19cf1
TG
544 worker_attach_event(worker, event);
545
1fa2f38f 546 log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
912541b0
KS
547 break;
548 }
e03c7cc2 549 }
7fafc032
KS
550}
551
c0c6806b 552static void event_run(Manager *manager, struct event *event) {
a505965d
TG
553 struct worker *worker;
554 Iterator i;
912541b0 555
c0c6806b
TG
556 assert(manager);
557 assert(event);
558
559 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
560 ssize_t count;
561
562 if (worker->state != WORKER_IDLE)
563 continue;
564
c0c6806b 565 count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
912541b0 566 if (count < 0) {
1fa2f38f
ZJS
567 log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
568 worker->pid, count);
912541b0
KS
569 kill(worker->pid, SIGKILL);
570 worker->state = WORKER_KILLED;
571 continue;
572 }
39c19cf1 573 worker_attach_event(worker, event);
912541b0
KS
574 return;
575 }
576
c0c6806b 577 if (hashmap_size(manager->workers) >= arg_children_max) {
bba7a484 578 if (arg_children_max > 1)
c0c6806b 579 log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
912541b0
KS
580 return;
581 }
582
583 /* start new worker and pass initial device */
c0c6806b 584 worker_spawn(manager, event);
1e03b754
KS
585}
586
ecb17862 587static int event_queue_insert(Manager *manager, struct udev_device *dev) {
912541b0 588 struct event *event;
cb49a4f2 589 int r;
912541b0 590
ecb17862
TG
591 assert(manager);
592 assert(dev);
593
040e6896
TG
594 /* only one process can add events to the queue */
595 if (manager->pid == 0)
596 manager->pid = getpid();
597
cb49a4f2
TG
598 assert(manager->pid == getpid());
599
955d98c9 600 event = new0(struct event, 1);
cb49a4f2
TG
601 if (!event)
602 return -ENOMEM;
912541b0
KS
603
604 event->udev = udev_device_get_udev(dev);
cb49a4f2 605 event->manager = manager;
912541b0 606 event->dev = dev;
6969c349
TG
607 event->dev_kernel = udev_device_shallow_clone(dev);
608 udev_device_copy_properties(event->dev_kernel, dev);
912541b0
KS
609 event->seqnum = udev_device_get_seqnum(dev);
610 event->devpath = udev_device_get_devpath(dev);
611 event->devpath_len = strlen(event->devpath);
612 event->devpath_old = udev_device_get_devpath_old(dev);
613 event->devnum = udev_device_get_devnum(dev);
ea6039a3 614 event->is_block = streq("block", udev_device_get_subsystem(dev));
912541b0
KS
615 event->ifindex = udev_device_get_ifindex(dev);
616
9f6445e3 617 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
912541b0
KS
618 udev_device_get_action(dev), udev_device_get_subsystem(dev));
619
620 event->state = EVENT_QUEUED;
cb49a4f2
TG
621
622 if (udev_list_node_is_empty(&manager->events)) {
623 r = touch("/run/udev/queue");
624 if (r < 0)
625 log_warning_errno(r, "could not touch /run/udev/queue: %m");
626 }
627
ecb17862 628 udev_list_node_append(&event->node, &manager->events);
cb49a4f2 629
912541b0 630 return 0;
fc465079
KS
631}
632
c0c6806b 633static void manager_kill_workers(Manager *manager) {
a505965d
TG
634 struct worker *worker;
635 Iterator i;
1e03b754 636
c0c6806b
TG
637 assert(manager);
638
639 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
640 if (worker->state == WORKER_KILLED)
641 continue;
1e03b754 642
912541b0
KS
643 worker->state = WORKER_KILLED;
644 kill(worker->pid, SIGTERM);
645 }
1e03b754
KS
646}
647
e3196993 648/* lookup event for identical, parent, child device */
ecb17862 649static bool is_devpath_busy(Manager *manager, struct event *event) {
912541b0
KS
650 struct udev_list_node *loop;
651 size_t common;
652
653 /* check if queue contains events we depend on */
ecb17862 654 udev_list_node_foreach(loop, &manager->events) {
912541b0
KS
655 struct event *loop_event = node_to_event(loop);
656
657 /* we already found a later event, earlier can not block us, no need to check again */
658 if (loop_event->seqnum < event->delaying_seqnum)
659 continue;
660
661 /* event we checked earlier still exists, no need to check again */
662 if (loop_event->seqnum == event->delaying_seqnum)
663 return true;
664
665 /* found ourself, no later event can block us */
666 if (loop_event->seqnum >= event->seqnum)
667 break;
668
669 /* check major/minor */
670 if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
671 return true;
672
673 /* check network device ifindex */
674 if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
675 return true;
676
677 /* check our old name */
090be865 678 if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
912541b0
KS
679 event->delaying_seqnum = loop_event->seqnum;
680 return true;
681 }
682
683 /* compare devpath */
684 common = MIN(loop_event->devpath_len, event->devpath_len);
685
686 /* one devpath is contained in the other? */
687 if (memcmp(loop_event->devpath, event->devpath, common) != 0)
688 continue;
689
690 /* identical device event found */
691 if (loop_event->devpath_len == event->devpath_len) {
692 /* devices names might have changed/swapped in the meantime */
693 if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
694 continue;
695 if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
696 continue;
697 event->delaying_seqnum = loop_event->seqnum;
698 return true;
699 }
700
701 /* parent device event found */
702 if (event->devpath[common] == '/') {
703 event->delaying_seqnum = loop_event->seqnum;
704 return true;
705 }
706
707 /* child device event found */
708 if (loop_event->devpath[common] == '/') {
709 event->delaying_seqnum = loop_event->seqnum;
710 return true;
711 }
712
713 /* no matching device */
714 continue;
715 }
716
717 return false;
7fafc032
KS
718}
719
693d371d
TG
720static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
721 Manager *manager = userdata;
722
723 assert(manager);
724
725 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
726
727 sd_event_exit(manager->event, -ETIMEDOUT);
728
729 return 1;
730}
731
62d43dac 732static void manager_exit(Manager *manager) {
693d371d
TG
733 uint64_t usec;
734 int r;
62d43dac
TG
735
736 assert(manager);
737
738 manager->exit = true;
739
b79aacbf
TG
740 sd_notify(false,
741 "STOPPING=1\n"
742 "STATUS=Starting shutdown...");
743
62d43dac 744 /* close sources of new events and discard buffered events */
693d371d 745 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
ab7854df 746 manager->ctrl = udev_ctrl_unref(manager->ctrl);
62d43dac 747
693d371d 748 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
ab7854df 749 manager->fd_inotify = safe_close(manager->fd_inotify);
62d43dac 750
693d371d 751 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
ab7854df 752 manager->monitor = udev_monitor_unref(manager->monitor);
62d43dac
TG
753
754 /* discard queued events and kill workers */
755 event_queue_cleanup(manager, EVENT_QUEUED);
756 manager_kill_workers(manager);
693d371d 757
38a03f06 758 assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
693d371d
TG
759
760 r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
761 usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
762 if (r < 0)
763 return;
62d43dac
TG
764}
765
766/* reload requested, HUP signal received, rules changed, builtin changed */
767static void manager_reload(Manager *manager) {
768
769 assert(manager);
770
b79aacbf
TG
771 sd_notify(false,
772 "RELOADING=1\n"
773 "STATUS=Flushing configuration...");
774
62d43dac
TG
775 manager_kill_workers(manager);
776 manager->rules = udev_rules_unref(manager->rules);
777 udev_builtin_exit(manager->udev);
b79aacbf 778
1ef72b55
MS
779 sd_notifyf(false,
780 "READY=1\n"
781 "STATUS=Processing with %u children at max", arg_children_max);
62d43dac
TG
782}
783
c0c6806b 784static void event_queue_start(Manager *manager) {
912541b0 785 struct udev_list_node *loop;
693d371d 786 usec_t usec;
8ab44e3f 787
c0c6806b
TG
788 assert(manager);
789
7c4c7e89
TG
790 if (udev_list_node_is_empty(&manager->events) ||
791 manager->exit || manager->stop_exec_queue)
792 return;
793
38a03f06
LP
794 assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
795 /* check for changed config, every 3 seconds at most */
796 if (manager->last_usec == 0 ||
797 (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
798 if (udev_rules_check_timestamp(manager->rules) ||
799 udev_builtin_validate(manager->udev))
800 manager_reload(manager);
693d371d 801
38a03f06 802 manager->last_usec = usec;
7c4c7e89
TG
803 }
804
805 udev_builtin_init(manager->udev);
806
807 if (!manager->rules) {
808 manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
809 if (!manager->rules)
810 return;
811 }
812
ecb17862 813 udev_list_node_foreach(loop, &manager->events) {
912541b0 814 struct event *event = node_to_event(loop);
0bc74ea7 815
912541b0
KS
816 if (event->state != EVENT_QUEUED)
817 continue;
0bc74ea7 818
912541b0 819 /* do not start event if parent or child event is still running */
ecb17862 820 if (is_devpath_busy(manager, event))
912541b0 821 continue;
fc465079 822
c0c6806b 823 event_run(manager, event);
912541b0 824 }
1e03b754
KS
825}
826
ecb17862 827static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
912541b0 828 struct udev_list_node *loop, *tmp;
ff2c503d 829
ecb17862 830 udev_list_node_foreach_safe(loop, tmp, &manager->events) {
912541b0 831 struct event *event = node_to_event(loop);
ff2c503d 832
912541b0
KS
833 if (match_type != EVENT_UNDEF && match_type != event->state)
834 continue;
ff2c503d 835
c6aa11f2 836 event_free(event);
912541b0 837 }
ff2c503d
KS
838}
839
e82e8fa5 840static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b
TG
841 Manager *manager = userdata;
842
843 assert(manager);
844
912541b0
KS
845 for (;;) {
846 struct worker_message msg;
979558f3
TG
847 struct iovec iovec = {
848 .iov_base = &msg,
849 .iov_len = sizeof(msg),
850 };
851 union {
852 struct cmsghdr cmsghdr;
853 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
854 } control = {};
855 struct msghdr msghdr = {
856 .msg_iov = &iovec,
857 .msg_iovlen = 1,
858 .msg_control = &control,
859 .msg_controllen = sizeof(control),
860 };
861 struct cmsghdr *cmsg;
912541b0 862 ssize_t size;
979558f3 863 struct ucred *ucred = NULL;
a505965d 864 struct worker *worker;
912541b0 865
e82e8fa5 866 size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
979558f3 867 if (size < 0) {
738a7907
TG
868 if (errno == EINTR)
869 continue;
870 else if (errno == EAGAIN)
871 /* nothing more to read */
872 break;
979558f3 873
e82e8fa5 874 return log_error_errno(errno, "failed to receive message: %m");
979558f3
TG
875 } else if (size != sizeof(struct worker_message)) {
876 log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
e82e8fa5 877 continue;
979558f3
TG
878 }
879
2a1288ff 880 CMSG_FOREACH(cmsg, &msghdr) {
979558f3
TG
881 if (cmsg->cmsg_level == SOL_SOCKET &&
882 cmsg->cmsg_type == SCM_CREDENTIALS &&
883 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
884 ucred = (struct ucred*) CMSG_DATA(cmsg);
885 }
886
887 if (!ucred || ucred->pid <= 0) {
888 log_warning_errno(EIO, "ignoring worker message without valid PID");
889 continue;
890 }
912541b0
KS
891
892 /* lookup worker who sent the signal */
4a0b58c4 893 worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
a505965d
TG
894 if (!worker) {
895 log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
896 continue;
912541b0 897 }
c0bbfd72 898
a505965d
TG
899 if (worker->state != WORKER_KILLED)
900 worker->state = WORKER_IDLE;
901
902 /* worker returned */
903 event_free(worker->event);
912541b0 904 }
e82e8fa5 905
8302fe5a
TG
906 /* we have free workers, try to schedule events */
907 event_queue_start(manager);
908
e82e8fa5
TG
909 return 1;
910}
911
912static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 913 Manager *manager = userdata;
e82e8fa5
TG
914 struct udev_device *dev;
915 int r;
916
c0c6806b 917 assert(manager);
e82e8fa5 918
c0c6806b 919 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
920 if (dev) {
921 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 922 r = event_queue_insert(manager, dev);
e82e8fa5
TG
923 if (r < 0)
924 udev_device_unref(dev);
8302fe5a
TG
925 else
926 /* we have fresh events, try to schedule them */
927 event_queue_start(manager);
e82e8fa5
TG
928 }
929
930 return 1;
88f4b648
KS
931}
932
/* receive the udevd message from userspace
 *
 * Event-loop callback for the udev control socket (udevadm control ...).
 * Accepts one connection, reads one control message and applies every
 * command it carries. Always returns 1 so the event source stays active.
 */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                /* NOTE(review): presumably workers are killed so that freshly
                 * forked ones inherit the new log level — confirm */
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                /* events may have accumulated while the queue was stopped */
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        /* ENV command: "key=value" sets a global property, "key=" unsets it */
        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                /* OOM on strdup is silently ignored; workers are still killed below */
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                /* split "key=value" in place at the '=' */
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                 TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}
4a231017 1022
/* Synthesize "change" uevents for a device whose node was closed after
 * being open for writing (see the IN_CLOSE_WRITE handling in on_inotify()).
 *
 * For whole disks (subsystem "block", devtype "disk", excluding
 * device-mapper "dm-*" nodes) a partition-table re-read is attempted first;
 * depending on the outcome, "change" is written to the uevent file of the
 * disk and/or each of its partitions. For all other devices a single
 * "change" is written for the device itself.
 *
 * Returns 0 on success, negative errno on allocation/enumeration failure.
 */
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        /* exclusive, non-blocking lock: skip the re-read if
                         * somebody else holds the device locked */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                /* write errors are deliberately ignored: best effort */
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        /* non-disk device: synthesize a single "change" for the device itself */
        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}
1126
/* Event-loop callback for the inotify fd that watches device nodes.
 *
 * A IN_CLOSE_WRITE event means some process closed a device node it had
 * open for writing; we then synthesize a "change" uevent for it.
 * IN_IGNORED means the watch went away, so we drop our bookkeeping.
 * Always returns 1 (keep the event source active), except on read errors.
 */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                /* spurious wakeup or interrupted read: not an error */
                if (errno == EAGAIN || errno == EINTR)
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                /* map the watch descriptor back to the watched device */
                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}
1166
0561329d 1167static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1168 Manager *manager = userdata;
1169
1170 assert(manager);
1171
62d43dac 1172 manager_exit(manager);
912541b0 1173
e82e8fa5
TG
1174 return 1;
1175}
912541b0 1176
0561329d 1177static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1178 Manager *manager = userdata;
1179
1180 assert(manager);
1181
62d43dac 1182 manager_reload(manager);
912541b0 1183
e82e8fa5
TG
1184 return 1;
1185}
912541b0 1186
/* SIGCHLD handler: reap all exited worker processes.
 *
 * Loops over waitpid(WNOHANG) until no more children are pending. For each
 * reaped worker the exit status is logged; if the worker died abnormally
 * while handling an event, the event's on-disk state is removed and the
 * unmodified kernel uevent is forwarded to listeners. Finally tries to
 * schedule queued events on the now-free worker slots.
 */
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        /* stopped, not gone: keep tracking the worker */
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                /* abnormal death while an event was in flight: clean up its
                 * persisted state and forward the raw kernel event instead */
                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
1242
/* Post event source: runs after every event-loop iteration.
 *
 * When the event queue has drained, either reclaim idle workers, exit the
 * event loop if an EXIT request is pending, or sweep left-over processes
 * out of our cgroup.
 */
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
                }
        }

        return 1;
}
1269
fcff1e72 1270static int listen_fds(int *rctrl, int *rnetlink) {
f59118ec 1271 _cleanup_udev_unref_ struct udev *udev = NULL;
fcff1e72 1272 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1273 int fd, n, r;
912541b0 1274
fcff1e72
TG
1275 assert(rctrl);
1276 assert(rnetlink);
1277
912541b0 1278 n = sd_listen_fds(true);
fcff1e72
TG
1279 if (n < 0)
1280 return n;
912541b0
KS
1281
1282 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1283 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1284 if (ctrl_fd >= 0)
1285 return -EINVAL;
1286 ctrl_fd = fd;
912541b0
KS
1287 continue;
1288 }
1289
1290 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1291 if (netlink_fd >= 0)
1292 return -EINVAL;
1293 netlink_fd = fd;
912541b0
KS
1294 continue;
1295 }
1296
fcff1e72 1297 return -EINVAL;
912541b0
KS
1298 }
1299
f59118ec
TG
1300 if (ctrl_fd < 0) {
1301 _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;
1302
1303 udev = udev_new();
1304 if (!udev)
1305 return -ENOMEM;
1306
1307 ctrl = udev_ctrl_new(udev);
1308 if (!ctrl)
1309 return log_error_errno(EINVAL, "error initializing udev control socket");
1310
1311 r = udev_ctrl_enable_receiving(ctrl);
1312 if (r < 0)
1313 return log_error_errno(EINVAL, "error binding udev control socket");
1314
1315 fd = udev_ctrl_get_fd(ctrl);
1316 if (fd < 0)
1317 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1318
f59118ec
TG
1319 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1320 if (ctrl_fd < 0)
1321 return log_error_errno(errno, "could not dup ctrl fd: %m");
1322 }
1323
1324 if (netlink_fd < 0) {
1325 _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;
1326
1327 if (!udev) {
1328 udev = udev_new();
1329 if (!udev)
1330 return -ENOMEM;
1331 }
1332
1333 monitor = udev_monitor_new_from_netlink(udev, "kernel");
1334 if (!monitor)
1335 return log_error_errno(EINVAL, "error initializing netlink socket");
1336
1337 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1338
1339 r = udev_monitor_enable_receiving(monitor);
1340 if (r < 0)
1341 return log_error_errno(EINVAL, "error binding netlink socket");
1342
1343 fd = udev_monitor_get_fd(monitor);
1344 if (fd < 0)
1345 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1346
1347 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1348 if (ctrl_fd < 0)
1349 return log_error_errno(errno, "could not dup netlink fd: %m");
1350 }
fcff1e72
TG
1351
1352 *rctrl = ctrl_fd;
1353 *rnetlink = netlink_fd;
912541b0 1354
912541b0 1355 return 0;
7459bcdc
KS
1356}
1357
e6f86cac 1358/*
3f85ef0f 1359 * read the kernel command line, in case we need to get into debug mode
614a823c
TG
1360 * udev.log-priority=<level> syslog priority
1361 * udev.children-max=<number of workers> events are fully serialized if set to 1
1362 * udev.exec-delay=<number of seconds> delay execution of every executed program
1363 * udev.event-timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1364 */
96287a49 1365static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
92e72467 1366 int r = 0;
e6f86cac 1367
614a823c 1368 assert(key);
e6f86cac 1369
614a823c
TG
1370 if (!value)
1371 return 0;
e6f86cac 1372
92e72467
ZJS
1373 if (streq(key, "udev.log-priority") && value) {
1374 r = util_log_priority(value);
1375 if (r >= 0)
1376 log_set_max_level(r);
1377 } else if (streq(key, "udev.event-timeout") && value) {
1378 r = safe_atou64(value, &arg_event_timeout_usec);
1379 if (r >= 0) {
1380 arg_event_timeout_usec *= USEC_PER_SEC;
1381 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1382 }
1383 } else if (streq(key, "udev.children-max") && value)
020328e1 1384 r = safe_atou(value, &arg_children_max);
92e72467 1385 else if (streq(key, "udev.exec-delay") && value)
614a823c 1386 r = safe_atoi(value, &arg_exec_delay);
92e72467
ZJS
1387 else if (startswith(key, "udev."))
1388 log_warning("Unknown udev kernel command line option \"%s\"", key);
614a823c 1389
92e72467
ZJS
1390 if (r < 0)
1391 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
614a823c 1392 return 0;
e6f86cac
KS
1393}
1394
/* Print command line usage to stdout. */
static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "     --version                Print version of the program\n"
               "     --daemon                 Detach and run in the background\n"
               "     --debug                  Enable debug output\n"
               "     --children-max=INT       Set maximum number of workers\n"
               "     --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "     --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "     --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}
1409
bba7a484 1410static int parse_argv(int argc, char *argv[]) {
912541b0 1411 static const struct option options[] = {
bba7a484
TG
1412 { "daemon", no_argument, NULL, 'd' },
1413 { "debug", no_argument, NULL, 'D' },
1414 { "children-max", required_argument, NULL, 'c' },
1415 { "exec-delay", required_argument, NULL, 'e' },
1416 { "event-timeout", required_argument, NULL, 't' },
1417 { "resolve-names", required_argument, NULL, 'N' },
1418 { "help", no_argument, NULL, 'h' },
1419 { "version", no_argument, NULL, 'V' },
912541b0
KS
1420 {}
1421 };
689a97f5 1422
bba7a484 1423 int c;
689a97f5 1424
bba7a484
TG
1425 assert(argc >= 0);
1426 assert(argv);
912541b0 1427
e14b6f21 1428 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1429 int r;
912541b0 1430
bba7a484 1431 switch (c) {
912541b0 1432
912541b0 1433 case 'd':
bba7a484 1434 arg_daemonize = true;
912541b0
KS
1435 break;
1436 case 'c':
020328e1 1437 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1438 if (r < 0)
1439 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1440 break;
1441 case 'e':
6f5cf8a8
TG
1442 r = safe_atoi(optarg, &arg_exec_delay);
1443 if (r < 0)
1444 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1445 break;
9719859c 1446 case 't':
f1e8664e
TG
1447 r = safe_atou64(optarg, &arg_event_timeout_usec);
1448 if (r < 0)
65fea570 1449 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1450 else {
1451 arg_event_timeout_usec *= USEC_PER_SEC;
1452 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1453 }
9719859c 1454 break;
912541b0 1455 case 'D':
bba7a484 1456 arg_debug = true;
912541b0
KS
1457 break;
1458 case 'N':
090be865 1459 if (streq(optarg, "early")) {
bba7a484 1460 arg_resolve_names = 1;
090be865 1461 } else if (streq(optarg, "late")) {
bba7a484 1462 arg_resolve_names = 0;
090be865 1463 } else if (streq(optarg, "never")) {
bba7a484 1464 arg_resolve_names = -1;
912541b0 1465 } else {
9f6445e3 1466 log_error("resolve-names must be early, late or never");
bba7a484 1467 return 0;
912541b0
KS
1468 }
1469 break;
1470 case 'h':
ed216e1f 1471 help();
bba7a484 1472 return 0;
912541b0
KS
1473 case 'V':
1474 printf("%s\n", VERSION);
bba7a484
TG
1475 return 0;
1476 case '?':
1477 return -EINVAL;
912541b0 1478 default:
bba7a484
TG
1479 assert_not_reached("Unhandled option");
1480
912541b0
KS
1481 }
1482 }
1483
bba7a484
TG
1484 return 1;
1485}
1486
b7f74dd4 1487static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1488 _cleanup_(manager_freep) Manager *manager = NULL;
11b1dd8c 1489 int r, fd_worker, one = 1;
c0c6806b
TG
1490
1491 assert(ret);
11b1dd8c
TG
1492 assert(fd_ctrl >= 0);
1493 assert(fd_uevent >= 0);
c0c6806b
TG
1494
1495 manager = new0(Manager, 1);
1496 if (!manager)
1497 return log_oom();
1498
e237d8cb
TG
1499 manager->fd_inotify = -1;
1500 manager->worker_watch[WRITE_END] = -1;
1501 manager->worker_watch[READ_END] = -1;
1502
c0c6806b
TG
1503 manager->udev = udev_new();
1504 if (!manager->udev)
1505 return log_error_errno(errno, "could not allocate udev context: %m");
1506
b2d21d93
TG
1507 udev_builtin_init(manager->udev);
1508
ecb17862
TG
1509 manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
1510 if (!manager->rules)
1511 return log_error_errno(ENOMEM, "error reading rules");
1512
1513 udev_list_node_init(&manager->events);
1514 udev_list_init(manager->udev, &manager->properties, true);
1515
c26d1879
TG
1516 manager->cgroup = cgroup;
1517
f59118ec
TG
1518 manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
1519 if (!manager->ctrl)
1520 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1521
f59118ec
TG
1522 manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
1523 if (!manager->monitor)
1524 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1525
1526 /* unnamed socket from workers to the main daemon */
1527 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1528 if (r < 0)
1529 return log_error_errno(errno, "error creating socketpair: %m");
1530
693d371d 1531 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1532
693d371d 1533 r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
e237d8cb
TG
1534 if (r < 0)
1535 return log_error_errno(errno, "could not enable SO_PASSCRED: %m");
1536
1537 manager->fd_inotify = udev_watch_init(manager->udev);
1538 if (manager->fd_inotify < 0)
1539 return log_error_errno(ENOMEM, "error initializing inotify");
1540
1541 udev_watch_restore(manager->udev);
1542
1543 /* block and listen to all signals on signalfd */
72c0a2c2 1544 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1545
49f997f3
TG
1546 r = sd_event_default(&manager->event);
1547 if (r < 0)
709f6e46 1548 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1549
693d371d
TG
1550 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1551 if (r < 0)
1552 return log_error_errno(r, "error creating sigint event source: %m");
1553
1554 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1555 if (r < 0)
1556 return log_error_errno(r, "error creating sigterm event source: %m");
1557
1558 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1559 if (r < 0)
1560 return log_error_errno(r, "error creating sighup event source: %m");
1561
1562 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1563 if (r < 0)
1564 return log_error_errno(r, "error creating sigchld event source: %m");
1565
1566 r = sd_event_set_watchdog(manager->event, true);
1567 if (r < 0)
1568 return log_error_errno(r, "error creating watchdog event source: %m");
1569
11b1dd8c 1570 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1571 if (r < 0)
1572 return log_error_errno(r, "error creating ctrl event source: %m");
1573
1574 /* This needs to be after the inotify and uevent handling, to make sure
1575 * that the ping is send back after fully processing the pending uevents
1576 * (including the synthetic ones we may create due to inotify events).
1577 */
1578 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1579 if (r < 0)
1580 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1581
1582 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1583 if (r < 0)
1584 return log_error_errno(r, "error creating inotify event source: %m");
1585
11b1dd8c 1586 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1587 if (r < 0)
1588 return log_error_errno(r, "error creating uevent event source: %m");
1589
1590 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1591 if (r < 0)
1592 return log_error_errno(r, "error creating worker event source: %m");
1593
1594 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1595 if (r < 0)
1596 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1597
11b1dd8c
TG
1598 *ret = manager;
1599 manager = NULL;
1600
86c3bece 1601 return 0;
c0c6806b
TG
1602}
1603
/* Construct the manager, notify readiness, and drive the event loop until
 * it exits. Returns the event-loop exit code, negative errno on error. */
static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                /* non-fatal: continue without the static permissions */
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notifyf(false,
                          "READY=1\n"
                          "STATUS=Processing with %u children at max", arg_children_max);

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}
1638
/* udevd entry point: parse configuration, set up the process environment
 * (umask, selinux, /run/udev, /dev), optionally daemonize, then hand the
 * activated sockets to run(). */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        /* kernel command line overrides; "rd."-prefixed variants are handled too */
        r = parse_proc_cmdline(parse_proc_cmdline_item, NULL, true);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        /* default worker count: 8 plus two per available CPU */
        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                 we only do this on systemd systems, and only if we are directly spawned
                 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT || r == -ENOMEDIUM)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        /* child continues as the daemon */
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* parent exits immediately */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* protect the daemon from the OOM killer; failure is ignored */
                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}