/*
 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "event-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "formats-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "proc-cmdline.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-util.h"
#include "udev.h"
#include "user-util.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

typedef struct Manager {
        struct udev *udev;
        sd_event *event;
        Hashmap *workers;
        struct udev_list_node events;
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        struct udev_list_node node;
        Manager *manager;
        struct udev *udev;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static inline struct event *node_to_event(struct udev_list_node *node) {
        return container_of(node, struct event, node);
}

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        struct udev_list_node node;
        int refcount;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

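/*
 * Events and workers form the core state machine: uevents are queued as
 * struct event entries on manager->events and handed to forked worker
 * processes, which handle one event at a time and report completion by
 * sending an empty struct worker_message over the worker_watch socketpair.
 * /run/udev/queue exists on disk whenever the event queue is non-empty.
 */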
static void event_free(struct event *event) {
        int r;

        if (!event)
                return;

        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        assert(event->manager);

        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, UINT_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, UINT_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = worker;
        worker = NULL;

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, clock_boottime_or_monotonic(), &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

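/*
 * worker_spawn() forks a worker process for the given event. The child tears
 * down the manager state it inherited, applies the rules to the device, sends
 * the processed event back to libudev listeners, reports the result to the
 * main daemon via worker_send_message() and then waits on its monitor and
 * signal fds for either the next device or SIGTERM.
 */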
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_netlink_unref_ sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = event->dev;
                event->dev = NULL;

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* request TERM signal if parent exits */
                prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* reset OOM score, we only protect the main daemon */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            streq_ptr("block", udev_device_get_subsystem(dev)) &&
                            !startswith(udev_device_get_sysname(dev), "dm-") &&
                            !startswith(udev_device_get_sysname(dev), "md")) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

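/*
 * event_run() tries to hand the event to an idle worker; if none is available
 * and the worker limit has not been reached, a new worker is forked for it.
 */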
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid();

        assert(manager->pid == getpid());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (udev_list_node_is_empty(&manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        udev_list_node_append(&event->node, &manager->events);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct udev_list_node *loop;
        size_t common;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &manager->events) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event, earlier can not block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notify(false,
                  "READY=1\n"
                  "STATUS=Processing...");
}

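/*
 * event_queue_start() walks the queue and dispatches every queued event whose
 * device is not blocked by an earlier event (see is_devpath_busy()). It also
 * reloads the rules if their timestamp changed, checked at most every 3 seconds.
 */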
static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;
        usec_t usec;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &manager->events) {
                struct event *event = node_to_event(loop);

                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

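/*
 * on_worker() drains the worker_watch socketpair: each empty worker_message
 * announces a finished event. SCM_CREDENTIALS is used to map the sender's PID
 * back to the worker, which is then marked idle and its event freed.
 */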
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, UINT_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

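/*
 * synthesize_change() writes "change" to a device's uevent file to notify
 * listeners after a watched device node was closed. For whole disks it first
 * tries BLKRRPART so the kernel re-reads the partition table; only if that
 * fails (or there are no partitions) are "change" events synthesized for the
 * disk and each of its partitions.
 */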
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}

static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (errno == EAGAIN || errno == EINTR)
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, UINT_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
                }
        }

        return 1;
}

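/*
 * listen_fds() picks up the control and netlink sockets passed in by systemd
 * socket activation; whichever of the two was not handed over is created and
 * bound here instead.
 */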
static int listen_fds(int *rctrl, int *rnetlink) {
        _cleanup_udev_unref_ struct udev *udev = NULL;
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;

                udev = udev_new();
                if (!udev)
                        return -ENOMEM;

                ctrl = udev_ctrl_new(udev);
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;

                if (!udev) {
                        udev = udev_new();
                        if (!udev)
                                return -ENOMEM;
                }

                monitor = udev_monitor_new_from_netlink(udev, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (netlink_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log-priority=<level>                 syslog priority
 *   udev.children-max=<number of workers>     events are fully serialized if set to 1
 *   udev.exec-delay=<number of seconds>       delay execution of every executed program
 *   udev.event-timeout=<number of seconds>    seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value) {
        const char *full_key = key;
        int r;

        assert(key);

        if (!value)
                return 0;

        if (startswith(key, "rd."))
                key += strlen("rd.");

        if (startswith(key, "udev."))
                key += strlen("udev.");
        else
                return 0;

        if (streq(key, "log-priority")) {
                int prio;

                prio = util_log_priority(value);
                if (prio < 0)
                        goto invalid;
                log_set_max_level(prio);
        } else if (streq(key, "children-max")) {
                r = safe_atou(value, &arg_children_max);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "exec-delay")) {
                r = safe_atoi(value, &arg_exec_delay);
                if (r < 0)
                        goto invalid;
        } else if (streq(key, "event-timeout")) {
                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r < 0)
                        goto invalid;
                arg_event_timeout_usec *= USEC_PER_SEC;
                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
        }

        return 0;
invalid:
        log_warning("invalid %s ignored: %s", full_key, value);
        return 0;
}

static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "     --version                Print version of the program\n"
               "     --daemon                 Detach and run in the background\n"
               "     --debug                  Enable debug output\n"
               "     --children-max=INT       Set maximum number of workers\n"
               "     --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "     --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "     --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}

static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",        no_argument,       NULL, 'd' },
                { "debug",         no_argument,       NULL, 'D' },
                { "children-max",  required_argument, NULL, 'c' },
                { "exec-delay",    required_argument, NULL, 'e' },
                { "event-timeout", required_argument, NULL, 't' },
                { "resolve-names", required_argument, NULL, 'N' },
                { "help",          no_argument,       NULL, 'h' },
                { "version",       no_argument,       NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        help();
                        return 0;
                case 'V':
                        printf("%s\n", VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

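/*
 * manager_new() assembles the Manager: it takes over the control and netlink
 * sockets, loads the rules, creates the worker socketpair and inotify watch,
 * and wires everything up as sd-event sources.
 */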
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker, one = 1;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        manager->udev = udev_new();
        if (!manager->udev)
                return log_error_errno(errno, "could not allocate udev context: %m");

        udev_builtin_init(manager->udev);

        manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        udev_list_node_init(&manager->events);
        udev_list_init(manager->udev, &manager->properties, true);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
        if (r < 0)
                return log_error_errno(errno, "could not enable SO_PASSCRED: %m");

        manager->fd_inotify = udev_watch_init(manager->udev);
        if (manager->fd_inotify < 0)
                return log_error_errno(ENOMEM, "error initializing inotify");

        udev_watch_restore(manager->udev);

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(errno, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = manager;
        manager = NULL;

        return 0;
}

static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notify(false,
                         "READY=1\n"
                         "STATUS=Processing...");

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int r, fd_ctrl, fd_uevent;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = parse_proc_cmdline(parse_proc_cmdline_item);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init("/dev");
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind;
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID 1, otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT || r == -ENOEXEC)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG)
                        (void) make_null_stdio();

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}