/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/*
 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright (C) 2009 Canonical Ltd.
 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "fd-util.h"
#include "fileio.h"
#include "formats-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "proc-cmdline.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-util.h"
#include "udev.h"
#include "user-util.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

typedef struct Manager {
        struct udev *udev;
        sd_event *event;
        Hashmap *workers;
        struct udev_list_node events;
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        struct udev_list_node node;
        Manager *manager;
        struct udev *udev;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static inline struct event *node_to_event(struct udev_list_node *node) {
        return container_of(node, struct event, node);
}

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        struct udev_list_node node;
        int refcount;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

static void event_free(struct event *event) {
        int r;

        if (!event)
                return;

        udev_list_node_remove(&event->node);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        assert(event->manager);

        if (udev_list_node_is_empty(&event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = worker;
        worker = NULL;

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

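/* mark a worker as busy with the given event and arm the per-event warning and kill timers */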
static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, clock_boottime_or_monotonic(), &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

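/* fork a worker process for the given event; the child runs its own epoll loop,
 * handles this device and any further devices sent to it by the main daemon, and
 * reports each result back over the worker_watch socketpair */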
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = event->dev;
                event->dev = NULL;

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "fail to add fds to epoll: %m");
                        goto out;
                }

                /* request TERM signal if parent exits */
                prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* reset OOM score, we only protect the main daemon */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            streq_ptr("block", udev_device_get_subsystem(dev)) &&
                            !startswith(udev_device_get_sysname(dev), "dm-") &&
                            !startswith(udev_device_get_sysname(dev), "md")) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

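/* hand an event to an idle worker, or spawn a new worker if none is available and the limit allows */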
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

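/* wrap a received device in a queue entry, mark the queue non-empty via /run/udev/queue,
 * and append it to the manager's event list */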
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid();

        assert(manager->pid == getpid());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (udev_list_node_is_empty(&manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        udev_list_node_append(&event->node, &manager->events);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct udev_list_node *loop;
        size_t common;

        /* check if queue contains events we depend on */
        udev_list_node_foreach(loop, &manager->events) {
                struct event *loop_event = node_to_event(loop);

                /* we already found a later event, earlier can not block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notify(false,
                  "READY=1\n"
                  "STATUS=Processing...");
}

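/* walk the queue and start every queued event that is not blocked by a related
 * device still being handled; reload rules/builtins first if the config changed */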
static void event_queue_start(Manager *manager) {
        struct udev_list_node *loop;
        usec_t usec;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        udev_list_node_foreach(loop, &manager->events) {
                struct event *event = node_to_event(loop);

                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct udev_list_node *loop, *tmp;

        udev_list_node_foreach_safe(loop, tmp, &manager->events) {
                struct event *event = node_to_event(loop);

                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

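/* handle "finished" notifications from workers, delivered with SCM_CREDENTIALS over the socketpair */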
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

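/* handle a uevent arriving from the kernel on the netlink monitor and queue it */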
static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

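/* write a synthetic "change" uevent for the device; for whole-disk block devices,
 * try to re-read the partition table first and, if that fails while partitions
 * exist, synthesize "change" for the disk and each partition */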
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}

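/* handle inotify events for watched device nodes: a close-after-write triggers a
 * synthetic "change" event, IN_IGNORED ends the watch */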
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (errno == EAGAIN || errno == EINTR)
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), strsignal(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

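/* runs after each event-loop iteration: once the queue is empty, kill leftover workers,
 * clean up the cgroup, and exit the loop when an exit was requested */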
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (udev_list_node_is_empty(&manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
                }
        }

        return 1;
}

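/* take the control and netlink sockets from systemd socket activation, or create and bind them ourselves */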
static int listen_fds(int *rctrl, int *rnetlink) {
        _cleanup_udev_unref_ struct udev *udev = NULL;
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;

                udev = udev_new();
                if (!udev)
                        return -ENOMEM;

                ctrl = udev_ctrl_new(udev);
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;

                if (!udev) {
                        udev = udev_new();
                        if (!udev)
                                return -ENOMEM;
                }

                monitor = udev_monitor_new_from_netlink(udev, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (netlink_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

3f85ef0f 1357 * read the kernel command line, in case we need to get into debug mode
614a823c
TG
1358 * udev.log-priority=<level> syslog priority
1359 * udev.children-max=<number of workers> events are fully serialized if set to 1
1360 * udev.exec-delay=<number of seconds> delay execution of every executed program
1361 * udev.event-timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1362 */
614a823c 1363static int parse_proc_cmdline_item(const char *key, const char *value) {
3567afa5 1364 const char *full_key = key;
74df0fca 1365 int r;
e6f86cac 1366
614a823c 1367 assert(key);
e6f86cac 1368
614a823c
TG
1369 if (!value)
1370 return 0;
e6f86cac 1371
614a823c
TG
1372 if (startswith(key, "rd."))
1373 key += strlen("rd.");
e6f86cac 1374
614a823c
TG
1375 if (startswith(key, "udev."))
1376 key += strlen("udev.");
1377 else
1378 return 0;
e6f86cac 1379
614a823c
TG
1380 if (streq(key, "log-priority")) {
1381 int prio;
e6f86cac 1382
614a823c 1383 prio = util_log_priority(value);
e00f5bdd 1384 if (prio < 0)
3567afa5
MS
1385 goto invalid;
1386 log_set_max_level(prio);
614a823c 1387 } else if (streq(key, "children-max")) {
020328e1 1388 r = safe_atou(value, &arg_children_max);
614a823c 1389 if (r < 0)
3567afa5 1390 goto invalid;
614a823c
TG
1391 } else if (streq(key, "exec-delay")) {
1392 r = safe_atoi(value, &arg_exec_delay);
1393 if (r < 0)
3567afa5 1394 goto invalid;
614a823c
TG
1395 } else if (streq(key, "event-timeout")) {
1396 r = safe_atou64(value, &arg_event_timeout_usec);
1397 if (r < 0)
3567afa5
MS
1398 goto invalid;
1399 arg_event_timeout_usec *= USEC_PER_SEC;
1400 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
e6f86cac 1401 }
614a823c 1402
3567afa5
MS
1403 return 0;
1404invalid:
1405 log_warning("invalid %s ignored: %s", full_key, value);
614a823c 1406 return 0;
e6f86cac
KS
1407}
1408
ed216e1f
TG
1409static void help(void) {
1410 printf("%s [OPTIONS...]\n\n"
1411 "Manages devices.\n\n"
5ac0162c
LP
1412 " -h --help Print this message\n"
1413 " --version Print version of the program\n"
1414 " --daemon Detach and run in the background\n"
1415 " --debug Enable debug output\n"
1416 " --children-max=INT Set maximum number of workers\n"
1417 " --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1418 " --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1419 " --resolve-names=early|late|never\n"
1420 " When to resolve users and groups\n"
ed216e1f
TG
1421 , program_invocation_short_name);
1422}
1423
bba7a484 1424static int parse_argv(int argc, char *argv[]) {
912541b0 1425 static const struct option options[] = {
bba7a484
TG
1426 { "daemon", no_argument, NULL, 'd' },
1427 { "debug", no_argument, NULL, 'D' },
1428 { "children-max", required_argument, NULL, 'c' },
1429 { "exec-delay", required_argument, NULL, 'e' },
1430 { "event-timeout", required_argument, NULL, 't' },
1431 { "resolve-names", required_argument, NULL, 'N' },
1432 { "help", no_argument, NULL, 'h' },
1433 { "version", no_argument, NULL, 'V' },
912541b0
KS
1434 {}
1435 };
689a97f5 1436
bba7a484 1437 int c;
689a97f5 1438
bba7a484
TG
1439 assert(argc >= 0);
1440 assert(argv);
912541b0 1441
e14b6f21 1442 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1443 int r;
912541b0 1444
bba7a484 1445 switch (c) {
912541b0 1446
912541b0 1447 case 'd':
bba7a484 1448 arg_daemonize = true;
912541b0
KS
1449 break;
1450 case 'c':
020328e1 1451 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1452 if (r < 0)
1453 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1454 break;
1455 case 'e':
6f5cf8a8
TG
1456 r = safe_atoi(optarg, &arg_exec_delay);
1457 if (r < 0)
1458 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1459 break;
9719859c 1460 case 't':
f1e8664e
TG
1461 r = safe_atou64(optarg, &arg_event_timeout_usec);
1462 if (r < 0)
65fea570 1463 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1464 else {
1465 arg_event_timeout_usec *= USEC_PER_SEC;
1466 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1467 }
9719859c 1468 break;
912541b0 1469 case 'D':
bba7a484 1470 arg_debug = true;
912541b0
KS
1471 break;
1472 case 'N':
090be865 1473 if (streq(optarg, "early")) {
bba7a484 1474 arg_resolve_names = 1;
090be865 1475 } else if (streq(optarg, "late")) {
bba7a484 1476 arg_resolve_names = 0;
090be865 1477 } else if (streq(optarg, "never")) {
bba7a484 1478 arg_resolve_names = -1;
912541b0 1479 } else {
9f6445e3 1480 log_error("resolve-names must be early, late or never");
bba7a484 1481 return 0;
912541b0
KS
1482 }
1483 break;
1484 case 'h':
ed216e1f 1485 help();
bba7a484 1486 return 0;
912541b0
KS
1487 case 'V':
1488 printf("%s\n", VERSION);
bba7a484
TG
1489 return 0;
1490 case '?':
1491 return -EINVAL;
912541b0 1492 default:
bba7a484
TG
1493 assert_not_reached("Unhandled option");
1494
912541b0
KS
1495 }
1496 }
1497
bba7a484
TG
1498 return 1;
1499}
1500
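/* allocate the manager, take over the passed-in control and netlink sockets, and wire up all sd-event sources */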
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker, one = 1;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        manager->udev = udev_new();
        if (!manager->udev)
                return log_error_errno(errno, "could not allocate udev context: %m");

        udev_builtin_init(manager->udev);

        manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        udev_list_node_init(&manager->events);
        udev_list_init(manager->udev, &manager->properties, true);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
        if (r < 0)
                return log_error_errno(errno, "could not enable SO_PASSCRED: %m");

        manager->fd_inotify = udev_watch_init(manager->udev);
        if (manager->fd_inotify < 0)
                return log_error_errno(ENOMEM, "error initializing inotify");

        udev_watch_restore(manager->udev);

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = manager;
        manager = NULL;

        return 0;
}

static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notify(false,
                         "READY=1\n"
                         "STATUS=Processing...");

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int r, fd_ctrl, fd_uevent;

        log_set_target(LOG_TARGET_AUTO);
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = parse_proc_cmdline(parse_proc_cmdline_item);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        if (getuid() != 0) {
                r = log_error_errno(EPERM, "root privileges required");
                goto exit;
        }

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init("/dev");
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir("/run/udev", 0755);
        if (r < 0 && errno != EEXIST) {
                r = log_error_errno(errno, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (r == -ENOENT || r == -ENOEXEC)
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG)
                        (void) make_null_stdio();

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}