/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright © 2009 Canonical Ltd.
 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "device-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "libudev-device-internal.h"
#include "list.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "proc-cmdline.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-ctrl.h"
#include "udev-util.h"
#include "udev-watch.h"
#include "udev.h"
#include "user-util.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

typedef struct Manager {
        sd_event *event;
        Hashmap *workers;
        LIST_HEAD(struct event, events);
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

static void event_free(struct event *event) {
        int r;

        if (!event)
                return;
        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        if (LIST_IS_EMPTY(event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid_cached()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

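/* Register a newly forked worker with the manager: the worker keeps the (disconnected)
 * monitor around so the main process can address it, and is tracked in the workers
 * hashmap keyed by its PID. */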
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

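/* Hand an event to a worker and arm the per-event timers: a warning is logged after
 * arg_event_timeout_warn_usec, and the worker is killed after arg_event_timeout_usec. */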
static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit();

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);
        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}

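/* Fork a new worker process for the given event. The child drops the manager state it
 * inherited, processes the event (rules, RUN keys, device node locking), sends the result
 * back over the worker_watch socketpair, and then waits on its own monitor fd for further
 * devices or SIGTERM. The parent registers the child via worker_new(). */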
static void worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            shall_lock_device(dev)) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(dev->device);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

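/* Dispatch a queued event: hand it to an idle worker if one exists, otherwise spawn a
 * new worker, unless arg_children_max has already been reached. */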
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

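/* Append a new uevent to the event queue; /run/udev/queue is created when the queue
 * becomes non-empty, as an on-disk flag that events are pending, and removed again in
 * event_free() once the queue runs empty. */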
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit();

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}

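/* Walk the queue and start every event that is not blocked by an earlier event on the
 * same or a related devpath; the rules are reloaded if they changed, checked at most
 * every 3 seconds. */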
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init();

        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct event *event, *tmp;

        LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

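/* Called when a worker reports completion over the worker_watch socketpair: validate the
 * sender via SCM_CREDENTIALS, mark the worker idle, free its event and try to schedule
 * more work. */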
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

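/* Write "change" to the uevent file of a device that was closed after being opened for
 * writing; for whole disks this first tries to re-read the partition table and, if that
 * fails, synthesizes "change" events for the disk and all of its partitions. */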
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devname, *syspath, *devtype;
        char filename[PATH_MAX];
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &devname);
        if (r < 0)
                return r;

        r = sd_device_get_syspath(dev, &syspath);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        if (streq_ptr("block", subsystem) &&
            streq_ptr("disk", devtype) &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("Device '%s' is closed, synthesising 'change'", devname);
                strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                FOREACH_DEVICE(e, d) {
                        const char *t, *n, *s;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        if (sd_device_get_devname(d, &n) < 0 ||
                            sd_device_get_syspath(d, &s) < 0)
                                continue;

                        log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
                        strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        log_debug("Device %s is closed, synthesising 'change'", devname);
        strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}

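/* Handle inotify events for watched device nodes: a close-after-write triggers
 * synthesize_change(), and IN_IGNORED ends the watch. */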
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                if (udev_watch_lookup(e->wd, &dev) < 0)
                        continue;

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

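/* Reap exited workers: on abnormal exit the event's on-disk state is removed and the
 * original kernel uevent is forwarded unmodified to libudev listeners. */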
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
                }
        }

        return 1;
}

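/* Obtain the control and netlink sockets, either from socket activation (sd_listen_fds)
 * or by creating and binding them here. */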
static int listen_fds(int *rctrl, int *rnetlink) {
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;

                ctrl = udev_ctrl_new();
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;

                monitor = udev_monitor_new_from_netlink(NULL, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(netlink_fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (netlink_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_priority=<level>                 syslog priority
 *   udev.children_max=<number of workers>     events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>       delay execution of every executed program
 *   udev.event_timeout=<number of seconds>    seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r = 0;

        assert(key);

        if (!value)
                return 0;

        if (proc_cmdline_key_streq(key, "udev.log_priority")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = util_log_priority(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r >= 0) {
                        arg_event_timeout_usec *= USEC_PER_SEC;
                        arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                }

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atoi(value, &arg_exec_delay);

        } else if (startswith(key, "udev."))
                log_warning("Unknown udev kernel command line option \"%s\"", key);

        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}

static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-udevd.service", "8", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               "\nSee the %s for details.\n"
               , program_invocation_short_name
               , link
        );

        return 0;
}

static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",        no_argument,       NULL, 'd' },
                { "debug",         no_argument,       NULL, 'D' },
                { "children-max",  required_argument, NULL, 'c' },
                { "exec-delay",    required_argument, NULL, 'e' },
                { "event-timeout", required_argument, NULL, 't' },
                { "resolve-names", required_argument, NULL, 'N' },
                { "help",          no_argument,       NULL, 'h' },
                { "version",       no_argument,       NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        return help();
                case 'V':
                        printf("%s\n", PACKAGE_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

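/* Allocate the Manager and set up the sd-event loop: signal handlers, the control socket,
 * the kernel uevent monitor, the inotify watch fd and the worker notification socketpair. */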
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker, one = 1;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        udev_builtin_init();

        manager->rules = udev_rules_new(arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        LIST_HEAD_INIT(manager->events);
        udev_list_init(NULL, &manager->properties, true);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
        if (r < 0)
                return log_error_errno(errno, "could not enable SO_PASSCRED: %m");

        manager->fd_inotify = udev_watch_init();
        if (manager->fd_inotify < 0)
                return log_error_errno(ENOMEM, "error initializing inotify");

        udev_watch_restore();

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = TAKE_PTR(manager);

        return 0;
}

static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notifyf(false,
                          "READY=1\n"
                          "STATUS=Processing with %u children at max", arg_children_max);

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                goto exit;

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}