]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
udev: exclude DRBD from block device ownership event locking
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
810adae9
LP
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
7fafc032 6 *
7fafc032
KS
7 */
8
7fafc032 9#include <errno.h>
618234a5
LP
10#include <fcntl.h>
11#include <getopt.h>
12#include <signal.h>
13#include <stdbool.h>
14#include <stddef.h>
7fafc032
KS
15#include <stdio.h>
16#include <stdlib.h>
17#include <string.h>
618234a5 18#include <sys/epoll.h>
3ebdb81e 19#include <sys/file.h>
618234a5
LP
20#include <sys/inotify.h>
21#include <sys/ioctl.h>
22#include <sys/mount.h>
1e03b754 23#include <sys/prctl.h>
1e03b754 24#include <sys/signalfd.h>
618234a5 25#include <sys/socket.h>
dc117daa 26#include <sys/stat.h>
618234a5
LP
27#include <sys/time.h>
28#include <sys/wait.h>
29#include <unistd.h>
7fafc032 30
392ef7a2 31#include "sd-daemon.h"
693d371d 32#include "sd-event.h"
8314de1d 33
b5efdb8a 34#include "alloc-util.h"
194bbe33 35#include "cgroup-util.h"
618234a5 36#include "cpu-set-util.h"
5ba2dc25 37#include "dev-setup.h"
3ffd4af2 38#include "fd-util.h"
a5c32cff 39#include "fileio.h"
f97b34a6 40#include "format-util.h"
f4f15635 41#include "fs-util.h"
a505965d 42#include "hashmap.h"
c004493c 43#include "io-util.h"
40a57716 44#include "list.h"
618234a5 45#include "netlink-util.h"
6bedfcbb 46#include "parse-util.h"
4e731273 47#include "proc-cmdline.h"
618234a5
LP
48#include "process-util.h"
49#include "selinux-util.h"
50#include "signal-util.h"
8f328d36 51#include "socket-util.h"
07630cea 52#include "string-util.h"
618234a5
LP
53#include "terminal-util.h"
54#include "udev-util.h"
55#include "udev.h"
ee104e11 56#include "user-util.h"
7fafc032 57
bba7a484
TG
/* Daemon-wide options, settable from the command line and (for some) at
 * runtime via udev control messages. */
static bool arg_debug = false;                 /* verbose debug logging */
static int arg_daemonize = false;              /* detach into the background after startup */
static int arg_resolve_names = 1;              /* passed to udev_rules_new(); 1 appears to mean "resolve early" — TODO confirm against option parser (outside this view) */
static unsigned arg_children_max;              /* cap on concurrently running worker processes (see event_run()) */
static int arg_exec_delay;                     /* copied into udev_event->exec_delay when > 0 */
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;           /* hard timeout: worker is SIGKILLed (on_event_timeout) */
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;  /* soft timeout: warning only (on_event_timeout_warning) */
c0c6806b
TG
65
/* Central daemon state, shared by all event-loop callbacks. */
typedef struct Manager {
        struct udev *udev;
        sd_event *event;                  /* main event loop */
        Hashmap *workers;                 /* PID_TO_PTR(pid) -> struct worker */
        LIST_HEAD(struct event, events);  /* queue of pending/running uevents */
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;      /* global properties set via control ENV messages */

        struct udev_monitor *monitor;     /* kernel uevent netlink source */
        struct udev_ctrl *ctrl;           /* udevadm control socket */
        struct udev_ctrl_connection *ctrl_conn_blocking;  /* held to block an EXIT client until shutdown */
        int fd_inotify;
        int worker_watch[2];              /* socketpair workers use to report completion to the daemon */

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;                 /* last time rules/builtin timestamps were checked (event_queue_start) */

        bool stop_exec_queue:1;           /* STOP_EXEC_QUEUE control message received */
        bool exit:1;                      /* shutdown in progress (manager_exit) */
} Manager;
1e03b754 92
1e03b754 93enum event_state {
912541b0
KS
94 EVENT_UNDEF,
95 EVENT_QUEUED,
96 EVENT_RUNNING,
1e03b754
KS
97};
98
99struct event {
40a57716 100 LIST_FIELDS(struct event, event);
cb49a4f2 101 Manager *manager;
912541b0
KS
102 struct udev *udev;
103 struct udev_device *dev;
6969c349 104 struct udev_device *dev_kernel;
c6aa11f2 105 struct worker *worker;
912541b0 106 enum event_state state;
912541b0
KS
107 unsigned long long int delaying_seqnum;
108 unsigned long long int seqnum;
109 const char *devpath;
110 size_t devpath_len;
111 const char *devpath_old;
112 dev_t devnum;
912541b0 113 int ifindex;
ea6039a3 114 bool is_block;
693d371d
TG
115 sd_event_source *timeout_warning;
116 sd_event_source *timeout;
1e03b754
KS
117};
118
ecb17862 119static void event_queue_cleanup(Manager *manager, enum event_state type);
ff2c503d 120
1e03b754 121enum worker_state {
912541b0
KS
122 WORKER_UNDEF,
123 WORKER_RUNNING,
124 WORKER_IDLE,
125 WORKER_KILLED,
1e03b754
KS
126};
127
128struct worker {
c0c6806b 129 Manager *manager;
912541b0
KS
130 int refcount;
131 pid_t pid;
132 struct udev_monitor *monitor;
133 enum worker_state state;
134 struct event *event;
1e03b754
KS
135};
136
137/* passed from worker to main process */
138struct worker_message {
1e03b754
KS
139};
140
c6aa11f2 141static void event_free(struct event *event) {
cb49a4f2
TG
142 int r;
143
c6aa11f2
TG
144 if (!event)
145 return;
40a57716 146 assert(event->manager);
c6aa11f2 147
40a57716 148 LIST_REMOVE(event, event->manager->events, event);
912541b0 149 udev_device_unref(event->dev);
6969c349 150 udev_device_unref(event->dev_kernel);
c6aa11f2 151
693d371d
TG
152 sd_event_source_unref(event->timeout_warning);
153 sd_event_source_unref(event->timeout);
154
c6aa11f2
TG
155 if (event->worker)
156 event->worker->event = NULL;
157
40a57716 158 if (LIST_IS_EMPTY(event->manager->events)) {
cb49a4f2 159 /* only clean up the queue from the process that created it */
df0ff127 160 if (event->manager->pid == getpid_cached()) {
cb49a4f2
TG
161 r = unlink("/run/udev/queue");
162 if (r < 0)
163 log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
164 }
165 }
166
912541b0 167 free(event);
aa8734ff 168}
7a770250 169
c6aa11f2
TG
170static void worker_free(struct worker *worker) {
171 if (!worker)
172 return;
bc113de9 173
c0c6806b
TG
174 assert(worker->manager);
175
4a0b58c4 176 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
912541b0 177 udev_monitor_unref(worker->monitor);
c6aa11f2
TG
178 event_free(worker->event);
179
c6aa11f2 180 free(worker);
ff2c503d
KS
181}
182
c0c6806b 183static void manager_workers_free(Manager *manager) {
a505965d
TG
184 struct worker *worker;
185 Iterator i;
ff2c503d 186
c0c6806b
TG
187 assert(manager);
188
189 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 190 worker_free(worker);
a505965d 191
c0c6806b 192 manager->workers = hashmap_free(manager->workers);
fc465079
KS
193}
194
c0c6806b 195static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
a505965d
TG
196 _cleanup_free_ struct worker *worker = NULL;
197 int r;
3a19b32a
TG
198
199 assert(ret);
c0c6806b 200 assert(manager);
3a19b32a
TG
201 assert(worker_monitor);
202 assert(pid > 1);
203
204 worker = new0(struct worker, 1);
205 if (!worker)
206 return -ENOMEM;
207
39c19cf1 208 worker->refcount = 1;
c0c6806b 209 worker->manager = manager;
3a19b32a
TG
210 /* close monitor, but keep address around */
211 udev_monitor_disconnect(worker_monitor);
212 worker->monitor = udev_monitor_ref(worker_monitor);
213 worker->pid = pid;
a505965d 214
c0c6806b 215 r = hashmap_ensure_allocated(&manager->workers, NULL);
a505965d
TG
216 if (r < 0)
217 return r;
218
4a0b58c4 219 r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
a505965d
TG
220 if (r < 0)
221 return r;
222
ae2a15bc 223 *ret = TAKE_PTR(worker);
3a19b32a
TG
224
225 return 0;
226}
227
4fa4d885
TG
228static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
229 struct event *event = userdata;
230
231 assert(event);
232 assert(event->worker);
233
234 kill_and_sigcont(event->worker->pid, SIGKILL);
235 event->worker->state = WORKER_KILLED;
236
237 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
238
239 return 1;
240}
241
242static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
243 struct event *event = userdata;
244
245 assert(event);
246
247 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
248
249 return 1;
250}
251
39c19cf1 252static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
253 sd_event *e;
254 uint64_t usec;
693d371d 255
c6aa11f2 256 assert(worker);
693d371d 257 assert(worker->manager);
c6aa11f2
TG
258 assert(event);
259 assert(!event->worker);
260 assert(!worker->event);
261
39c19cf1 262 worker->state = WORKER_RUNNING;
39c19cf1
TG
263 worker->event = event;
264 event->state = EVENT_RUNNING;
c6aa11f2 265 event->worker = worker;
693d371d
TG
266
267 e = worker->manager->event;
268
3285baa8 269 assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 270
3285baa8 271 (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
693d371d
TG
272 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
273
3285baa8 274 (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
693d371d 275 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
276}
277
e237d8cb
TG
/* Free the whole Manager: event sources first, then the loop and the
 * worker/event bookkeeping, then the udev objects and file descriptors.
 * Also used in the forked worker to shed the parent's state. NULL is a
 * no-op. */
static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
307
9a73bd7c
TG
308static int worker_send_message(int fd) {
309 struct worker_message message = {};
310
311 return loop_write(fd, &message, sizeof(message), false);
312}
313
fee854ee
RK
314static bool shall_lock_device(struct udev_device *dev) {
315 const char *sysname;
316
317 if (!streq_ptr("block", udev_device_get_subsystem(dev)))
318 return false;
319
320 sysname = udev_device_get_sysname(dev);
321 return !startswith(sysname, "dm-") &&
322 !startswith(sysname, "md") &&
323 !startswith(sysname, "drbd");
324}
325
c0c6806b 326static void worker_spawn(Manager *manager, struct event *event) {
912541b0 327 struct udev *udev = event->udev;
8e766630 328 _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
912541b0 329 pid_t pid;
b6aab8ef 330 int r = 0;
912541b0
KS
331
332 /* listen for new events */
333 worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
334 if (worker_monitor == NULL)
335 return;
336 /* allow the main daemon netlink address to send devices to the worker */
c0c6806b 337 udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
b6aab8ef
TG
338 r = udev_monitor_enable_receiving(worker_monitor);
339 if (r < 0)
340 log_error_errno(r, "worker: could not enable receiving of device: %m");
912541b0 341
912541b0
KS
342 pid = fork();
343 switch (pid) {
344 case 0: {
345 struct udev_device *dev = NULL;
4afd3348 346 _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
912541b0 347 int fd_monitor;
e237d8cb 348 _cleanup_close_ int fd_signal = -1, fd_ep = -1;
2dd9f98d
TG
349 struct epoll_event ep_signal = { .events = EPOLLIN };
350 struct epoll_event ep_monitor = { .events = EPOLLIN };
912541b0 351 sigset_t mask;
912541b0 352
43095991 353 /* take initial device from queue */
1cc6c93a 354 dev = TAKE_PTR(event->dev);
912541b0 355
39fd2ca1
TG
356 unsetenv("NOTIFY_SOCKET");
357
c0c6806b 358 manager_workers_free(manager);
ecb17862 359 event_queue_cleanup(manager, EVENT_UNDEF);
6d1b1e0b 360
e237d8cb 361 manager->monitor = udev_monitor_unref(manager->monitor);
6d1b1e0b 362 manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
e237d8cb 363 manager->ctrl = udev_ctrl_unref(manager->ctrl);
e237d8cb 364 manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
912541b0 365
693d371d
TG
366 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
367 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
368 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
369
370 manager->event = sd_event_unref(manager->event);
371
912541b0
KS
372 sigfillset(&mask);
373 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
374 if (fd_signal < 0) {
6af5e6a4 375 r = log_error_errno(errno, "error creating signalfd %m");
912541b0
KS
376 goto out;
377 }
2dd9f98d
TG
378 ep_signal.data.fd = fd_signal;
379
380 fd_monitor = udev_monitor_get_fd(worker_monitor);
381 ep_monitor.data.fd = fd_monitor;
912541b0
KS
382
383 fd_ep = epoll_create1(EPOLL_CLOEXEC);
384 if (fd_ep < 0) {
6af5e6a4 385 r = log_error_errno(errno, "error creating epoll fd: %m");
912541b0
KS
386 goto out;
387 }
388
912541b0
KS
389 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
390 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
6af5e6a4 391 r = log_error_errno(errno, "fail to add fds to epoll: %m");
912541b0
KS
392 goto out;
393 }
394
045e00cf
ZJS
395 /* Request TERM signal if parent exits.
396 Ignore error, not much we can do in that case. */
397 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
912541b0 398
045e00cf 399 /* Reset OOM score, we only protect the main daemon. */
ad118bda 400 write_string_file("/proc/self/oom_score_adj", "0", 0);
145dae7e 401
912541b0
KS
402 for (;;) {
403 struct udev_event *udev_event;
6af5e6a4 404 int fd_lock = -1;
912541b0 405
3b64e4d4
TG
406 assert(dev);
407
9f6445e3 408 log_debug("seq %llu running", udev_device_get_seqnum(dev));
912541b0
KS
409 udev_event = udev_event_new(dev);
410 if (udev_event == NULL) {
6af5e6a4 411 r = -ENOMEM;
912541b0
KS
412 goto out;
413 }
414
bba7a484
TG
415 if (arg_exec_delay > 0)
416 udev_event->exec_delay = arg_exec_delay;
912541b0 417
3ebdb81e 418 /*
2e5b17d0 419 * Take a shared lock on the device node; this establishes
3ebdb81e 420 * a concept of device "ownership" to serialize device
2e5b17d0 421 * access. External processes holding an exclusive lock will
3ebdb81e 422 * cause udev to skip the event handling; in the case udev
2e5b17d0 423 * acquired the lock, the external process can block until
3ebdb81e
KS
424 * udev has finished its event handling.
425 */
2e5b17d0 426 if (!streq_ptr(udev_device_get_action(dev), "remove") &&
fee854ee 427 shall_lock_device(dev)) {
3ebdb81e
KS
428 struct udev_device *d = dev;
429
430 if (streq_ptr("partition", udev_device_get_devtype(d)))
431 d = udev_device_get_parent(d);
432
433 if (d) {
434 fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
435 if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
56f64d95 436 log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
3d06f418 437 fd_lock = safe_close(fd_lock);
3ebdb81e
KS
438 goto skip;
439 }
440 }
441 }
442
4c83d994
TG
443 /* needed for renaming netifs */
444 udev_event->rtnl = rtnl;
445
912541b0 446 /* apply rules, create node, symlinks */
adeba500
KS
447 udev_event_execute_rules(udev_event,
448 arg_event_timeout_usec, arg_event_timeout_warn_usec,
c0c6806b 449 &manager->properties,
8314de1d 450 manager->rules);
adeba500
KS
451
452 udev_event_execute_run(udev_event,
8314de1d 453 arg_event_timeout_usec, arg_event_timeout_warn_usec);
912541b0 454
523c620b
TG
455 if (udev_event->rtnl)
456 /* in case rtnl was initialized */
1c4baffc 457 rtnl = sd_netlink_ref(udev_event->rtnl);
4c83d994 458
912541b0 459 /* apply/restore inotify watch */
bf9bead1 460 if (udev_event->inotify_watch) {
912541b0
KS
461 udev_watch_begin(udev, dev);
462 udev_device_update_db(dev);
463 }
464
3d06f418 465 safe_close(fd_lock);
3ebdb81e 466
912541b0
KS
467 /* send processed event back to libudev listeners */
468 udev_monitor_send_device(worker_monitor, NULL, dev);
469
3ebdb81e 470skip:
4914cb2d 471 log_debug("seq %llu processed", udev_device_get_seqnum(dev));
b66f29a1 472
912541b0 473 /* send udevd the result of the event execution */
e237d8cb 474 r = worker_send_message(manager->worker_watch[WRITE_END]);
b66f29a1 475 if (r < 0)
9a73bd7c 476 log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
b66f29a1 477 udev_device_get_seqnum(dev));
912541b0
KS
478
479 udev_device_unref(dev);
480 dev = NULL;
481
73814ca2 482 udev_event_unref(udev_event);
47e737dc 483
912541b0
KS
484 /* wait for more device messages from main udevd, or term signal */
485 while (dev == NULL) {
486 struct epoll_event ev[4];
487 int fdcount;
488 int i;
489
8fef0ff2 490 fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
912541b0
KS
491 if (fdcount < 0) {
492 if (errno == EINTR)
493 continue;
6af5e6a4 494 r = log_error_errno(errno, "failed to poll: %m");
912541b0
KS
495 goto out;
496 }
497
498 for (i = 0; i < fdcount; i++) {
499 if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
500 dev = udev_monitor_receive_device(worker_monitor);
501 break;
502 } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
503 struct signalfd_siginfo fdsi;
504 ssize_t size;
505
506 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
507 if (size != sizeof(struct signalfd_siginfo))
508 continue;
509 switch (fdsi.ssi_signo) {
510 case SIGTERM:
511 goto out;
512 }
513 }
514 }
515 }
516 }
82063a88 517out:
912541b0 518 udev_device_unref(dev);
e237d8cb 519 manager_free(manager);
baa30fbc 520 log_close();
8b46c3fc 521 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
912541b0
KS
522 }
523 case -1:
912541b0 524 event->state = EVENT_QUEUED;
56f64d95 525 log_error_errno(errno, "fork of child failed: %m");
912541b0
KS
526 break;
527 default:
e03c7cc2
TG
528 {
529 struct worker *worker;
530
c0c6806b 531 r = worker_new(&worker, manager, worker_monitor, pid);
3a19b32a 532 if (r < 0)
e03c7cc2 533 return;
e03c7cc2 534
39c19cf1
TG
535 worker_attach_event(worker, event);
536
1fa2f38f 537 log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
912541b0
KS
538 break;
539 }
e03c7cc2 540 }
7fafc032
KS
541}
542
c0c6806b 543static void event_run(Manager *manager, struct event *event) {
a505965d
TG
544 struct worker *worker;
545 Iterator i;
912541b0 546
c0c6806b
TG
547 assert(manager);
548 assert(event);
549
550 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
551 ssize_t count;
552
553 if (worker->state != WORKER_IDLE)
554 continue;
555
c0c6806b 556 count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
912541b0 557 if (count < 0) {
1fa2f38f
ZJS
558 log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
559 worker->pid, count);
912541b0
KS
560 kill(worker->pid, SIGKILL);
561 worker->state = WORKER_KILLED;
562 continue;
563 }
39c19cf1 564 worker_attach_event(worker, event);
912541b0
KS
565 return;
566 }
567
c0c6806b 568 if (hashmap_size(manager->workers) >= arg_children_max) {
bba7a484 569 if (arg_children_max > 1)
c0c6806b 570 log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
912541b0
KS
571 return;
572 }
573
574 /* start new worker and pass initial device */
c0c6806b 575 worker_spawn(manager, event);
1e03b754
KS
576}
577
ecb17862 578static int event_queue_insert(Manager *manager, struct udev_device *dev) {
912541b0 579 struct event *event;
cb49a4f2 580 int r;
912541b0 581
ecb17862
TG
582 assert(manager);
583 assert(dev);
584
040e6896
TG
585 /* only one process can add events to the queue */
586 if (manager->pid == 0)
df0ff127 587 manager->pid = getpid_cached();
040e6896 588
df0ff127 589 assert(manager->pid == getpid_cached());
cb49a4f2 590
955d98c9 591 event = new0(struct event, 1);
cb49a4f2
TG
592 if (!event)
593 return -ENOMEM;
912541b0
KS
594
595 event->udev = udev_device_get_udev(dev);
cb49a4f2 596 event->manager = manager;
912541b0 597 event->dev = dev;
6969c349
TG
598 event->dev_kernel = udev_device_shallow_clone(dev);
599 udev_device_copy_properties(event->dev_kernel, dev);
912541b0
KS
600 event->seqnum = udev_device_get_seqnum(dev);
601 event->devpath = udev_device_get_devpath(dev);
602 event->devpath_len = strlen(event->devpath);
603 event->devpath_old = udev_device_get_devpath_old(dev);
604 event->devnum = udev_device_get_devnum(dev);
ea6039a3 605 event->is_block = streq("block", udev_device_get_subsystem(dev));
912541b0
KS
606 event->ifindex = udev_device_get_ifindex(dev);
607
9f6445e3 608 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
912541b0
KS
609 udev_device_get_action(dev), udev_device_get_subsystem(dev));
610
611 event->state = EVENT_QUEUED;
cb49a4f2 612
40a57716 613 if (LIST_IS_EMPTY(manager->events)) {
cb49a4f2
TG
614 r = touch("/run/udev/queue");
615 if (r < 0)
616 log_warning_errno(r, "could not touch /run/udev/queue: %m");
617 }
618
40a57716 619 LIST_APPEND(event, manager->events, event);
cb49a4f2 620
912541b0 621 return 0;
fc465079
KS
622}
623
c0c6806b 624static void manager_kill_workers(Manager *manager) {
a505965d
TG
625 struct worker *worker;
626 Iterator i;
1e03b754 627
c0c6806b
TG
628 assert(manager);
629
630 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
631 if (worker->state == WORKER_KILLED)
632 continue;
1e03b754 633
912541b0
KS
634 worker->state = WORKER_KILLED;
635 kill(worker->pid, SIGTERM);
636 }
1e03b754
KS
637}
638
e3196993 639/* lookup event for identical, parent, child device */
ecb17862 640static bool is_devpath_busy(Manager *manager, struct event *event) {
40a57716 641 struct event *loop_event;
912541b0
KS
642 size_t common;
643
644 /* check if queue contains events we depend on */
40a57716 645 LIST_FOREACH(event, loop_event, manager->events) {
87ac8d99 646 /* we already found a later event, earlier cannot block us, no need to check again */
912541b0
KS
647 if (loop_event->seqnum < event->delaying_seqnum)
648 continue;
649
650 /* event we checked earlier still exists, no need to check again */
651 if (loop_event->seqnum == event->delaying_seqnum)
652 return true;
653
654 /* found ourself, no later event can block us */
655 if (loop_event->seqnum >= event->seqnum)
656 break;
657
658 /* check major/minor */
659 if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
660 return true;
661
662 /* check network device ifindex */
663 if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
664 return true;
665
666 /* check our old name */
090be865 667 if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
912541b0
KS
668 event->delaying_seqnum = loop_event->seqnum;
669 return true;
670 }
671
672 /* compare devpath */
673 common = MIN(loop_event->devpath_len, event->devpath_len);
674
675 /* one devpath is contained in the other? */
676 if (memcmp(loop_event->devpath, event->devpath, common) != 0)
677 continue;
678
679 /* identical device event found */
680 if (loop_event->devpath_len == event->devpath_len) {
681 /* devices names might have changed/swapped in the meantime */
682 if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
683 continue;
684 if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
685 continue;
686 event->delaying_seqnum = loop_event->seqnum;
687 return true;
688 }
689
690 /* parent device event found */
691 if (event->devpath[common] == '/') {
692 event->delaying_seqnum = loop_event->seqnum;
693 return true;
694 }
695
696 /* child device event found */
697 if (loop_event->devpath[common] == '/') {
698 event->delaying_seqnum = loop_event->seqnum;
699 return true;
700 }
701
702 /* no matching device */
703 continue;
704 }
705
706 return false;
7fafc032
KS
707}
708
693d371d
TG
709static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
710 Manager *manager = userdata;
711
712 assert(manager);
713
714 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
715
716 sd_event_exit(manager->event, -ETIMEDOUT);
717
718 return 1;
719}
720
62d43dac 721static void manager_exit(Manager *manager) {
693d371d
TG
722 uint64_t usec;
723 int r;
62d43dac
TG
724
725 assert(manager);
726
727 manager->exit = true;
728
b79aacbf
TG
729 sd_notify(false,
730 "STOPPING=1\n"
731 "STATUS=Starting shutdown...");
732
62d43dac 733 /* close sources of new events and discard buffered events */
693d371d 734 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
ab7854df 735 manager->ctrl = udev_ctrl_unref(manager->ctrl);
62d43dac 736
693d371d 737 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
ab7854df 738 manager->fd_inotify = safe_close(manager->fd_inotify);
62d43dac 739
693d371d 740 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
ab7854df 741 manager->monitor = udev_monitor_unref(manager->monitor);
62d43dac
TG
742
743 /* discard queued events and kill workers */
744 event_queue_cleanup(manager, EVENT_QUEUED);
745 manager_kill_workers(manager);
693d371d 746
3285baa8 747 assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 748
3285baa8 749 r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
693d371d
TG
750 usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
751 if (r < 0)
752 return;
62d43dac
TG
753}
754
/* reload requested, HUP signal received, rules changed, builtin changed */
/* Drops the compiled rules and builtin state; they are lazily rebuilt by
 * event_queue_start(). Running workers are terminated so no event keeps
 * executing against stale rules. */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}
772
c0c6806b 773static void event_queue_start(Manager *manager) {
40a57716 774 struct event *event;
693d371d 775 usec_t usec;
8ab44e3f 776
c0c6806b
TG
777 assert(manager);
778
40a57716 779 if (LIST_IS_EMPTY(manager->events) ||
7c4c7e89
TG
780 manager->exit || manager->stop_exec_queue)
781 return;
782
3285baa8 783 assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
38a03f06
LP
784 /* check for changed config, every 3 seconds at most */
785 if (manager->last_usec == 0 ||
786 (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
787 if (udev_rules_check_timestamp(manager->rules) ||
788 udev_builtin_validate(manager->udev))
789 manager_reload(manager);
693d371d 790
38a03f06 791 manager->last_usec = usec;
7c4c7e89
TG
792 }
793
794 udev_builtin_init(manager->udev);
795
796 if (!manager->rules) {
797 manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
798 if (!manager->rules)
799 return;
800 }
801
40a57716 802 LIST_FOREACH(event,event,manager->events) {
912541b0
KS
803 if (event->state != EVENT_QUEUED)
804 continue;
0bc74ea7 805
912541b0 806 /* do not start event if parent or child event is still running */
ecb17862 807 if (is_devpath_busy(manager, event))
912541b0 808 continue;
fc465079 809
c0c6806b 810 event_run(manager, event);
912541b0 811 }
1e03b754
KS
812}
813
ecb17862 814static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 815 struct event *event, *tmp;
ff2c503d 816
40a57716 817 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
818 if (match_type != EVENT_UNDEF && match_type != event->state)
819 continue;
ff2c503d 820
c6aa11f2 821 event_free(event);
912541b0 822 }
ff2c503d
KS
823}
824
e82e8fa5 825static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b
TG
826 Manager *manager = userdata;
827
828 assert(manager);
829
912541b0
KS
830 for (;;) {
831 struct worker_message msg;
979558f3
TG
832 struct iovec iovec = {
833 .iov_base = &msg,
834 .iov_len = sizeof(msg),
835 };
836 union {
837 struct cmsghdr cmsghdr;
838 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
839 } control = {};
840 struct msghdr msghdr = {
841 .msg_iov = &iovec,
842 .msg_iovlen = 1,
843 .msg_control = &control,
844 .msg_controllen = sizeof(control),
845 };
846 struct cmsghdr *cmsg;
912541b0 847 ssize_t size;
979558f3 848 struct ucred *ucred = NULL;
a505965d 849 struct worker *worker;
912541b0 850
e82e8fa5 851 size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
979558f3 852 if (size < 0) {
738a7907
TG
853 if (errno == EINTR)
854 continue;
855 else if (errno == EAGAIN)
856 /* nothing more to read */
857 break;
979558f3 858
e82e8fa5 859 return log_error_errno(errno, "failed to receive message: %m");
979558f3
TG
860 } else if (size != sizeof(struct worker_message)) {
861 log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
e82e8fa5 862 continue;
979558f3
TG
863 }
864
2a1288ff 865 CMSG_FOREACH(cmsg, &msghdr) {
979558f3
TG
866 if (cmsg->cmsg_level == SOL_SOCKET &&
867 cmsg->cmsg_type == SCM_CREDENTIALS &&
868 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
869 ucred = (struct ucred*) CMSG_DATA(cmsg);
870 }
871
872 if (!ucred || ucred->pid <= 0) {
873 log_warning_errno(EIO, "ignoring worker message without valid PID");
874 continue;
875 }
912541b0
KS
876
877 /* lookup worker who sent the signal */
4a0b58c4 878 worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
a505965d
TG
879 if (!worker) {
880 log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
881 continue;
912541b0 882 }
c0bbfd72 883
a505965d
TG
884 if (worker->state != WORKER_KILLED)
885 worker->state = WORKER_IDLE;
886
887 /* worker returned */
888 event_free(worker->event);
912541b0 889 }
e82e8fa5 890
8302fe5a
TG
891 /* we have free workers, try to schedule events */
892 event_queue_start(manager);
893
e82e8fa5
TG
894 return 1;
895}
896
/* Event-loop callback for the kernel uevent netlink socket: receive one
 * device, stamp its arrival time, queue it, and run a scheduler pass.
 * If queuing fails the device reference is dropped here. */
static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}
917
3b47c739 918/* receive the udevd message from userspace */
e82e8fa5 919static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 920 Manager *manager = userdata;
8e766630
LP
921 _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
922 _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
912541b0
KS
923 const char *str;
924 int i;
925
c0c6806b 926 assert(manager);
e4f66b77 927
c0c6806b 928 ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
e4f66b77 929 if (!ctrl_conn)
e82e8fa5 930 return 1;
912541b0
KS
931
932 ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
e4f66b77 933 if (!ctrl_msg)
e82e8fa5 934 return 1;
912541b0
KS
935
936 i = udev_ctrl_get_set_log_level(ctrl_msg);
937 if (i >= 0) {
ed14edc0 938 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
baa30fbc 939 log_set_max_level(i);
c0c6806b 940 manager_kill_workers(manager);
912541b0
KS
941 }
942
943 if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
9f6445e3 944 log_debug("udevd message (STOP_EXEC_QUEUE) received");
c0c6806b 945 manager->stop_exec_queue = true;
912541b0
KS
946 }
947
948 if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
9f6445e3 949 log_debug("udevd message (START_EXEC_QUEUE) received");
c0c6806b 950 manager->stop_exec_queue = false;
8302fe5a 951 event_queue_start(manager);
912541b0
KS
952 }
953
954 if (udev_ctrl_get_reload(ctrl_msg) > 0) {
9f6445e3 955 log_debug("udevd message (RELOAD) received");
62d43dac 956 manager_reload(manager);
912541b0
KS
957 }
958
959 str = udev_ctrl_get_set_env(ctrl_msg);
960 if (str != NULL) {
c0c6806b 961 _cleanup_free_ char *key = NULL;
912541b0
KS
962
963 key = strdup(str);
c0c6806b 964 if (key) {
912541b0
KS
965 char *val;
966
967 val = strchr(key, '=');
968 if (val != NULL) {
969 val[0] = '\0';
970 val = &val[1];
971 if (val[0] == '\0') {
9f6445e3 972 log_debug("udevd message (ENV) received, unset '%s'", key);
c0c6806b 973 udev_list_entry_add(&manager->properties, key, NULL);
912541b0 974 } else {
9f6445e3 975 log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
c0c6806b 976 udev_list_entry_add(&manager->properties, key, val);
912541b0 977 }
c0c6806b 978 } else
9f6445e3 979 log_error("wrong key format '%s'", key);
912541b0 980 }
c0c6806b 981 manager_kill_workers(manager);
912541b0
KS
982 }
983
984 i = udev_ctrl_get_set_children_max(ctrl_msg);
985 if (i >= 0) {
9f6445e3 986 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
bba7a484 987 arg_children_max = i;
1ef72b55
MS
988
989 (void) sd_notifyf(false,
990 "READY=1\n"
991 "STATUS=Processing with %u children at max", arg_children_max);
912541b0
KS
992 }
993
cb49a4f2 994 if (udev_ctrl_get_ping(ctrl_msg) > 0)
9f6445e3 995 log_debug("udevd message (SYNC) received");
912541b0
KS
996
997 if (udev_ctrl_get_exit(ctrl_msg) > 0) {
9f6445e3 998 log_debug("udevd message (EXIT) received");
62d43dac 999 manager_exit(manager);
c0c6806b
TG
1000 /* keep reference to block the client until we exit
1001 TODO: deal with several blocking exit requests */
1002 manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
912541b0 1003 }
e4f66b77 1004
e82e8fa5 1005 return 1;
88f4b648 1006}
4a231017 1007
f3a740a5 1008static int synthesize_change(struct udev_device *dev) {
edd32000 1009 char filename[UTIL_PATH_SIZE];
f3a740a5 1010 int r;
edd32000 1011
f3a740a5 1012 if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
ede34445 1013 streq_ptr("disk", udev_device_get_devtype(dev)) &&
638ca89c 1014 !startswith(udev_device_get_sysname(dev), "dm-")) {
e9fc29f4
KS
1015 bool part_table_read = false;
1016 bool has_partitions = false;
ede34445 1017 int fd;
f3a740a5 1018 struct udev *udev = udev_device_get_udev(dev);
8e766630 1019 _cleanup_(udev_enumerate_unrefp) struct udev_enumerate *e = NULL;
f3a740a5
KS
1020 struct udev_list_entry *item;
1021
ede34445 1022 /*
e9fc29f4
KS
1023 * Try to re-read the partition table. This only succeeds if
1024 * none of the devices is busy. The kernel returns 0 if no
1025 * partition table is found, and we will not get an event for
1026 * the disk.
ede34445 1027 */
02ba8fb3 1028 fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
ede34445 1029 if (fd >= 0) {
02ba8fb3
KS
1030 r = flock(fd, LOCK_EX|LOCK_NB);
1031 if (r >= 0)
1032 r = ioctl(fd, BLKRRPART, 0);
1033
ede34445
KS
1034 close(fd);
1035 if (r >= 0)
e9fc29f4 1036 part_table_read = true;
ede34445
KS
1037 }
1038
e9fc29f4 1039 /* search for partitions */
f3a740a5
KS
1040 e = udev_enumerate_new(udev);
1041 if (!e)
1042 return -ENOMEM;
1043
1044 r = udev_enumerate_add_match_parent(e, dev);
1045 if (r < 0)
1046 return r;
1047
1048 r = udev_enumerate_add_match_subsystem(e, "block");
1049 if (r < 0)
1050 return r;
1051
1052 r = udev_enumerate_scan_devices(e);
47a3fa0f
TA
1053 if (r < 0)
1054 return r;
e9fc29f4
KS
1055
1056 udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
8e766630 1057 _cleanup_(udev_device_unrefp) struct udev_device *d = NULL;
e9fc29f4
KS
1058
1059 d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
1060 if (!d)
1061 continue;
1062
1063 if (!streq_ptr("partition", udev_device_get_devtype(d)))
1064 continue;
1065
1066 has_partitions = true;
1067 break;
1068 }
1069
1070 /*
1071 * We have partitions and re-read the table, the kernel already sent
1072 * out a "change" event for the disk, and "remove/add" for all
1073 * partitions.
1074 */
1075 if (part_table_read && has_partitions)
1076 return 0;
1077
1078 /*
1079 * We have partitions but re-reading the partition table did not
1080 * work, synthesize "change" for the disk and all partitions.
1081 */
1082 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
1083 strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
4c1fc3e4 1084 write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
e9fc29f4 1085
f3a740a5 1086 udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
8e766630 1087 _cleanup_(udev_device_unrefp) struct udev_device *d = NULL;
f3a740a5
KS
1088
1089 d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
1090 if (!d)
1091 continue;
1092
1093 if (!streq_ptr("partition", udev_device_get_devtype(d)))
1094 continue;
1095
1096 log_debug("device %s closed, synthesising partition '%s' 'change'",
1097 udev_device_get_devnode(dev), udev_device_get_devnode(d));
1098 strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
4c1fc3e4 1099 write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
f3a740a5 1100 }
ede34445
KS
1101
1102 return 0;
f3a740a5
KS
1103 }
1104
ede34445
KS
1105 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
1106 strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
4c1fc3e4 1107 write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
ede34445 1108
f3a740a5 1109 return 0;
edd32000
KS
1110}
1111
e82e8fa5 1112static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 1113 Manager *manager = userdata;
0254e944 1114 union inotify_event_buffer buffer;
f7c1ad4f
LP
1115 struct inotify_event *e;
1116 ssize_t l;
912541b0 1117
c0c6806b 1118 assert(manager);
e82e8fa5
TG
1119
1120 l = read(fd, &buffer, sizeof(buffer));
f7c1ad4f 1121 if (l < 0) {
3742095b 1122 if (IN_SET(errno, EAGAIN, EINTR))
e82e8fa5 1123 return 1;
912541b0 1124
f7c1ad4f 1125 return log_error_errno(errno, "Failed to read inotify fd: %m");
912541b0
KS
1126 }
1127
f7c1ad4f 1128 FOREACH_INOTIFY_EVENT(e, buffer, l) {
8e766630 1129 _cleanup_(udev_device_unrefp) struct udev_device *dev = NULL;
912541b0 1130
c0c6806b 1131 dev = udev_watch_lookup(manager->udev, e->wd);
edd32000
KS
1132 if (!dev)
1133 continue;
912541b0 1134
f7c1ad4f 1135 log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
a8389097 1136 if (e->mask & IN_CLOSE_WRITE) {
edd32000 1137 synthesize_change(dev);
a8389097
TG
1138
1139 /* settle might be waiting on us to determine the queue
1140 * state. If we just handled an inotify event, we might have
1141 * generated a "change" event, but we won't have queued up
1142 * the resultant uevent yet. Do that.
1143 */
c0c6806b 1144 on_uevent(NULL, -1, 0, manager);
a8389097 1145 } else if (e->mask & IN_IGNORED)
c0c6806b 1146 udev_watch_end(manager->udev, dev);
912541b0
KS
1147 }
1148
e82e8fa5 1149 return 1;
bd284db1
SJR
1150}
1151
0561329d 1152static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1153 Manager *manager = userdata;
1154
1155 assert(manager);
1156
62d43dac 1157 manager_exit(manager);
912541b0 1158
e82e8fa5
TG
1159 return 1;
1160}
912541b0 1161
0561329d 1162static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1163 Manager *manager = userdata;
1164
1165 assert(manager);
1166
62d43dac 1167 manager_reload(manager);
912541b0 1168
e82e8fa5
TG
1169 return 1;
1170}
912541b0 1171
e82e8fa5 1172static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1173 Manager *manager = userdata;
1174
1175 assert(manager);
1176
e82e8fa5
TG
1177 for (;;) {
1178 pid_t pid;
1179 int status;
1180 struct worker *worker;
d1317d02 1181
e82e8fa5
TG
1182 pid = waitpid(-1, &status, WNOHANG);
1183 if (pid <= 0)
f29328d6 1184 break;
e82e8fa5 1185
4a0b58c4 1186 worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
e82e8fa5
TG
1187 if (!worker) {
1188 log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
f29328d6 1189 continue;
912541b0 1190 }
e82e8fa5
TG
1191
1192 if (WIFEXITED(status)) {
1193 if (WEXITSTATUS(status) == 0)
1194 log_debug("worker ["PID_FMT"] exited", pid);
1195 else
1196 log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
1197 } else if (WIFSIGNALED(status)) {
76341acc 1198 log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
e82e8fa5
TG
1199 } else if (WIFSTOPPED(status)) {
1200 log_info("worker ["PID_FMT"] stopped", pid);
f29328d6 1201 continue;
e82e8fa5
TG
1202 } else if (WIFCONTINUED(status)) {
1203 log_info("worker ["PID_FMT"] continued", pid);
f29328d6 1204 continue;
e82e8fa5
TG
1205 } else
1206 log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);
1207
1208 if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
1209 if (worker->event) {
1210 log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
1211 /* delete state from disk */
1212 udev_device_delete_db(worker->event->dev);
1213 udev_device_tag_index(worker->event->dev, NULL, false);
1214 /* forward kernel event without amending it */
c0c6806b 1215 udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
e82e8fa5
TG
1216 }
1217 }
1218
1219 worker_free(worker);
912541b0 1220 }
e82e8fa5 1221
8302fe5a
TG
1222 /* we can start new workers, try to schedule events */
1223 event_queue_start(manager);
1224
e82e8fa5 1225 return 1;
f27125f9 1226}
1227
693d371d
TG
1228static int on_post(sd_event_source *s, void *userdata) {
1229 Manager *manager = userdata;
1230 int r;
1231
1232 assert(manager);
1233
40a57716 1234 if (LIST_IS_EMPTY(manager->events)) {
693d371d
TG
1235 /* no pending events */
1236 if (!hashmap_isempty(manager->workers)) {
1237 /* there are idle workers */
1238 log_debug("cleanup idle workers");
1239 manager_kill_workers(manager);
1240 } else {
1241 /* we are idle */
1242 if (manager->exit) {
1243 r = sd_event_exit(manager->event, 0);
1244 if (r < 0)
1245 return r;
1246 } else if (manager->cgroup)
1247 /* cleanup possible left-over processes in our cgroup */
1d98fef1 1248 cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
693d371d
TG
1249 }
1250 }
1251
1252 return 1;
1253}
1254
fcff1e72 1255static int listen_fds(int *rctrl, int *rnetlink) {
8e766630 1256 _cleanup_(udev_unrefp) struct udev *udev = NULL;
fcff1e72 1257 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1258 int fd, n, r;
912541b0 1259
fcff1e72
TG
1260 assert(rctrl);
1261 assert(rnetlink);
1262
912541b0 1263 n = sd_listen_fds(true);
fcff1e72
TG
1264 if (n < 0)
1265 return n;
912541b0
KS
1266
1267 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1268 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1269 if (ctrl_fd >= 0)
1270 return -EINVAL;
1271 ctrl_fd = fd;
912541b0
KS
1272 continue;
1273 }
1274
1275 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1276 if (netlink_fd >= 0)
1277 return -EINVAL;
1278 netlink_fd = fd;
912541b0
KS
1279 continue;
1280 }
1281
fcff1e72 1282 return -EINVAL;
912541b0
KS
1283 }
1284
f59118ec 1285 if (ctrl_fd < 0) {
8e766630 1286 _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;
f59118ec
TG
1287
1288 udev = udev_new();
1289 if (!udev)
1290 return -ENOMEM;
1291
1292 ctrl = udev_ctrl_new(udev);
1293 if (!ctrl)
1294 return log_error_errno(EINVAL, "error initializing udev control socket");
1295
1296 r = udev_ctrl_enable_receiving(ctrl);
1297 if (r < 0)
1298 return log_error_errno(EINVAL, "error binding udev control socket");
1299
1300 fd = udev_ctrl_get_fd(ctrl);
1301 if (fd < 0)
1302 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1303
f59118ec
TG
1304 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1305 if (ctrl_fd < 0)
1306 return log_error_errno(errno, "could not dup ctrl fd: %m");
1307 }
1308
1309 if (netlink_fd < 0) {
8e766630 1310 _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;
f59118ec
TG
1311
1312 if (!udev) {
1313 udev = udev_new();
1314 if (!udev)
1315 return -ENOMEM;
1316 }
1317
1318 monitor = udev_monitor_new_from_netlink(udev, "kernel");
1319 if (!monitor)
1320 return log_error_errno(EINVAL, "error initializing netlink socket");
1321
1322 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1323
1324 r = udev_monitor_enable_receiving(monitor);
1325 if (r < 0)
1326 return log_error_errno(EINVAL, "error binding netlink socket");
1327
1328 fd = udev_monitor_get_fd(monitor);
1329 if (fd < 0)
1330 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1331
1332 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1333 if (netlink_fd < 0)
f59118ec
TG
1334 return log_error_errno(errno, "could not dup netlink fd: %m");
1335 }
fcff1e72
TG
1336
1337 *rctrl = ctrl_fd;
1338 *rnetlink = netlink_fd;
912541b0 1339
912541b0 1340 return 0;
7459bcdc
KS
1341}
1342
e6f86cac 1343/*
3f85ef0f 1344 * read the kernel command line, in case we need to get into debug mode
1d84ad94
LP
1345 * udev.log_priority=<level> syslog priority
1346 * udev.children_max=<number of workers> events are fully serialized if set to 1
1347 * udev.exec_delay=<number of seconds> delay execution of every executed program
1348 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1349 */
96287a49 1350static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
92e72467 1351 int r = 0;
e6f86cac 1352
614a823c 1353 assert(key);
e6f86cac 1354
614a823c
TG
1355 if (!value)
1356 return 0;
e6f86cac 1357
1d84ad94
LP
1358 if (proc_cmdline_key_streq(key, "udev.log_priority")) {
1359
1360 if (proc_cmdline_value_missing(key, value))
1361 return 0;
1362
92e72467
ZJS
1363 r = util_log_priority(value);
1364 if (r >= 0)
1365 log_set_max_level(r);
1d84ad94
LP
1366
1367 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1368
1369 if (proc_cmdline_value_missing(key, value))
1370 return 0;
1371
92e72467
ZJS
1372 r = safe_atou64(value, &arg_event_timeout_usec);
1373 if (r >= 0) {
1374 arg_event_timeout_usec *= USEC_PER_SEC;
1375 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1376 }
1d84ad94
LP
1377
1378 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1379
1380 if (proc_cmdline_value_missing(key, value))
1381 return 0;
1382
020328e1 1383 r = safe_atou(value, &arg_children_max);
1d84ad94
LP
1384
1385 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1386
1387 if (proc_cmdline_value_missing(key, value))
1388 return 0;
1389
614a823c 1390 r = safe_atoi(value, &arg_exec_delay);
1d84ad94
LP
1391
1392 } else if (startswith(key, "udev."))
92e72467 1393 log_warning("Unknown udev kernel command line option \"%s\"", key);
614a823c 1394
92e72467
ZJS
1395 if (r < 0)
1396 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1d84ad94 1397
614a823c 1398 return 0;
e6f86cac
KS
1399}
1400
/* Print the command line usage summary to stdout. */
static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}
1415
bba7a484 1416static int parse_argv(int argc, char *argv[]) {
912541b0 1417 static const struct option options[] = {
bba7a484
TG
1418 { "daemon", no_argument, NULL, 'd' },
1419 { "debug", no_argument, NULL, 'D' },
1420 { "children-max", required_argument, NULL, 'c' },
1421 { "exec-delay", required_argument, NULL, 'e' },
1422 { "event-timeout", required_argument, NULL, 't' },
1423 { "resolve-names", required_argument, NULL, 'N' },
1424 { "help", no_argument, NULL, 'h' },
1425 { "version", no_argument, NULL, 'V' },
912541b0
KS
1426 {}
1427 };
689a97f5 1428
bba7a484 1429 int c;
689a97f5 1430
bba7a484
TG
1431 assert(argc >= 0);
1432 assert(argv);
912541b0 1433
e14b6f21 1434 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1435 int r;
912541b0 1436
bba7a484 1437 switch (c) {
912541b0 1438
912541b0 1439 case 'd':
bba7a484 1440 arg_daemonize = true;
912541b0
KS
1441 break;
1442 case 'c':
020328e1 1443 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1444 if (r < 0)
1445 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1446 break;
1447 case 'e':
6f5cf8a8
TG
1448 r = safe_atoi(optarg, &arg_exec_delay);
1449 if (r < 0)
1450 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1451 break;
9719859c 1452 case 't':
f1e8664e
TG
1453 r = safe_atou64(optarg, &arg_event_timeout_usec);
1454 if (r < 0)
65fea570 1455 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1456 else {
1457 arg_event_timeout_usec *= USEC_PER_SEC;
1458 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1459 }
9719859c 1460 break;
912541b0 1461 case 'D':
bba7a484 1462 arg_debug = true;
912541b0
KS
1463 break;
1464 case 'N':
090be865 1465 if (streq(optarg, "early")) {
bba7a484 1466 arg_resolve_names = 1;
090be865 1467 } else if (streq(optarg, "late")) {
bba7a484 1468 arg_resolve_names = 0;
090be865 1469 } else if (streq(optarg, "never")) {
bba7a484 1470 arg_resolve_names = -1;
912541b0 1471 } else {
9f6445e3 1472 log_error("resolve-names must be early, late or never");
bba7a484 1473 return 0;
912541b0
KS
1474 }
1475 break;
1476 case 'h':
ed216e1f 1477 help();
bba7a484 1478 return 0;
912541b0 1479 case 'V':
948aaa7c 1480 printf("%s\n", PACKAGE_VERSION);
bba7a484
TG
1481 return 0;
1482 case '?':
1483 return -EINVAL;
912541b0 1484 default:
bba7a484
TG
1485 assert_not_reached("Unhandled option");
1486
912541b0
KS
1487 }
1488 }
1489
bba7a484
TG
1490 return 1;
1491}
1492
b7f74dd4 1493static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1494 _cleanup_(manager_freep) Manager *manager = NULL;
11b1dd8c 1495 int r, fd_worker, one = 1;
c0c6806b
TG
1496
1497 assert(ret);
11b1dd8c
TG
1498 assert(fd_ctrl >= 0);
1499 assert(fd_uevent >= 0);
c0c6806b
TG
1500
1501 manager = new0(Manager, 1);
1502 if (!manager)
1503 return log_oom();
1504
e237d8cb
TG
1505 manager->fd_inotify = -1;
1506 manager->worker_watch[WRITE_END] = -1;
1507 manager->worker_watch[READ_END] = -1;
1508
c0c6806b
TG
1509 manager->udev = udev_new();
1510 if (!manager->udev)
1511 return log_error_errno(errno, "could not allocate udev context: %m");
1512
b2d21d93
TG
1513 udev_builtin_init(manager->udev);
1514
ecb17862
TG
1515 manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
1516 if (!manager->rules)
1517 return log_error_errno(ENOMEM, "error reading rules");
1518
40a57716 1519 LIST_HEAD_INIT(manager->events);
ecb17862
TG
1520 udev_list_init(manager->udev, &manager->properties, true);
1521
c26d1879
TG
1522 manager->cgroup = cgroup;
1523
f59118ec
TG
1524 manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
1525 if (!manager->ctrl)
1526 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1527
f59118ec
TG
1528 manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
1529 if (!manager->monitor)
1530 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1531
1532 /* unnamed socket from workers to the main daemon */
1533 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1534 if (r < 0)
1535 return log_error_errno(errno, "error creating socketpair: %m");
1536
693d371d 1537 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1538
693d371d 1539 r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
e237d8cb
TG
1540 if (r < 0)
1541 return log_error_errno(errno, "could not enable SO_PASSCRED: %m");
1542
1543 manager->fd_inotify = udev_watch_init(manager->udev);
1544 if (manager->fd_inotify < 0)
1545 return log_error_errno(ENOMEM, "error initializing inotify");
1546
1547 udev_watch_restore(manager->udev);
1548
1549 /* block and listen to all signals on signalfd */
72c0a2c2 1550 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1551
49f997f3
TG
1552 r = sd_event_default(&manager->event);
1553 if (r < 0)
709f6e46 1554 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1555
693d371d
TG
1556 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1557 if (r < 0)
1558 return log_error_errno(r, "error creating sigint event source: %m");
1559
1560 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1561 if (r < 0)
1562 return log_error_errno(r, "error creating sigterm event source: %m");
1563
1564 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1565 if (r < 0)
1566 return log_error_errno(r, "error creating sighup event source: %m");
1567
1568 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1569 if (r < 0)
1570 return log_error_errno(r, "error creating sigchld event source: %m");
1571
1572 r = sd_event_set_watchdog(manager->event, true);
1573 if (r < 0)
1574 return log_error_errno(r, "error creating watchdog event source: %m");
1575
11b1dd8c 1576 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1577 if (r < 0)
1578 return log_error_errno(r, "error creating ctrl event source: %m");
1579
1580 /* This needs to be after the inotify and uevent handling, to make sure
1581 * that the ping is send back after fully processing the pending uevents
1582 * (including the synthetic ones we may create due to inotify events).
1583 */
1584 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1585 if (r < 0)
1586 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1587
1588 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1589 if (r < 0)
1590 return log_error_errno(r, "error creating inotify event source: %m");
1591
11b1dd8c 1592 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1593 if (r < 0)
1594 return log_error_errno(r, "error creating uevent event source: %m");
1595
1596 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1597 if (r < 0)
1598 return log_error_errno(r, "error creating worker event source: %m");
1599
1600 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1601 if (r < 0)
1602 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1603
1cc6c93a 1604 *ret = TAKE_PTR(manager);
11b1dd8c 1605
86c3bece 1606 return 0;
c0c6806b
TG
1607}
1608
077fc5e2 1609static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1610 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1611 int r;
1612
1613 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1614 if (r < 0) {
1615 r = log_error_errno(r, "failed to allocate manager object: %m");
1616 goto exit;
1617 }
1618
1619 r = udev_rules_apply_static_dev_perms(manager->rules);
1620 if (r < 0)
1621 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1622
1ef72b55
MS
1623 (void) sd_notifyf(false,
1624 "READY=1\n"
1625 "STATUS=Processing with %u children at max", arg_children_max);
077fc5e2
DH
1626
1627 r = sd_event_loop(manager->event);
1628 if (r < 0) {
1629 log_error_errno(r, "event loop failed: %m");
1630 goto exit;
1631 }
1632
1633 sd_event_get_exit_code(manager->event, &r);
1634
1635exit:
1636 sd_notify(false,
1637 "STOPPING=1\n"
1638 "STATUS=Shutting down...");
1639 if (manager)
1640 udev_ctrl_cleanup(manager->ctrl);
1641 return r;
1642}
1643
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        /* set up logging from udev config and environment before anything else */
        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        /* r <= 0: --help/--version handled, or invalid arguments */
        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                goto exit;

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                /* default worker count: 8 + 2 per CPU, capped by available
                 * memory (one worker per 128 MiB), but at least 10 */
                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        /* take over socket-activated fds, or create the sockets ourselves */
        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        /* child continues as the daemon */
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* parent exits; selinux/log teardown must happen in
                         * the parent before _exit() */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* best effort: protect the daemon from the OOM killer */
                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}