]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
udev: drop unused udev_watch struct
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
810adae9
LP
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
7fafc032
KS
6 */
7
7fafc032 8#include <errno.h>
618234a5
LP
9#include <fcntl.h>
10#include <getopt.h>
11#include <signal.h>
12#include <stdbool.h>
13#include <stddef.h>
7fafc032
KS
14#include <stdio.h>
15#include <stdlib.h>
16#include <string.h>
618234a5 17#include <sys/epoll.h>
3ebdb81e 18#include <sys/file.h>
618234a5
LP
19#include <sys/inotify.h>
20#include <sys/ioctl.h>
21#include <sys/mount.h>
1e03b754 22#include <sys/prctl.h>
1e03b754 23#include <sys/signalfd.h>
618234a5 24#include <sys/socket.h>
dc117daa 25#include <sys/stat.h>
618234a5
LP
26#include <sys/time.h>
27#include <sys/wait.h>
28#include <unistd.h>
7fafc032 29
392ef7a2 30#include "sd-daemon.h"
693d371d 31#include "sd-event.h"
8314de1d 32
b5efdb8a 33#include "alloc-util.h"
194bbe33 34#include "cgroup-util.h"
618234a5 35#include "cpu-set-util.h"
5ba2dc25 36#include "dev-setup.h"
3ffd4af2 37#include "fd-util.h"
a5c32cff 38#include "fileio.h"
f97b34a6 39#include "format-util.h"
f4f15635 40#include "fs-util.h"
a505965d 41#include "hashmap.h"
c004493c 42#include "io-util.h"
40a57716 43#include "list.h"
618234a5 44#include "netlink-util.h"
6bedfcbb 45#include "parse-util.h"
4e731273 46#include "proc-cmdline.h"
618234a5
LP
47#include "process-util.h"
48#include "selinux-util.h"
49#include "signal-util.h"
8f328d36 50#include "socket-util.h"
07630cea 51#include "string-util.h"
618234a5
LP
52#include "terminal-util.h"
53#include "udev-util.h"
54#include "udev.h"
ee104e11 55#include "user-util.h"
7fafc032 56
bba7a484
TG
57static bool arg_debug = false;
58static int arg_daemonize = false;
59static int arg_resolve_names = 1;
020328e1 60static unsigned arg_children_max;
bba7a484
TG
61static int arg_exec_delay;
62static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
63static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
c0c6806b
TG
64
65typedef struct Manager {
66 struct udev *udev;
693d371d 67 sd_event *event;
c0c6806b 68 Hashmap *workers;
40a57716 69 LIST_HEAD(struct event, events);
c26d1879 70 const char *cgroup;
cb49a4f2 71 pid_t pid; /* the process that originally allocated the manager object */
c0c6806b 72
ecb17862 73 struct udev_rules *rules;
c0c6806b
TG
74 struct udev_list properties;
75
76 struct udev_monitor *monitor;
77 struct udev_ctrl *ctrl;
78 struct udev_ctrl_connection *ctrl_conn_blocking;
e237d8cb 79 int fd_inotify;
e237d8cb
TG
80 int worker_watch[2];
81
693d371d
TG
82 sd_event_source *ctrl_event;
83 sd_event_source *uevent_event;
84 sd_event_source *inotify_event;
85
7c4c7e89
TG
86 usec_t last_usec;
87
c0c6806b 88 bool stop_exec_queue:1;
c0c6806b
TG
89 bool exit:1;
90} Manager;
1e03b754 91
1e03b754 92enum event_state {
912541b0
KS
93 EVENT_UNDEF,
94 EVENT_QUEUED,
95 EVENT_RUNNING,
1e03b754
KS
96};
97
98struct event {
40a57716 99 LIST_FIELDS(struct event, event);
cb49a4f2 100 Manager *manager;
912541b0
KS
101 struct udev *udev;
102 struct udev_device *dev;
6969c349 103 struct udev_device *dev_kernel;
c6aa11f2 104 struct worker *worker;
912541b0 105 enum event_state state;
912541b0
KS
106 unsigned long long int delaying_seqnum;
107 unsigned long long int seqnum;
108 const char *devpath;
109 size_t devpath_len;
110 const char *devpath_old;
111 dev_t devnum;
912541b0 112 int ifindex;
ea6039a3 113 bool is_block;
693d371d
TG
114 sd_event_source *timeout_warning;
115 sd_event_source *timeout;
1e03b754
KS
116};
117
ecb17862 118static void event_queue_cleanup(Manager *manager, enum event_state type);
ff2c503d 119
1e03b754 120enum worker_state {
912541b0
KS
121 WORKER_UNDEF,
122 WORKER_RUNNING,
123 WORKER_IDLE,
124 WORKER_KILLED,
1e03b754
KS
125};
126
127struct worker {
c0c6806b 128 Manager *manager;
912541b0
KS
129 int refcount;
130 pid_t pid;
131 struct udev_monitor *monitor;
132 enum worker_state state;
133 struct event *event;
1e03b754
KS
134};
135
136/* passed from worker to main process */
137struct worker_message {
1e03b754
KS
138};
139
c6aa11f2 140static void event_free(struct event *event) {
cb49a4f2
TG
141 int r;
142
c6aa11f2
TG
143 if (!event)
144 return;
40a57716 145 assert(event->manager);
c6aa11f2 146
40a57716 147 LIST_REMOVE(event, event->manager->events, event);
912541b0 148 udev_device_unref(event->dev);
6969c349 149 udev_device_unref(event->dev_kernel);
c6aa11f2 150
693d371d
TG
151 sd_event_source_unref(event->timeout_warning);
152 sd_event_source_unref(event->timeout);
153
c6aa11f2
TG
154 if (event->worker)
155 event->worker->event = NULL;
156
40a57716 157 if (LIST_IS_EMPTY(event->manager->events)) {
cb49a4f2 158 /* only clean up the queue from the process that created it */
df0ff127 159 if (event->manager->pid == getpid_cached()) {
cb49a4f2
TG
160 r = unlink("/run/udev/queue");
161 if (r < 0)
162 log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
163 }
164 }
165
912541b0 166 free(event);
aa8734ff 167}
7a770250 168
c6aa11f2
TG
169static void worker_free(struct worker *worker) {
170 if (!worker)
171 return;
bc113de9 172
c0c6806b
TG
173 assert(worker->manager);
174
4a0b58c4 175 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
912541b0 176 udev_monitor_unref(worker->monitor);
c6aa11f2
TG
177 event_free(worker->event);
178
c6aa11f2 179 free(worker);
ff2c503d
KS
180}
181
c0c6806b 182static void manager_workers_free(Manager *manager) {
a505965d
TG
183 struct worker *worker;
184 Iterator i;
ff2c503d 185
c0c6806b
TG
186 assert(manager);
187
188 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 189 worker_free(worker);
a505965d 190
c0c6806b 191 manager->workers = hashmap_free(manager->workers);
fc465079
KS
192}
193
/* Allocate a worker record for an already-forked child process and register
 * it in the manager's PID-keyed worker map.
 *
 * worker_monitor: the netlink monitor created for this worker; the parent's
 * copy of the socket is disconnected here, but a reference is kept so the
 * address stays usable for unicast sends to the child.
 * pid: PID of the forked worker (must be a real child, hence > 1).
 *
 * On success *ret takes ownership of the new struct worker and 0 is
 * returned; on error a negative errno-style code is returned and the
 * partially initialized worker is freed by the _cleanup_free_ attribute. */
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->refcount = 1;
        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        /* hashmap_put succeeded: the map now references the worker, so
         * disarm the cleanup handler and hand ownership to the caller */
        *ret = TAKE_PTR(worker);

        return 0;
}
226
4fa4d885
TG
227static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
228 struct event *event = userdata;
229
230 assert(event);
231 assert(event->worker);
232
233 kill_and_sigcont(event->worker->pid, SIGKILL);
234 event->worker->state = WORKER_KILLED;
235
236 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
237
238 return 1;
239}
240
/* Soft-timeout handler: fires before the hard kill timeout (at
 * arg_event_timeout_warn_usec) and only logs a warning, giving the admin a
 * chance to notice a stuck event before the worker is killed. */
static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}
250
/* Hand a queued event to an (idle or freshly forked) worker: link the two
 * structs together, mark both as running, and arm the warning and kill
 * timers relative to the current monotonic time. Timer setup failures are
 * deliberately ignored — the event still runs, just without a timeout. */
static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        /* an event may be attached to exactly one worker, and vice versa */
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}
276
e237d8cb
TG
/* Tear down the whole Manager. Safe to call with NULL. Event sources are
 * released before the event loop itself; workers and queued events are
 * freed before the monitor/ctrl objects they may reference. Called both
 * from the main daemon on exit and from forked workers to drop inherited
 * parent state (see worker_spawn). */
static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        /* detach event sources before unreffing the loop below */
        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
306
9a73bd7c
TG
307static int worker_send_message(int fd) {
308 struct worker_message message = {};
309
310 return loop_write(fd, &message, sizeof(message), false);
311}
312
fee854ee
RK
/* Decide whether the device node should be flock()ed while its event is
 * processed. Only block devices qualify; device-mapper ("dm-"), MD ("md")
 * and DRBD ("drbd") nodes are exempt. */
static bool shall_lock_device(struct udev_device *dev) {
        const char *name;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        name = udev_device_get_sysname(dev);

        if (startswith(name, "dm-"))
                return false;
        if (startswith(name, "md"))
                return false;
        if (startswith(name, "drbd"))
                return false;

        return true;
}
324
/* Fork a new worker process to handle 'event'.
 *
 * Parent side: registers the child via worker_new() and attaches the event
 * to it. Child side: drops all inherited manager state (event loop, control
 * socket, other workers/events), then enters a loop that processes the
 * initial device and afterwards waits on epoll for either further devices
 * unicast from the main daemon or SIGTERM. The child reports completion of
 * each event back over the worker_watch socketpair and never returns — it
 * _exit()s. */
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                /* NOTE(review): processing continues even on failure here —
                 * the worker is still forked; presumably it will then only
                 * handle the initial device. Confirm intended. */
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                /* === child === */
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                /* discard manager state inherited from the parent; the
                 * worker keeps only rules, properties and the write end of
                 * the worker_watch socketpair */
                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                /* block everything and take signals via signalfd instead */
                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "fail to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            shall_lock_device(dev)) {
                                struct udev_device *d = dev;

                                /* lock the whole disk, not the partition */
                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                /* fork failed: requeue the event so it is retried later */
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                /* === parent === */
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}
541
c0c6806b 542static void event_run(Manager *manager, struct event *event) {
a505965d
TG
543 struct worker *worker;
544 Iterator i;
912541b0 545
c0c6806b
TG
546 assert(manager);
547 assert(event);
548
549 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
550 ssize_t count;
551
552 if (worker->state != WORKER_IDLE)
553 continue;
554
c0c6806b 555 count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
912541b0 556 if (count < 0) {
1fa2f38f
ZJS
557 log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
558 worker->pid, count);
912541b0
KS
559 kill(worker->pid, SIGKILL);
560 worker->state = WORKER_KILLED;
561 continue;
562 }
39c19cf1 563 worker_attach_event(worker, event);
912541b0
KS
564 return;
565 }
566
c0c6806b 567 if (hashmap_size(manager->workers) >= arg_children_max) {
bba7a484 568 if (arg_children_max > 1)
c0c6806b 569 log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
912541b0
KS
570 return;
571 }
572
573 /* start new worker and pass initial device */
c0c6806b 574 worker_spawn(manager, event);
1e03b754
KS
575}
576
/* Wrap a freshly received uevent device into a struct event and append it
 * to the manager's queue. Takes ownership of 'dev' on success (the caller
 * unrefs it on failure). Creating the first queue entry also creates the
 * /run/udev/queue flag file that tells tools the queue is non-empty.
 * Returns 0 on success, -ENOMEM on allocation failure. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        /* keep a pristine copy of the kernel event, before rules ran */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        /* cache the fields is_devpath_busy() needs for dependency checks */
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}
622
c0c6806b 623static void manager_kill_workers(Manager *manager) {
a505965d
TG
624 struct worker *worker;
625 Iterator i;
1e03b754 626
c0c6806b
TG
627 assert(manager);
628
629 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
630 if (worker->state == WORKER_KILLED)
631 continue;
1e03b754 632
912541b0
KS
633 worker->state = WORKER_KILLED;
634 kill(worker->pid, SIGTERM);
635 }
1e03b754
KS
636}
637
/* lookup event for identical, parent, child device */
/* Returns true if 'event' must wait because an earlier event in the queue
 * touches the same device (same devnum/ifindex/devpath), a parent or child
 * devpath, or the event's old devpath. The seqnum of the blocking event is
 * cached in event->delaying_seqnum so subsequent scans can short-circuit
 * while that event is still queued. */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}
707
693d371d
TG
/* Armed by manager_exit(): if workers have not finished within the grace
 * period (30s), stop waiting and exit the event loop with -ETIMEDOUT. */
static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}
719
/* Begin orderly shutdown: stop accepting new work, flush what is queued,
 * ask workers to terminate, and arm a 30s fallback timer that forces the
 * event loop to exit if workers do not finish in time. */
static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}
753
/* reload requested, HUP signal received, rules changed, builtin changed */
/* Drop compiled rules and builtin state so they are lazily re-read by the
 * next event_queue_start(); running workers are killed because they hold
 * the old configuration. */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}
771
/* Walk the queue and dispatch every startable event via event_run().
 * Before dispatching, rules/builtins are revalidated (at most once every
 * 3 seconds) and lazily (re)compiled if manager_reload() dropped them.
 * Events whose device path conflicts with an already-running event are
 * left queued (see is_devpath_busy()). */
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        /* recompile rules if a reload dropped them */
        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event,event,manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}
812
ecb17862 813static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 814 struct event *event, *tmp;
ff2c503d 815
40a57716 816 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
817 if (match_type != EVENT_UNDEF && match_type != event->state)
818 continue;
ff2c503d 819
c6aa11f2 820 event_free(event);
912541b0 821 }
ff2c503d
KS
822}
823
/* Event-loop callback for the worker_watch socketpair: drain all pending
 * "event finished" notifications from workers. The (empty) message itself
 * carries no data; the sending worker is identified by the SCM_CREDENTIALS
 * ancillary data, used to look it up in manager->workers. Each completed
 * worker is flipped back to WORKER_IDLE (unless already killed) and its
 * event is freed; finally the queue is re-scanned for dispatchable work. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                /* pick the sender's credentials out of the control data */
                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
895
896static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 897 Manager *manager = userdata;
e82e8fa5
TG
898 struct udev_device *dev;
899 int r;
900
c0c6806b 901 assert(manager);
e82e8fa5 902
c0c6806b 903 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
904 if (dev) {
905 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 906 r = event_queue_insert(manager, dev);
e82e8fa5
TG
907 if (r < 0)
908 udev_device_unref(dev);
8302fe5a
TG
909 else
910 /* we have fresh events, try to schedule them */
911 event_queue_start(manager);
e82e8fa5
TG
912 }
913
914 return 1;
88f4b648
KS
915}
916
/* receive the udevd message from userspace */
/* Event-loop callback for the udev control socket (udevadm control ...).
 * Accepts one connection, reads one control message and applies whatever
 * settings it carries: log level, exec-queue stop/start, reload, global
 * properties (KEY=value, empty value unsets), children-max, ping, exit.
 * Workers are killed after changes they would otherwise not pick up
 * (log level, properties). */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                /* restart workers so they inherit the new level */
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        /* split "KEY=value" in place; empty value unsets */
                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}
4a231017 1006
f3a740a5 1007static int synthesize_change(struct udev_device *dev) {
edd32000 1008 char filename[UTIL_PATH_SIZE];
f3a740a5 1009 int r;
edd32000 1010
f3a740a5 1011 if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
ede34445 1012 streq_ptr("disk", udev_device_get_devtype(dev)) &&
638ca89c 1013 !startswith(udev_device_get_sysname(dev), "dm-")) {
e9fc29f4
KS
1014 bool part_table_read = false;
1015 bool has_partitions = false;
ede34445 1016 int fd;
f3a740a5 1017 struct udev *udev = udev_device_get_udev(dev);
8e766630 1018 _cleanup_(udev_enumerate_unrefp) struct udev_enumerate *e = NULL;
f3a740a5
KS
1019 struct udev_list_entry *item;
1020
ede34445 1021 /*
e9fc29f4
KS
1022 * Try to re-read the partition table. This only succeeds if
1023 * none of the devices is busy. The kernel returns 0 if no
1024 * partition table is found, and we will not get an event for
1025 * the disk.
ede34445 1026 */
02ba8fb3 1027 fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
ede34445 1028 if (fd >= 0) {
02ba8fb3
KS
1029 r = flock(fd, LOCK_EX|LOCK_NB);
1030 if (r >= 0)
1031 r = ioctl(fd, BLKRRPART, 0);
1032
ede34445
KS
1033 close(fd);
1034 if (r >= 0)
e9fc29f4 1035 part_table_read = true;
ede34445
KS
1036 }
1037
e9fc29f4 1038 /* search for partitions */
f3a740a5
KS
1039 e = udev_enumerate_new(udev);
1040 if (!e)
1041 return -ENOMEM;
1042
1043 r = udev_enumerate_add_match_parent(e, dev);
1044 if (r < 0)
1045 return r;
1046
1047 r = udev_enumerate_add_match_subsystem(e, "block");
1048 if (r < 0)
1049 return r;
1050
1051 r = udev_enumerate_scan_devices(e);
47a3fa0f
TA
1052 if (r < 0)
1053 return r;
e9fc29f4
KS
1054
1055 udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
8e766630 1056 _cleanup_(udev_device_unrefp) struct udev_device *d = NULL;
e9fc29f4
KS
1057
1058 d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
1059 if (!d)
1060 continue;
1061
1062 if (!streq_ptr("partition", udev_device_get_devtype(d)))
1063 continue;
1064
1065 has_partitions = true;
1066 break;
1067 }
1068
1069 /*
1070 * We have partitions and re-read the table, the kernel already sent
1071 * out a "change" event for the disk, and "remove/add" for all
1072 * partitions.
1073 */
1074 if (part_table_read && has_partitions)
1075 return 0;
1076
1077 /*
1078 * We have partitions but re-reading the partition table did not
1079 * work, synthesize "change" for the disk and all partitions.
1080 */
1081 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
1082 strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
4c1fc3e4 1083 write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
e9fc29f4 1084
f3a740a5 1085 udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
8e766630 1086 _cleanup_(udev_device_unrefp) struct udev_device *d = NULL;
f3a740a5
KS
1087
1088 d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
1089 if (!d)
1090 continue;
1091
1092 if (!streq_ptr("partition", udev_device_get_devtype(d)))
1093 continue;
1094
1095 log_debug("device %s closed, synthesising partition '%s' 'change'",
1096 udev_device_get_devnode(dev), udev_device_get_devnode(d));
1097 strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
4c1fc3e4 1098 write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
f3a740a5 1099 }
ede34445
KS
1100
1101 return 0;
f3a740a5
KS
1102 }
1103
ede34445
KS
1104 log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
1105 strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
4c1fc3e4 1106 write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
ede34445 1107
f3a740a5 1108 return 0;
edd32000
KS
1109}
1110
e82e8fa5 1111static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 1112 Manager *manager = userdata;
0254e944 1113 union inotify_event_buffer buffer;
f7c1ad4f
LP
1114 struct inotify_event *e;
1115 ssize_t l;
912541b0 1116
c0c6806b 1117 assert(manager);
e82e8fa5
TG
1118
1119 l = read(fd, &buffer, sizeof(buffer));
f7c1ad4f 1120 if (l < 0) {
3742095b 1121 if (IN_SET(errno, EAGAIN, EINTR))
e82e8fa5 1122 return 1;
912541b0 1123
f7c1ad4f 1124 return log_error_errno(errno, "Failed to read inotify fd: %m");
912541b0
KS
1125 }
1126
f7c1ad4f 1127 FOREACH_INOTIFY_EVENT(e, buffer, l) {
8e766630 1128 _cleanup_(udev_device_unrefp) struct udev_device *dev = NULL;
912541b0 1129
c0c6806b 1130 dev = udev_watch_lookup(manager->udev, e->wd);
edd32000
KS
1131 if (!dev)
1132 continue;
912541b0 1133
f7c1ad4f 1134 log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
a8389097 1135 if (e->mask & IN_CLOSE_WRITE) {
edd32000 1136 synthesize_change(dev);
a8389097
TG
1137
1138 /* settle might be waiting on us to determine the queue
1139 * state. If we just handled an inotify event, we might have
1140 * generated a "change" event, but we won't have queued up
1141 * the resultant uevent yet. Do that.
1142 */
c0c6806b 1143 on_uevent(NULL, -1, 0, manager);
a8389097 1144 } else if (e->mask & IN_IGNORED)
c0c6806b 1145 udev_watch_end(manager->udev, dev);
912541b0
KS
1146 }
1147
e82e8fa5 1148 return 1;
bd284db1
SJR
1149}
1150
0561329d 1151static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1152 Manager *manager = userdata;
1153
1154 assert(manager);
1155
62d43dac 1156 manager_exit(manager);
912541b0 1157
e82e8fa5
TG
1158 return 1;
1159}
912541b0 1160
0561329d 1161static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1162 Manager *manager = userdata;
1163
1164 assert(manager);
1165
62d43dac 1166 manager_reload(manager);
912541b0 1167
e82e8fa5
TG
1168 return 1;
1169}
912541b0 1170
e82e8fa5 1171static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1172 Manager *manager = userdata;
1173
1174 assert(manager);
1175
e82e8fa5
TG
1176 for (;;) {
1177 pid_t pid;
1178 int status;
1179 struct worker *worker;
d1317d02 1180
e82e8fa5
TG
1181 pid = waitpid(-1, &status, WNOHANG);
1182 if (pid <= 0)
f29328d6 1183 break;
e82e8fa5 1184
4a0b58c4 1185 worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
e82e8fa5
TG
1186 if (!worker) {
1187 log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
f29328d6 1188 continue;
912541b0 1189 }
e82e8fa5
TG
1190
1191 if (WIFEXITED(status)) {
1192 if (WEXITSTATUS(status) == 0)
1193 log_debug("worker ["PID_FMT"] exited", pid);
1194 else
1195 log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
1196 } else if (WIFSIGNALED(status)) {
76341acc 1197 log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
e82e8fa5
TG
1198 } else if (WIFSTOPPED(status)) {
1199 log_info("worker ["PID_FMT"] stopped", pid);
f29328d6 1200 continue;
e82e8fa5
TG
1201 } else if (WIFCONTINUED(status)) {
1202 log_info("worker ["PID_FMT"] continued", pid);
f29328d6 1203 continue;
e82e8fa5
TG
1204 } else
1205 log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);
1206
1207 if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
1208 if (worker->event) {
1209 log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
1210 /* delete state from disk */
1211 udev_device_delete_db(worker->event->dev);
1212 udev_device_tag_index(worker->event->dev, NULL, false);
1213 /* forward kernel event without amending it */
c0c6806b 1214 udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
e82e8fa5
TG
1215 }
1216 }
1217
1218 worker_free(worker);
912541b0 1219 }
e82e8fa5 1220
8302fe5a
TG
1221 /* we can start new workers, try to schedule events */
1222 event_queue_start(manager);
1223
e82e8fa5 1224 return 1;
f27125f9 1225}
1226
693d371d
TG
1227static int on_post(sd_event_source *s, void *userdata) {
1228 Manager *manager = userdata;
1229 int r;
1230
1231 assert(manager);
1232
40a57716 1233 if (LIST_IS_EMPTY(manager->events)) {
693d371d
TG
1234 /* no pending events */
1235 if (!hashmap_isempty(manager->workers)) {
1236 /* there are idle workers */
1237 log_debug("cleanup idle workers");
1238 manager_kill_workers(manager);
1239 } else {
1240 /* we are idle */
1241 if (manager->exit) {
1242 r = sd_event_exit(manager->event, 0);
1243 if (r < 0)
1244 return r;
1245 } else if (manager->cgroup)
1246 /* cleanup possible left-over processes in our cgroup */
1d98fef1 1247 cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
693d371d
TG
1248 }
1249 }
1250
1251 return 1;
1252}
1253
fcff1e72 1254static int listen_fds(int *rctrl, int *rnetlink) {
8e766630 1255 _cleanup_(udev_unrefp) struct udev *udev = NULL;
fcff1e72 1256 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1257 int fd, n, r;
912541b0 1258
fcff1e72
TG
1259 assert(rctrl);
1260 assert(rnetlink);
1261
912541b0 1262 n = sd_listen_fds(true);
fcff1e72
TG
1263 if (n < 0)
1264 return n;
912541b0
KS
1265
1266 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1267 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1268 if (ctrl_fd >= 0)
1269 return -EINVAL;
1270 ctrl_fd = fd;
912541b0
KS
1271 continue;
1272 }
1273
1274 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1275 if (netlink_fd >= 0)
1276 return -EINVAL;
1277 netlink_fd = fd;
912541b0
KS
1278 continue;
1279 }
1280
fcff1e72 1281 return -EINVAL;
912541b0
KS
1282 }
1283
f59118ec 1284 if (ctrl_fd < 0) {
8e766630 1285 _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;
f59118ec
TG
1286
1287 udev = udev_new();
1288 if (!udev)
1289 return -ENOMEM;
1290
1291 ctrl = udev_ctrl_new(udev);
1292 if (!ctrl)
1293 return log_error_errno(EINVAL, "error initializing udev control socket");
1294
1295 r = udev_ctrl_enable_receiving(ctrl);
1296 if (r < 0)
1297 return log_error_errno(EINVAL, "error binding udev control socket");
1298
1299 fd = udev_ctrl_get_fd(ctrl);
1300 if (fd < 0)
1301 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1302
f59118ec
TG
1303 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1304 if (ctrl_fd < 0)
1305 return log_error_errno(errno, "could not dup ctrl fd: %m");
1306 }
1307
1308 if (netlink_fd < 0) {
8e766630 1309 _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;
f59118ec
TG
1310
1311 if (!udev) {
1312 udev = udev_new();
1313 if (!udev)
1314 return -ENOMEM;
1315 }
1316
1317 monitor = udev_monitor_new_from_netlink(udev, "kernel");
1318 if (!monitor)
1319 return log_error_errno(EINVAL, "error initializing netlink socket");
1320
1321 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1322
1323 r = udev_monitor_enable_receiving(monitor);
1324 if (r < 0)
1325 return log_error_errno(EINVAL, "error binding netlink socket");
1326
1327 fd = udev_monitor_get_fd(monitor);
1328 if (fd < 0)
1329 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1330
1331 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1332 if (netlink_fd < 0)
f59118ec
TG
1333 return log_error_errno(errno, "could not dup netlink fd: %m");
1334 }
fcff1e72
TG
1335
1336 *rctrl = ctrl_fd;
1337 *rnetlink = netlink_fd;
912541b0 1338
912541b0 1339 return 0;
7459bcdc
KS
1340}
1341
e6f86cac 1342/*
3f85ef0f 1343 * read the kernel command line, in case we need to get into debug mode
1d84ad94
LP
1344 * udev.log_priority=<level> syslog priority
1345 * udev.children_max=<number of workers> events are fully serialized if set to 1
1346 * udev.exec_delay=<number of seconds> delay execution of every executed program
1347 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1348 */
96287a49 1349static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
92e72467 1350 int r = 0;
e6f86cac 1351
614a823c 1352 assert(key);
e6f86cac 1353
614a823c
TG
1354 if (!value)
1355 return 0;
e6f86cac 1356
1d84ad94
LP
1357 if (proc_cmdline_key_streq(key, "udev.log_priority")) {
1358
1359 if (proc_cmdline_value_missing(key, value))
1360 return 0;
1361
92e72467
ZJS
1362 r = util_log_priority(value);
1363 if (r >= 0)
1364 log_set_max_level(r);
1d84ad94
LP
1365
1366 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1367
1368 if (proc_cmdline_value_missing(key, value))
1369 return 0;
1370
92e72467
ZJS
1371 r = safe_atou64(value, &arg_event_timeout_usec);
1372 if (r >= 0) {
1373 arg_event_timeout_usec *= USEC_PER_SEC;
1374 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1375 }
1d84ad94
LP
1376
1377 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1378
1379 if (proc_cmdline_value_missing(key, value))
1380 return 0;
1381
020328e1 1382 r = safe_atou(value, &arg_children_max);
1d84ad94
LP
1383
1384 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1385
1386 if (proc_cmdline_value_missing(key, value))
1387 return 0;
1388
614a823c 1389 r = safe_atoi(value, &arg_exec_delay);
1d84ad94
LP
1390
1391 } else if (startswith(key, "udev."))
92e72467 1392 log_warning("Unknown udev kernel command line option \"%s\"", key);
614a823c 1393
92e72467
ZJS
1394 if (r < 0)
1395 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1d84ad94 1396
614a823c 1397 return 0;
e6f86cac
KS
1398}
1399
37ec0fdd
LP
1400static int help(void) {
1401 _cleanup_free_ char *link = NULL;
1402 int r;
1403
1404 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1405 if (r < 0)
1406 return log_oom();
1407
ed216e1f
TG
1408 printf("%s [OPTIONS...]\n\n"
1409 "Manages devices.\n\n"
5ac0162c 1410 " -h --help Print this message\n"
2d19c17e
MF
1411 " -V --version Print version of the program\n"
1412 " -d --daemon Detach and run in the background\n"
1413 " -D --debug Enable debug output\n"
1414 " -c --children-max=INT Set maximum number of workers\n"
1415 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1416 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1417 " -N --resolve-names=early|late|never\n"
5ac0162c 1418 " When to resolve users and groups\n"
37ec0fdd
LP
1419 "\nSee the %s for details.\n"
1420 , program_invocation_short_name
1421 , link
1422 );
1423
1424 return 0;
ed216e1f
TG
1425}
1426
bba7a484 1427static int parse_argv(int argc, char *argv[]) {
912541b0 1428 static const struct option options[] = {
bba7a484
TG
1429 { "daemon", no_argument, NULL, 'd' },
1430 { "debug", no_argument, NULL, 'D' },
1431 { "children-max", required_argument, NULL, 'c' },
1432 { "exec-delay", required_argument, NULL, 'e' },
1433 { "event-timeout", required_argument, NULL, 't' },
1434 { "resolve-names", required_argument, NULL, 'N' },
1435 { "help", no_argument, NULL, 'h' },
1436 { "version", no_argument, NULL, 'V' },
912541b0
KS
1437 {}
1438 };
689a97f5 1439
bba7a484 1440 int c;
689a97f5 1441
bba7a484
TG
1442 assert(argc >= 0);
1443 assert(argv);
912541b0 1444
e14b6f21 1445 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1446 int r;
912541b0 1447
bba7a484 1448 switch (c) {
912541b0 1449
912541b0 1450 case 'd':
bba7a484 1451 arg_daemonize = true;
912541b0
KS
1452 break;
1453 case 'c':
020328e1 1454 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1455 if (r < 0)
1456 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1457 break;
1458 case 'e':
6f5cf8a8
TG
1459 r = safe_atoi(optarg, &arg_exec_delay);
1460 if (r < 0)
1461 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1462 break;
9719859c 1463 case 't':
f1e8664e
TG
1464 r = safe_atou64(optarg, &arg_event_timeout_usec);
1465 if (r < 0)
65fea570 1466 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1467 else {
1468 arg_event_timeout_usec *= USEC_PER_SEC;
1469 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1470 }
9719859c 1471 break;
912541b0 1472 case 'D':
bba7a484 1473 arg_debug = true;
912541b0
KS
1474 break;
1475 case 'N':
090be865 1476 if (streq(optarg, "early")) {
bba7a484 1477 arg_resolve_names = 1;
090be865 1478 } else if (streq(optarg, "late")) {
bba7a484 1479 arg_resolve_names = 0;
090be865 1480 } else if (streq(optarg, "never")) {
bba7a484 1481 arg_resolve_names = -1;
912541b0 1482 } else {
9f6445e3 1483 log_error("resolve-names must be early, late or never");
bba7a484 1484 return 0;
912541b0
KS
1485 }
1486 break;
1487 case 'h':
37ec0fdd 1488 return help();
912541b0 1489 case 'V':
948aaa7c 1490 printf("%s\n", PACKAGE_VERSION);
bba7a484
TG
1491 return 0;
1492 case '?':
1493 return -EINVAL;
912541b0 1494 default:
bba7a484
TG
1495 assert_not_reached("Unhandled option");
1496
912541b0
KS
1497 }
1498 }
1499
bba7a484
TG
1500 return 1;
1501}
1502
b7f74dd4 1503static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1504 _cleanup_(manager_freep) Manager *manager = NULL;
11b1dd8c 1505 int r, fd_worker, one = 1;
c0c6806b
TG
1506
1507 assert(ret);
11b1dd8c
TG
1508 assert(fd_ctrl >= 0);
1509 assert(fd_uevent >= 0);
c0c6806b
TG
1510
1511 manager = new0(Manager, 1);
1512 if (!manager)
1513 return log_oom();
1514
e237d8cb
TG
1515 manager->fd_inotify = -1;
1516 manager->worker_watch[WRITE_END] = -1;
1517 manager->worker_watch[READ_END] = -1;
1518
c0c6806b
TG
1519 manager->udev = udev_new();
1520 if (!manager->udev)
1521 return log_error_errno(errno, "could not allocate udev context: %m");
1522
b2d21d93
TG
1523 udev_builtin_init(manager->udev);
1524
ecb17862
TG
1525 manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
1526 if (!manager->rules)
1527 return log_error_errno(ENOMEM, "error reading rules");
1528
40a57716 1529 LIST_HEAD_INIT(manager->events);
ecb17862
TG
1530 udev_list_init(manager->udev, &manager->properties, true);
1531
c26d1879
TG
1532 manager->cgroup = cgroup;
1533
f59118ec
TG
1534 manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
1535 if (!manager->ctrl)
1536 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1537
f59118ec
TG
1538 manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
1539 if (!manager->monitor)
1540 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1541
1542 /* unnamed socket from workers to the main daemon */
1543 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1544 if (r < 0)
1545 return log_error_errno(errno, "error creating socketpair: %m");
1546
693d371d 1547 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1548
693d371d 1549 r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
e237d8cb
TG
1550 if (r < 0)
1551 return log_error_errno(errno, "could not enable SO_PASSCRED: %m");
1552
1553 manager->fd_inotify = udev_watch_init(manager->udev);
1554 if (manager->fd_inotify < 0)
1555 return log_error_errno(ENOMEM, "error initializing inotify");
1556
1557 udev_watch_restore(manager->udev);
1558
1559 /* block and listen to all signals on signalfd */
72c0a2c2 1560 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1561
49f997f3
TG
1562 r = sd_event_default(&manager->event);
1563 if (r < 0)
709f6e46 1564 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1565
693d371d
TG
1566 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1567 if (r < 0)
1568 return log_error_errno(r, "error creating sigint event source: %m");
1569
1570 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1571 if (r < 0)
1572 return log_error_errno(r, "error creating sigterm event source: %m");
1573
1574 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1575 if (r < 0)
1576 return log_error_errno(r, "error creating sighup event source: %m");
1577
1578 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1579 if (r < 0)
1580 return log_error_errno(r, "error creating sigchld event source: %m");
1581
1582 r = sd_event_set_watchdog(manager->event, true);
1583 if (r < 0)
1584 return log_error_errno(r, "error creating watchdog event source: %m");
1585
11b1dd8c 1586 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1587 if (r < 0)
1588 return log_error_errno(r, "error creating ctrl event source: %m");
1589
1590 /* This needs to be after the inotify and uevent handling, to make sure
1591 * that the ping is send back after fully processing the pending uevents
1592 * (including the synthetic ones we may create due to inotify events).
1593 */
1594 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1595 if (r < 0)
1596 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1597
1598 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1599 if (r < 0)
1600 return log_error_errno(r, "error creating inotify event source: %m");
1601
11b1dd8c 1602 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1603 if (r < 0)
1604 return log_error_errno(r, "error creating uevent event source: %m");
1605
1606 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1607 if (r < 0)
1608 return log_error_errno(r, "error creating worker event source: %m");
1609
1610 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1611 if (r < 0)
1612 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1613
1cc6c93a 1614 *ret = TAKE_PTR(manager);
11b1dd8c 1615
86c3bece 1616 return 0;
c0c6806b
TG
1617}
1618
077fc5e2 1619static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1620 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1621 int r;
1622
1623 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1624 if (r < 0) {
1625 r = log_error_errno(r, "failed to allocate manager object: %m");
1626 goto exit;
1627 }
1628
1629 r = udev_rules_apply_static_dev_perms(manager->rules);
1630 if (r < 0)
1631 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1632
1ef72b55
MS
1633 (void) sd_notifyf(false,
1634 "READY=1\n"
1635 "STATUS=Processing with %u children at max", arg_children_max);
077fc5e2
DH
1636
1637 r = sd_event_loop(manager->event);
1638 if (r < 0) {
1639 log_error_errno(r, "event loop failed: %m");
1640 goto exit;
1641 }
1642
1643 sd_event_get_exit_code(manager->event, &r);
1644
1645exit:
1646 sd_notify(false,
1647 "STOPPING=1\n"
1648 "STATUS=Shutting down...");
1649 if (manager)
1650 udev_ctrl_cleanup(manager->ctrl);
1651 return r;
1652}
1653
1654int main(int argc, char *argv[]) {
c26d1879 1655 _cleanup_free_ char *cgroup = NULL;
efa1606e 1656 int fd_ctrl = -1, fd_uevent = -1;
e5d7bce1 1657 int r;
bba7a484 1658
bba7a484 1659 log_set_target(LOG_TARGET_AUTO);
b237a168 1660 udev_parse_config();
bba7a484
TG
1661 log_parse_environment();
1662 log_open();
1663
bba7a484
TG
1664 r = parse_argv(argc, argv);
1665 if (r <= 0)
1666 goto exit;
1667
1d84ad94 1668 r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
614a823c
TG
1669 if (r < 0)
1670 log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");
912541b0 1671
78d3e041
KS
1672 if (arg_debug) {
1673 log_set_target(LOG_TARGET_CONSOLE);
bba7a484 1674 log_set_max_level(LOG_DEBUG);
78d3e041 1675 }
bba7a484 1676
fba868fa
LP
1677 r = must_be_root();
1678 if (r < 0)
912541b0 1679 goto exit;
912541b0 1680
712cebf1
TG
1681 if (arg_children_max == 0) {
1682 cpu_set_t cpu_set;
e438c57a 1683 unsigned long mem_limit;
ebc164ef 1684
712cebf1 1685 arg_children_max = 8;
d457ff83 1686
ece174c5 1687 if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
920b52e4 1688 arg_children_max += CPU_COUNT(&cpu_set) * 2;
912541b0 1689
e438c57a
MW
1690 mem_limit = physical_memory() / (128LU*1024*1024);
1691 arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));
1692
712cebf1 1693 log_debug("set children_max to %u", arg_children_max);
d457ff83 1694 }
912541b0 1695
712cebf1
TG
1696 /* set umask before creating any file/directory */
1697 r = chdir("/");
1698 if (r < 0) {
1699 r = log_error_errno(errno, "could not change dir to /: %m");
1700 goto exit;
1701 }
194bbe33 1702
712cebf1 1703 umask(022);
912541b0 1704
c3dacc8b 1705 r = mac_selinux_init();
712cebf1
TG
1706 if (r < 0) {
1707 log_error_errno(r, "could not initialize labelling: %m");
1708 goto exit;
912541b0
KS
1709 }
1710
dae8b82e
ZJS
1711 r = mkdir_errno_wrapper("/run/udev", 0755);
1712 if (r < 0 && r != -EEXIST) {
1713 log_error_errno(r, "could not create /run/udev: %m");
712cebf1
TG
1714 goto exit;
1715 }
1716
03cfe0d5 1717 dev_setup(NULL, UID_INVALID, GID_INVALID);
912541b0 1718
c26d1879
TG
1719 if (getppid() == 1) {
1720 /* get our own cgroup, we regularly kill everything udev has left behind
1721 we only do this on systemd systems, and only if we are directly spawned
1722 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1723 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
11b9fb15 1724 if (r < 0) {
a2d61f07 1725 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
11b9fb15
TG
1726 log_debug_errno(r, "did not find dedicated cgroup: %m");
1727 else
1728 log_warning_errno(r, "failed to get cgroup: %m");
1729 }
c26d1879
TG
1730 }
1731
b7f74dd4
TG
1732 r = listen_fds(&fd_ctrl, &fd_uevent);
1733 if (r < 0) {
1734 r = log_error_errno(r, "could not listen on fds: %m");
1735 goto exit;
1736 }
1737
bba7a484 1738 if (arg_daemonize) {
912541b0 1739 pid_t pid;
912541b0 1740
948aaa7c 1741 log_info("starting version " PACKAGE_VERSION);
3cbb2057 1742
40e749b5 1743 /* connect /dev/null to stdin, stdout, stderr */
c76cf844
AK
1744 if (log_get_max_level() < LOG_DEBUG) {
1745 r = make_null_stdio();
1746 if (r < 0)
1747 log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
1748 }
1749
912541b0
KS
1750 pid = fork();
1751 switch (pid) {
1752 case 0:
1753 break;
1754 case -1:
6af5e6a4 1755 r = log_error_errno(errno, "fork of daemon failed: %m");
912541b0
KS
1756 goto exit;
1757 default:
f53d1fcd
TG
1758 mac_selinux_finish();
1759 log_close();
1760 _exit(EXIT_SUCCESS);
912541b0
KS
1761 }
1762
1763 setsid();
1764
ad118bda 1765 write_string_file("/proc/self/oom_score_adj", "-1000", 0);
7500cd5e 1766 }
912541b0 1767
077fc5e2 1768 r = run(fd_ctrl, fd_uevent, cgroup);
693d371d 1769
53921bfa 1770exit:
cc56fafe 1771 mac_selinux_finish();
baa30fbc 1772 log_close();
6af5e6a4 1773 return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
7fafc032 1774}