]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
tools/hwdb-update: allow downloads to fail
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
1298001e 3 * Copyright (C) 2004-2012 Kay Sievers <kay@vrfy.org>
2f6cbd19 4 * Copyright (C) 2004 Chris Friesen <chris_friesen@sympatico.ca>
bb38678e
SJR
5 * Copyright (C) 2009 Canonical Ltd.
6 * Copyright (C) 2009 Scott James Remnant <scott@netsplit.com>
7fafc032 7 *
55e9959b
KS
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation, either version 2 of the License, or
11 * (at your option) any later version.
7fafc032 12 *
55e9959b
KS
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
7fafc032 17 *
55e9959b
KS
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
7fafc032
KS
20 */
21
7fafc032 22#include <errno.h>
618234a5
LP
23#include <fcntl.h>
24#include <getopt.h>
25#include <signal.h>
26#include <stdbool.h>
27#include <stddef.h>
7fafc032
KS
28#include <stdio.h>
29#include <stdlib.h>
30#include <string.h>
618234a5 31#include <sys/epoll.h>
3ebdb81e 32#include <sys/file.h>
618234a5
LP
33#include <sys/inotify.h>
34#include <sys/ioctl.h>
35#include <sys/mount.h>
1e03b754 36#include <sys/prctl.h>
1e03b754 37#include <sys/signalfd.h>
618234a5 38#include <sys/socket.h>
dc117daa 39#include <sys/stat.h>
618234a5
LP
40#include <sys/time.h>
41#include <sys/wait.h>
42#include <unistd.h>
7fafc032 43
392ef7a2 44#include "sd-daemon.h"
693d371d 45#include "sd-event.h"
8314de1d 46
b5efdb8a 47#include "alloc-util.h"
194bbe33 48#include "cgroup-util.h"
618234a5 49#include "cpu-set-util.h"
5ba2dc25 50#include "dev-setup.h"
3ffd4af2 51#include "fd-util.h"
a5c32cff 52#include "fileio.h"
f97b34a6 53#include "format-util.h"
f4f15635 54#include "fs-util.h"
a505965d 55#include "hashmap.h"
c004493c 56#include "io-util.h"
40a57716 57#include "list.h"
618234a5 58#include "netlink-util.h"
6bedfcbb 59#include "parse-util.h"
4e731273 60#include "proc-cmdline.h"
618234a5
LP
61#include "process-util.h"
62#include "selinux-util.h"
63#include "signal-util.h"
8f328d36 64#include "socket-util.h"
07630cea 65#include "string-util.h"
618234a5
LP
66#include "terminal-util.h"
67#include "udev-util.h"
68#include "udev.h"
ee104e11 69#include "user-util.h"
7fafc032 70
bba7a484
TG
71static bool arg_debug = false;
72static int arg_daemonize = false;
73static int arg_resolve_names = 1;
020328e1 74static unsigned arg_children_max;
bba7a484
TG
75static int arg_exec_delay;
76static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
77static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
c0c6806b
TG
78
/* Global daemon state; allocated once by the main process. */
typedef struct Manager {
        struct udev *udev;
        sd_event *event;                         /* the main event loop */
        Hashmap *workers;                        /* PID -> struct worker, see PID_TO_PTR */
        LIST_HEAD(struct event, events);         /* queue of pending and running events */
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;             /* global properties set via the control socket */

        struct udev_monitor *monitor;            /* kernel uevent netlink socket */
        struct udev_ctrl *ctrl;                  /* udevadm control socket */
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];                     /* socketpair: workers report results on WRITE_END */

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;                        /* last rules/builtin timestamp check, see event_queue_start() */

        bool stop_exec_queue:1;                  /* paused via udevadm control --stop-exec-queue */
        bool exit:1;                             /* shutdown requested */
} Manager;

/* Lifecycle of a queued uevent. */
enum event_state {
        EVENT_UNDEF,                             /* wildcard, matches any state in event_queue_cleanup() */
        EVENT_QUEUED,
        EVENT_RUNNING,
};

/* One kernel uevent waiting in, or being processed from, the queue. */
struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev *udev;
        struct udev_device *dev;
        struct udev_device *dev_kernel;          /* clone of dev as received from the kernel */
        struct worker *worker;                   /* worker currently processing this event, or NULL */
        enum event_state state;
        unsigned long long int delaying_seqnum;  /* seqnum of an earlier event blocking us, cached by is_devpath_busy() */
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;        /* fires arg_event_timeout_warn_usec after dispatch */
        sd_event_source *timeout;                /* fires arg_event_timeout_usec after dispatch; kills the worker */
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

/* Lifecycle of a forked worker process as tracked by the main daemon. */
enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        int refcount;
        pid_t pid;
        struct udev_monitor *monitor;            /* per-worker netlink address; main daemon sends devices here */
        enum worker_state state;
        struct event *event;                     /* event currently assigned to this worker, or NULL */
};

/* passed from worker to main process; intentionally empty — arrival of the
 * message is the signal, sender identity travels via SCM_CREDENTIALS */
struct worker_message {
};
153
c6aa11f2 154static void event_free(struct event *event) {
cb49a4f2
TG
155 int r;
156
c6aa11f2
TG
157 if (!event)
158 return;
40a57716 159 assert(event->manager);
c6aa11f2 160
40a57716 161 LIST_REMOVE(event, event->manager->events, event);
912541b0 162 udev_device_unref(event->dev);
6969c349 163 udev_device_unref(event->dev_kernel);
c6aa11f2 164
693d371d
TG
165 sd_event_source_unref(event->timeout_warning);
166 sd_event_source_unref(event->timeout);
167
c6aa11f2
TG
168 if (event->worker)
169 event->worker->event = NULL;
170
40a57716 171 if (LIST_IS_EMPTY(event->manager->events)) {
cb49a4f2 172 /* only clean up the queue from the process that created it */
df0ff127 173 if (event->manager->pid == getpid_cached()) {
cb49a4f2
TG
174 r = unlink("/run/udev/queue");
175 if (r < 0)
176 log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
177 }
178 }
179
912541b0 180 free(event);
aa8734ff 181}
7a770250 182
c6aa11f2
TG
183static void worker_free(struct worker *worker) {
184 if (!worker)
185 return;
bc113de9 186
c0c6806b
TG
187 assert(worker->manager);
188
4a0b58c4 189 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
912541b0 190 udev_monitor_unref(worker->monitor);
c6aa11f2
TG
191 event_free(worker->event);
192
c6aa11f2 193 free(worker);
ff2c503d
KS
194}
195
c0c6806b 196static void manager_workers_free(Manager *manager) {
a505965d
TG
197 struct worker *worker;
198 Iterator i;
ff2c503d 199
c0c6806b
TG
200 assert(manager);
201
202 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 203 worker_free(worker);
a505965d 204
c0c6806b 205 manager->workers = hashmap_free(manager->workers);
fc465079
KS
206}
207
c0c6806b 208static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
a505965d
TG
209 _cleanup_free_ struct worker *worker = NULL;
210 int r;
3a19b32a
TG
211
212 assert(ret);
c0c6806b 213 assert(manager);
3a19b32a
TG
214 assert(worker_monitor);
215 assert(pid > 1);
216
217 worker = new0(struct worker, 1);
218 if (!worker)
219 return -ENOMEM;
220
39c19cf1 221 worker->refcount = 1;
c0c6806b 222 worker->manager = manager;
3a19b32a
TG
223 /* close monitor, but keep address around */
224 udev_monitor_disconnect(worker_monitor);
225 worker->monitor = udev_monitor_ref(worker_monitor);
226 worker->pid = pid;
a505965d 227
c0c6806b 228 r = hashmap_ensure_allocated(&manager->workers, NULL);
a505965d
TG
229 if (r < 0)
230 return r;
231
4a0b58c4 232 r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
a505965d
TG
233 if (r < 0)
234 return r;
235
3a19b32a 236 *ret = worker;
a505965d 237 worker = NULL;
3a19b32a
TG
238
239 return 0;
240}
241
4fa4d885
TG
242static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
243 struct event *event = userdata;
244
245 assert(event);
246 assert(event->worker);
247
248 kill_and_sigcont(event->worker->pid, SIGKILL);
249 event->worker->state = WORKER_KILLED;
250
251 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
252
253 return 1;
254}
255
256static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
257 struct event *event = userdata;
258
259 assert(event);
260
261 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
262
263 return 1;
264}
265
39c19cf1 266static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
267 sd_event *e;
268 uint64_t usec;
693d371d 269
c6aa11f2 270 assert(worker);
693d371d 271 assert(worker->manager);
c6aa11f2
TG
272 assert(event);
273 assert(!event->worker);
274 assert(!worker->event);
275
39c19cf1 276 worker->state = WORKER_RUNNING;
39c19cf1
TG
277 worker->event = event;
278 event->state = EVENT_RUNNING;
c6aa11f2 279 event->worker = worker;
693d371d
TG
280
281 e = worker->manager->event;
282
3285baa8 283 assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 284
3285baa8 285 (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
693d371d
TG
286 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
287
3285baa8 288 (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
693d371d 289 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
290}
291
/* Tear down a Manager and everything it owns. Accepts NULL.
 * The order is deliberate: event sources are dropped before the loop,
 * and workers/events before the sockets they reference. */
static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        /* detach event sources before unreffing the loop itself */
        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);      /* EVENT_UNDEF = drop all states */

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
321
9a73bd7c
TG
322static int worker_send_message(int fd) {
323 struct worker_message message = {};
324
325 return loop_write(fd, &message, sizeof(message), false);
326}
327
/* Fork a worker process to handle 'event'. The child inherits a copy of
 * the manager state, strips it down to what a worker needs, then loops:
 * process one device, report the result over the worker_watch socketpair,
 * and wait on epoll for either the next device (sent by the main daemon
 * over the per-worker netlink address) or SIGTERM. The parent registers
 * the child via worker_new() and attaches the event to it. */
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_udev_monitor_unref_ struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                /* child: becomes the worker process */
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = event->dev;
                event->dev = NULL;

                /* workers must not talk to the service manager */
                unsetenv("NOTIFY_SOCKET");

                /* drop inherited main-daemon state the worker has no use for */
                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                /* block all signals and receive them via signalfd instead */
                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "fail to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            streq_ptr("block", udev_device_get_subsystem(dev)) &&
                            !startswith(udev_device_get_sysname(dev), "dm-") &&
                            !startswith(udev_device_get_sysname(dev), "md")) {
                                struct udev_device *d = dev;

                                /* lock the whole disk, not the partition */
                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                /* fork failed: requeue the event so it can be retried */
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                /* parent: track the new worker and hand it the event */
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}
547
c0c6806b 548static void event_run(Manager *manager, struct event *event) {
a505965d
TG
549 struct worker *worker;
550 Iterator i;
912541b0 551
c0c6806b
TG
552 assert(manager);
553 assert(event);
554
555 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
556 ssize_t count;
557
558 if (worker->state != WORKER_IDLE)
559 continue;
560
c0c6806b 561 count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
912541b0 562 if (count < 0) {
1fa2f38f
ZJS
563 log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
564 worker->pid, count);
912541b0
KS
565 kill(worker->pid, SIGKILL);
566 worker->state = WORKER_KILLED;
567 continue;
568 }
39c19cf1 569 worker_attach_event(worker, event);
912541b0
KS
570 return;
571 }
572
c0c6806b 573 if (hashmap_size(manager->workers) >= arg_children_max) {
bba7a484 574 if (arg_children_max > 1)
c0c6806b 575 log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
912541b0
KS
576 return;
577 }
578
579 /* start new worker and pass initial device */
c0c6806b 580 worker_spawn(manager, event);
1e03b754
KS
581}
582
/* Wrap a freshly received device in a struct event and append it to the
 * queue. Takes ownership of 'dev' on success (caller unrefs on failure).
 * Returns 0 or -ENOMEM. Creates the /run/udev/queue flag file when the
 * queue transitions from empty to non-empty. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        /* keep a pristine copy of the kernel's view of the device */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        /* cache the fields is_devpath_busy() needs for dependency checks */
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        /* queue becomes non-empty: signal it to the outside world */
        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}
628
c0c6806b 629static void manager_kill_workers(Manager *manager) {
a505965d
TG
630 struct worker *worker;
631 Iterator i;
1e03b754 632
c0c6806b
TG
633 assert(manager);
634
635 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
636 if (worker->state == WORKER_KILLED)
637 continue;
1e03b754 638
912541b0
KS
639 worker->state = WORKER_KILLED;
640 kill(worker->pid, SIGTERM);
641 }
1e03b754
KS
642}
643
/* lookup event for identical, parent, child device */
/* Returns true if an earlier event in the queue must finish before
 * 'event' may run: same device (by devnum/ifindex/devpath), an old name
 * of it, or a parent/child in the devpath hierarchy. Caches the blocking
 * seqnum in event->delaying_seqnum so later calls can short-circuit. */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}
713
693d371d
TG
714static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
715 Manager *manager = userdata;
716
717 assert(manager);
718
719 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
720
721 sd_event_exit(manager->event, -ETIMEDOUT);
722
723 return 1;
724}
725
/* Begin orderly shutdown: stop accepting new events, flush the queue,
 * ask workers to exit, and arm a 30 s fallback timer that aborts the
 * loop if they do not. */
static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        /* give workers 30 seconds, then force the loop to exit */
        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}
759
760/* reload requested, HUP signal received, rules changed, builtin changed */
761static void manager_reload(Manager *manager) {
762
763 assert(manager);
764
b79aacbf
TG
765 sd_notify(false,
766 "RELOADING=1\n"
767 "STATUS=Flushing configuration...");
768
62d43dac
TG
769 manager_kill_workers(manager);
770 manager->rules = udev_rules_unref(manager->rules);
771 udev_builtin_exit(manager->udev);
b79aacbf 772
1ef72b55
MS
773 sd_notifyf(false,
774 "READY=1\n"
775 "STATUS=Processing with %u children at max", arg_children_max);
62d43dac
TG
776}
777
/* Walk the queue and dispatch every event that is ready: still in
 * EVENT_QUEUED state and not blocked by a related earlier event.
 * Also performs the (rate-limited) check for changed rules/builtins
 * and lazily loads the rules on first use. */
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        /* rules are loaded lazily and dropped again by manager_reload() */
        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event,event,manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}
818
ecb17862 819static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 820 struct event *event, *tmp;
ff2c503d 821
40a57716 822 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
823 if (match_type != EVENT_UNDEF && match_type != event->state)
824 continue;
ff2c503d 825
c6aa11f2 826 event_free(event);
912541b0 827 }
ff2c503d
KS
828}
829
/* Event-loop callback: drain all "done" notifications from workers.
 * Each message is empty; the sending worker is identified via the
 * SCM_CREDENTIALS ancillary data. Idle workers get their event freed
 * and new events are scheduled afterwards. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                /* extract the sender's credentials from the ancillary data */
                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
901
902static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 903 Manager *manager = userdata;
e82e8fa5
TG
904 struct udev_device *dev;
905 int r;
906
c0c6806b 907 assert(manager);
e82e8fa5 908
c0c6806b 909 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
910 if (dev) {
911 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 912 r = event_queue_insert(manager, dev);
e82e8fa5
TG
913 if (r < 0)
914 udev_device_unref(dev);
8302fe5a
TG
915 else
916 /* we have fresh events, try to schedule them */
917 event_queue_start(manager);
e82e8fa5
TG
918 }
919
920 return 1;
88f4b648
KS
921}
922
/* receive the udevd message from userspace
 *
 * sd-event I/O callback for the udev control socket (udevadm control ...).
 * Accepts one connection, reads one control message and applies the
 * requested action(s). Runs at SD_EVENT_PRIORITY_IDLE so a SYNC/ping reply
 * is only sent after all pending uevents have been processed.
 * Always returns 1 to keep the event source enabled. */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_udev_ctrl_connection_unref_ struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_udev_ctrl_msg_unref_ struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                /* workers inherit the log level on fork; kill the idle ones so
                 * replacements pick up the new level */
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                /* copy so we can split "KEY=value" in place */
                key = strdup(str);
                if (key) {
                        char *val;

                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        /* "KEY=" (empty value) unsets the property */
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                /* restart workers so they inherit the updated properties */
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}
4a231017 1012
/* After a block device was closed for writing, make the rest of the system
 * notice possible content changes: try to re-read the partition table (which
 * makes the kernel emit the proper events itself), and where that is not
 * possible, synthesize "change" uevents by writing to the devices' sysfs
 * uevent files.
 *
 * Returns 0 on success, negative errno-style error on enumeration failure.
 * Write errors on the uevent files are deliberately ignored (best effort). */
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        /* whole-disk block devices get the partition-table treatment;
         * device-mapper devices (dm-*) are explicitly excluded */
        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_udev_enumerate_unref_ struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        /* LOCK_NB: never block on a device held by someone else */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_udev_device_unref_ struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        /* not a whole disk (or a dm-* device): just synthesize a single "change" */
        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}
1116
/* sd-event I/O callback for the inotify fd that watches device nodes opened
 * for writing (udev_watch). IN_CLOSE_WRITE triggers a synthesized "change"
 * event; IN_IGNORED ends the watch.
 * Always returns 1 (keep source enabled), or a negative error on read failure. */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                /* spurious wakeup or interrupted read: nothing to do */
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_udev_device_unref_ struct udev_device *dev = NULL;

                /* map the watch descriptor back to the watched device */
                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}
1156
0561329d 1157static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1158 Manager *manager = userdata;
1159
1160 assert(manager);
1161
62d43dac 1162 manager_exit(manager);
912541b0 1163
e82e8fa5
TG
1164 return 1;
1165}
912541b0 1166
0561329d 1167static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1168 Manager *manager = userdata;
1169
1170 assert(manager);
1171
62d43dac 1172 manager_reload(manager);
912541b0 1173
e82e8fa5
TG
1174 return 1;
1175}
912541b0 1176
/* Signal callback for SIGCHLD: reap all dead worker processes, log their
 * exit status, clean up failed events (and forward the raw kernel event so
 * listeners still see it), then try to schedule queued events on the freed
 * worker slots. Always returns 1. */
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        /* signalfd coalesces signals, so reap in a loop until no child is left */
        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        /* stopped, not dead: keep the worker entry around */
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
1232
693d371d
TG
1233static int on_post(sd_event_source *s, void *userdata) {
1234 Manager *manager = userdata;
1235 int r;
1236
1237 assert(manager);
1238
40a57716 1239 if (LIST_IS_EMPTY(manager->events)) {
693d371d
TG
1240 /* no pending events */
1241 if (!hashmap_isempty(manager->workers)) {
1242 /* there are idle workers */
1243 log_debug("cleanup idle workers");
1244 manager_kill_workers(manager);
1245 } else {
1246 /* we are idle */
1247 if (manager->exit) {
1248 r = sd_event_exit(manager->event, 0);
1249 if (r < 0)
1250 return r;
1251 } else if (manager->cgroup)
1252 /* cleanup possible left-over processes in our cgroup */
1d98fef1 1253 cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
693d371d
TG
1254 }
1255 }
1256
1257 return 1;
1258}
1259
fcff1e72 1260static int listen_fds(int *rctrl, int *rnetlink) {
f59118ec 1261 _cleanup_udev_unref_ struct udev *udev = NULL;
fcff1e72 1262 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1263 int fd, n, r;
912541b0 1264
fcff1e72
TG
1265 assert(rctrl);
1266 assert(rnetlink);
1267
912541b0 1268 n = sd_listen_fds(true);
fcff1e72
TG
1269 if (n < 0)
1270 return n;
912541b0
KS
1271
1272 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1273 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1274 if (ctrl_fd >= 0)
1275 return -EINVAL;
1276 ctrl_fd = fd;
912541b0
KS
1277 continue;
1278 }
1279
1280 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1281 if (netlink_fd >= 0)
1282 return -EINVAL;
1283 netlink_fd = fd;
912541b0
KS
1284 continue;
1285 }
1286
fcff1e72 1287 return -EINVAL;
912541b0
KS
1288 }
1289
f59118ec
TG
1290 if (ctrl_fd < 0) {
1291 _cleanup_udev_ctrl_unref_ struct udev_ctrl *ctrl = NULL;
1292
1293 udev = udev_new();
1294 if (!udev)
1295 return -ENOMEM;
1296
1297 ctrl = udev_ctrl_new(udev);
1298 if (!ctrl)
1299 return log_error_errno(EINVAL, "error initializing udev control socket");
1300
1301 r = udev_ctrl_enable_receiving(ctrl);
1302 if (r < 0)
1303 return log_error_errno(EINVAL, "error binding udev control socket");
1304
1305 fd = udev_ctrl_get_fd(ctrl);
1306 if (fd < 0)
1307 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1308
f59118ec
TG
1309 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1310 if (ctrl_fd < 0)
1311 return log_error_errno(errno, "could not dup ctrl fd: %m");
1312 }
1313
1314 if (netlink_fd < 0) {
1315 _cleanup_udev_monitor_unref_ struct udev_monitor *monitor = NULL;
1316
1317 if (!udev) {
1318 udev = udev_new();
1319 if (!udev)
1320 return -ENOMEM;
1321 }
1322
1323 monitor = udev_monitor_new_from_netlink(udev, "kernel");
1324 if (!monitor)
1325 return log_error_errno(EINVAL, "error initializing netlink socket");
1326
1327 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1328
1329 r = udev_monitor_enable_receiving(monitor);
1330 if (r < 0)
1331 return log_error_errno(EINVAL, "error binding netlink socket");
1332
1333 fd = udev_monitor_get_fd(monitor);
1334 if (fd < 0)
1335 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1336
1337 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1338 if (netlink_fd < 0)
f59118ec
TG
1339 return log_error_errno(errno, "could not dup netlink fd: %m");
1340 }
fcff1e72
TG
1341
1342 *rctrl = ctrl_fd;
1343 *rnetlink = netlink_fd;
912541b0 1344
912541b0 1345 return 0;
7459bcdc
KS
1346}
1347
/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_priority=<level>              syslog priority
 *   udev.children_max=<number of workers>  events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>    delay execution of every executed program
 *   udev.event_timeout=<number of seconds> seconds to wait before terminating an event
 */
/* proc_cmdline_parse() callback: one key/value pair per call. Parse errors
 * are logged and ignored (always returns 0) so a bad kernel command line
 * never prevents udevd from starting. */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r = 0;

        assert(key);

        /* all recognized switches require a value */
        if (!value)
                return 0;

        if (proc_cmdline_key_streq(key, "udev.log_priority")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = util_log_priority(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r >= 0) {
                        /* value is in seconds; warn at one third of the timeout */
                        arg_event_timeout_usec *= USEC_PER_SEC;
                        arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                }

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atoi(value, &arg_exec_delay);

        } else if (startswith(key, "udev."))
                log_warning("Unknown udev kernel command line option \"%s\"", key);

        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}
1405
ed216e1f
TG
/* Print command-line usage to stdout (used by the -h/--help option). */
static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}
1420
/* Parse command-line options into the arg_* globals.
 * Returns 1 to continue startup, 0 to exit successfully (--help/--version,
 * or an invalid --resolve-names value), and -EINVAL on an unknown option. */
static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd' },
                { "debug",              no_argument,            NULL, 'D' },
                { "children-max",       required_argument,      NULL, 'c' },
                { "exec-delay",         required_argument,      NULL, 'e' },
                { "event-timeout",      required_argument,      NULL, 't' },
                { "resolve-names",      required_argument,      NULL, 'N' },
                { "help",               no_argument,            NULL, 'h' },
                { "version",            no_argument,            NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        /* malformed numbers are warned about and ignored */
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                /* option is in seconds; warn at one third of the timeout */
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        /* 1 = early, 0 = late, -1 = never */
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        help();
                        return 0;
                case 'V':
                        printf("%s\n", PACKAGE_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}
1497
b7f74dd4 1498static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1499 _cleanup_(manager_freep) Manager *manager = NULL;
11b1dd8c 1500 int r, fd_worker, one = 1;
c0c6806b
TG
1501
1502 assert(ret);
11b1dd8c
TG
1503 assert(fd_ctrl >= 0);
1504 assert(fd_uevent >= 0);
c0c6806b
TG
1505
1506 manager = new0(Manager, 1);
1507 if (!manager)
1508 return log_oom();
1509
e237d8cb
TG
1510 manager->fd_inotify = -1;
1511 manager->worker_watch[WRITE_END] = -1;
1512 manager->worker_watch[READ_END] = -1;
1513
c0c6806b
TG
1514 manager->udev = udev_new();
1515 if (!manager->udev)
1516 return log_error_errno(errno, "could not allocate udev context: %m");
1517
b2d21d93
TG
1518 udev_builtin_init(manager->udev);
1519
ecb17862
TG
1520 manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
1521 if (!manager->rules)
1522 return log_error_errno(ENOMEM, "error reading rules");
1523
40a57716 1524 LIST_HEAD_INIT(manager->events);
ecb17862
TG
1525 udev_list_init(manager->udev, &manager->properties, true);
1526
c26d1879
TG
1527 manager->cgroup = cgroup;
1528
f59118ec
TG
1529 manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
1530 if (!manager->ctrl)
1531 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1532
f59118ec
TG
1533 manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
1534 if (!manager->monitor)
1535 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1536
1537 /* unnamed socket from workers to the main daemon */
1538 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1539 if (r < 0)
1540 return log_error_errno(errno, "error creating socketpair: %m");
1541
693d371d 1542 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1543
693d371d 1544 r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
e237d8cb
TG
1545 if (r < 0)
1546 return log_error_errno(errno, "could not enable SO_PASSCRED: %m");
1547
1548 manager->fd_inotify = udev_watch_init(manager->udev);
1549 if (manager->fd_inotify < 0)
1550 return log_error_errno(ENOMEM, "error initializing inotify");
1551
1552 udev_watch_restore(manager->udev);
1553
1554 /* block and listen to all signals on signalfd */
72c0a2c2 1555 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1556
49f997f3
TG
1557 r = sd_event_default(&manager->event);
1558 if (r < 0)
709f6e46 1559 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1560
693d371d
TG
1561 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1562 if (r < 0)
1563 return log_error_errno(r, "error creating sigint event source: %m");
1564
1565 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1566 if (r < 0)
1567 return log_error_errno(r, "error creating sigterm event source: %m");
1568
1569 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1570 if (r < 0)
1571 return log_error_errno(r, "error creating sighup event source: %m");
1572
1573 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1574 if (r < 0)
1575 return log_error_errno(r, "error creating sigchld event source: %m");
1576
1577 r = sd_event_set_watchdog(manager->event, true);
1578 if (r < 0)
1579 return log_error_errno(r, "error creating watchdog event source: %m");
1580
11b1dd8c 1581 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1582 if (r < 0)
1583 return log_error_errno(r, "error creating ctrl event source: %m");
1584
1585 /* This needs to be after the inotify and uevent handling, to make sure
1586 * that the ping is send back after fully processing the pending uevents
1587 * (including the synthetic ones we may create due to inotify events).
1588 */
1589 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1590 if (r < 0)
1591 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1592
1593 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1594 if (r < 0)
1595 return log_error_errno(r, "error creating inotify event source: %m");
1596
11b1dd8c 1597 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1598 if (r < 0)
1599 return log_error_errno(r, "error creating uevent event source: %m");
1600
1601 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1602 if (r < 0)
1603 return log_error_errno(r, "error creating worker event source: %m");
1604
1605 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1606 if (r < 0)
1607 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1608
11b1dd8c
TG
1609 *ret = manager;
1610 manager = NULL;
1611
86c3bece 1612 return 0;
c0c6806b
TG
1613}
1614
/* Set up the Manager around the two listening fds and drive the main event
 * loop until exit is requested. Notifies the service manager of readiness
 * before entering the loop and of shutdown afterwards.
 * Returns the event loop's exit code, or a negative error. */
static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        /* non-fatal: static device node permissions are best effort */
        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notifyf(false,
                          "READY=1\n"
                          "STATUS=Processing with %u children at max", arg_children_max);

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        /* propagate the exit code set via sd_event_exit() */
        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}
1649
/* Entry point: parse configuration (argv, then kernel command line), set up
 * the runtime environment (/run/udev, /dev, SELinux labelling, cgroup
 * detection), take over or create the listening sockets, optionally
 * daemonize, and hand off to run(). */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        /* <= 0 covers both --help/--version (0) and parse errors (< 0) */
        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                goto exit;

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;

                /* default: 8 workers plus 2 per available CPU */
                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        /* child continues as the daemon */
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* parent exits immediately */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* best effort: protect the daemon from the OOM killer */
                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}