]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
man: systemd.offline-updates: Document system-update-pre.target usage
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
810adae9
LP
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
7fafc032 6 *
7fafc032
KS
7 */
8
7fafc032 9#include <errno.h>
618234a5
LP
10#include <fcntl.h>
11#include <getopt.h>
12#include <signal.h>
13#include <stdbool.h>
14#include <stddef.h>
7fafc032
KS
15#include <stdio.h>
16#include <stdlib.h>
17#include <string.h>
618234a5 18#include <sys/epoll.h>
3ebdb81e 19#include <sys/file.h>
618234a5
LP
20#include <sys/inotify.h>
21#include <sys/ioctl.h>
22#include <sys/mount.h>
1e03b754 23#include <sys/prctl.h>
1e03b754 24#include <sys/signalfd.h>
618234a5 25#include <sys/socket.h>
dc117daa 26#include <sys/stat.h>
618234a5
LP
27#include <sys/time.h>
28#include <sys/wait.h>
29#include <unistd.h>
7fafc032 30
392ef7a2 31#include "sd-daemon.h"
693d371d 32#include "sd-event.h"
8314de1d 33
b5efdb8a 34#include "alloc-util.h"
194bbe33 35#include "cgroup-util.h"
618234a5 36#include "cpu-set-util.h"
5ba2dc25 37#include "dev-setup.h"
3ffd4af2 38#include "fd-util.h"
a5c32cff 39#include "fileio.h"
f97b34a6 40#include "format-util.h"
f4f15635 41#include "fs-util.h"
a505965d 42#include "hashmap.h"
c004493c 43#include "io-util.h"
40a57716 44#include "list.h"
618234a5 45#include "netlink-util.h"
6bedfcbb 46#include "parse-util.h"
4e731273 47#include "proc-cmdline.h"
618234a5
LP
48#include "process-util.h"
49#include "selinux-util.h"
50#include "signal-util.h"
8f328d36 51#include "socket-util.h"
07630cea 52#include "string-util.h"
618234a5
LP
53#include "terminal-util.h"
54#include "udev-util.h"
55#include "udev.h"
ee104e11 56#include "user-util.h"
7fafc032 57
bba7a484
TG
/* Command-line / runtime configuration of the daemon. */
static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;        /* name resolution mode for rules (early/late/never) */
static unsigned arg_children_max;        /* upper bound on concurrently forked workers */
static int arg_exec_delay;               /* optional delay before RUN= programs, seconds */
/* Hard per-event timeout after which the worker handling it is killed. */
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
/* A warning is logged after a third of the hard timeout. */
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

/* Global state of the main udevd process. */
typedef struct Manager {
        struct udev *udev;
        sd_event *event;                  /* main event loop */
        Hashmap *workers;                 /* PID -> struct worker, all forked workers */
        LIST_HEAD(struct event, events);  /* queue of pending/running device events */
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        struct udev_list properties;      /* global properties set via control ENV messages */

        struct udev_monitor *monitor;     /* netlink monitor receiving kernel uevents */
        struct udev_ctrl *ctrl;           /* control socket (udevadm control) */
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];              /* socketpair over which workers report results */

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;                 /* last time the rules/builtin timestamps were checked */

        bool stop_exec_queue:1;           /* true while event dispatch is paused */
        bool exit:1;                      /* true once shutdown has been requested */
} Manager;

enum event_state {
        EVENT_UNDEF,     /* also used as a wildcard in event_queue_cleanup() */
        EVENT_QUEUED,
        EVENT_RUNNING,
};

/* One queued kernel uevent. */
struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev *udev;
        struct udev_device *dev;
        struct udev_device *dev_kernel;  /* clone of the originally received device */
        struct worker *worker;           /* worker currently processing this event, if any */
        enum event_state state;
        unsigned long long int delaying_seqnum;  /* seqnum of the event currently blocking us */
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;         /* previous devpath, set on device renames */
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

/* Bookkeeping for one forked worker process. */
struct worker {
        Manager *manager;
        int refcount;
        pid_t pid;
        struct udev_monitor *monitor;    /* unicast monitor used to feed devices to this worker */
        enum worker_state state;
        struct event *event;             /* event currently assigned to this worker */
};

/* passed from worker to main process; intentionally empty — only the
 * sender's SCM_CREDENTIALS matter (see on_worker()) */
struct worker_message {
};
140
c6aa11f2 141static void event_free(struct event *event) {
cb49a4f2
TG
142 int r;
143
c6aa11f2
TG
144 if (!event)
145 return;
40a57716 146 assert(event->manager);
c6aa11f2 147
40a57716 148 LIST_REMOVE(event, event->manager->events, event);
912541b0 149 udev_device_unref(event->dev);
6969c349 150 udev_device_unref(event->dev_kernel);
c6aa11f2 151
693d371d
TG
152 sd_event_source_unref(event->timeout_warning);
153 sd_event_source_unref(event->timeout);
154
c6aa11f2
TG
155 if (event->worker)
156 event->worker->event = NULL;
157
40a57716 158 if (LIST_IS_EMPTY(event->manager->events)) {
cb49a4f2 159 /* only clean up the queue from the process that created it */
df0ff127 160 if (event->manager->pid == getpid_cached()) {
cb49a4f2
TG
161 r = unlink("/run/udev/queue");
162 if (r < 0)
163 log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
164 }
165 }
166
912541b0 167 free(event);
aa8734ff 168}
7a770250 169
c6aa11f2
TG
170static void worker_free(struct worker *worker) {
171 if (!worker)
172 return;
bc113de9 173
c0c6806b
TG
174 assert(worker->manager);
175
4a0b58c4 176 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
912541b0 177 udev_monitor_unref(worker->monitor);
c6aa11f2
TG
178 event_free(worker->event);
179
c6aa11f2 180 free(worker);
ff2c503d
KS
181}
182
c0c6806b 183static void manager_workers_free(Manager *manager) {
a505965d
TG
184 struct worker *worker;
185 Iterator i;
ff2c503d 186
c0c6806b
TG
187 assert(manager);
188
189 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 190 worker_free(worker);
a505965d 191
c0c6806b 192 manager->workers = hashmap_free(manager->workers);
fc465079
KS
193}
194
c0c6806b 195static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
a505965d
TG
196 _cleanup_free_ struct worker *worker = NULL;
197 int r;
3a19b32a
TG
198
199 assert(ret);
c0c6806b 200 assert(manager);
3a19b32a
TG
201 assert(worker_monitor);
202 assert(pid > 1);
203
204 worker = new0(struct worker, 1);
205 if (!worker)
206 return -ENOMEM;
207
39c19cf1 208 worker->refcount = 1;
c0c6806b 209 worker->manager = manager;
3a19b32a
TG
210 /* close monitor, but keep address around */
211 udev_monitor_disconnect(worker_monitor);
212 worker->monitor = udev_monitor_ref(worker_monitor);
213 worker->pid = pid;
a505965d 214
c0c6806b 215 r = hashmap_ensure_allocated(&manager->workers, NULL);
a505965d
TG
216 if (r < 0)
217 return r;
218
4a0b58c4 219 r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
a505965d
TG
220 if (r < 0)
221 return r;
222
ae2a15bc 223 *ret = TAKE_PTR(worker);
3a19b32a
TG
224
225 return 0;
226}
227
4fa4d885
TG
/* Hard-timeout handler: the event has been running for arg_event_timeout_usec;
 * give up and SIGKILL the worker processing it. The SIGCHLD handling elsewhere
 * will then reap the worker and requeue/free the event. */
static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}
241
/* Soft-timeout handler: only warn; the hard timeout in on_event_timeout()
 * will kill the worker later if the event still does not finish. */
static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}
251
39c19cf1 252static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
253 sd_event *e;
254 uint64_t usec;
693d371d 255
c6aa11f2 256 assert(worker);
693d371d 257 assert(worker->manager);
c6aa11f2
TG
258 assert(event);
259 assert(!event->worker);
260 assert(!worker->event);
261
39c19cf1 262 worker->state = WORKER_RUNNING;
39c19cf1
TG
263 worker->event = event;
264 event->state = EVENT_RUNNING;
c6aa11f2 265 event->worker = worker;
693d371d
TG
266
267 e = worker->manager->event;
268
3285baa8 269 assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 270
3285baa8 271 (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
693d371d
TG
272 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
273
3285baa8 274 (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
693d371d 275 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
276}
277
e237d8cb
TG
/* Destroy the manager and everything it owns. Also used by forked workers
 * on exit to release the inherited copy of the state. Safe on NULL.
 * Note: event sources are released before the loop itself; the queue is
 * cleaned with EVENT_UNDEF, i.e. all events regardless of state. */
static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit(manager->udev);

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        udev_unref(manager->udev);
        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        udev_list_cleanup(&manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}
305
306DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
307
9a73bd7c
TG
/* Notify the main daemon that this worker finished an event. The datagram
 * body is empty; the kernel-attached SCM_CREDENTIALS identify the sender. */
static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}
313
/* Fork a new worker process to handle 'event'.
 *
 * The child clones the manager state, drops everything it does not need
 * (control socket, event sources, the main monitor), then loops: apply
 * rules to the device, report the result over the worker_watch socketpair,
 * and wait on epoll for either the next device (unicast from the main
 * daemon) or SIGTERM. The parent registers the child via worker_new() and
 * attaches the event to it. Errors are logged; the function returns void. */
static void worker_spawn(Manager *manager, struct event *event) {
        struct udev *udev = event->udev;
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                /* child: the worker process */
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                /* drop inherited state the worker must not touch */
                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                /* all signals are consumed via the signalfd below */
                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "fail to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        struct udev_event *udev_event;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            streq_ptr("block", udev_device_get_subsystem(dev)) &&
                            !startswith(udev_device_get_sysname(dev), "dm-") &&
                            !startswith(udev_device_get_sysname(dev), "md")) {
                                struct udev_device *d = dev;

                                /* lock the whole disk, not the partition */
                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 &manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(udev, dev);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        udev_event_unref(udev_event);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                /* fork failed: put the event back so it can be retried */
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                /* parent: track the new worker and hand it the event */
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}
532
/* Dispatch one queued event: prefer an existing idle worker; otherwise fork
 * a new one, unless the children limit has been reached (in which case the
 * event stays queued and will be retried when a worker reports back). */
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                /* unicast the device to this worker's monitor */
                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        /* worker is unresponsive; kill it and try the next one */
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}
567
/* Append a freshly received device to the event queue. Takes ownership of
 * 'dev' on success (caller must unref it on failure). Creates the
 * /run/udev/queue flag file when the queue transitions from empty.
 * Returns 0 on success, -ENOMEM on allocation failure. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->udev = udev_device_get_udev(dev);
        event->manager = manager;
        event->dev = dev;
        /* keep a copy of the originally received kernel device around */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        /* cache the identity fields used by is_devpath_busy() */
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}
613
c0c6806b 614static void manager_kill_workers(Manager *manager) {
a505965d
TG
615 struct worker *worker;
616 Iterator i;
1e03b754 617
c0c6806b
TG
618 assert(manager);
619
620 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
621 if (worker->state == WORKER_KILLED)
622 continue;
1e03b754 623
912541b0
KS
624 worker->state = WORKER_KILLED;
625 kill(worker->pid, SIGTERM);
626 }
1e03b754
KS
627}
628
/* lookup event for identical, parent, child device */
/* Returns true if 'event' must wait because an earlier queued event touches
 * the same device (same devnum/ifindex/devpath, old name, or a parent/child
 * devpath). Caches the blocking seqnum in event->delaying_seqnum so repeated
 * scans can start from where they left off. */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}
698
/* Fallback timer armed by manager_exit(): if workers have not finished
 * within the grace period, force the event loop to exit with -ETIMEDOUT. */
static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}
710
/* Begin orderly shutdown: stop accepting new work, discard what is queued,
 * terminate workers, and arm a 30s fallback timer (on_exit_timeout) in case
 * workers fail to finish. */
static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        /* if the timer cannot be armed we still exit, just without the net */
        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}
744
/* reload requested, HUP signal received, rules changed, builtin changed */
/* Drop the compiled rules and builtin state; they are lazily rebuilt by
 * event_queue_start(). Workers are killed so new ones inherit fresh state. */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit(manager->udev);

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}
762
/* Walk the queue and dispatch every queued event whose device is not blocked
 * by an earlier event. Also rate-limits (3s) a check for changed rules files
 * or builtins, triggering a reload when needed, and lazily (re)compiles the
 * rules. No-op while exiting or while the exec queue is stopped. */
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate(manager->udev))
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init(manager->udev);

        if (!manager->rules) {
                manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event,event,manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}
803
ecb17862 804static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 805 struct event *event, *tmp;
ff2c503d 806
40a57716 807 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
808 if (match_type != EVENT_UNDEF && match_type != event->state)
809 continue;
ff2c503d 810
c6aa11f2 811 event_free(event);
912541b0 812 }
ff2c503d
KS
813}
814
/* Event-loop callback for the worker_watch socketpair: drain all pending
 * "done" datagrams from workers. The payload is empty; the sending worker
 * is identified via the kernel-attached SCM_CREDENTIALS. Each reporting
 * worker is flipped back to IDLE (unless killed) and its event is freed,
 * after which we try to schedule more queued events. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                /* extract the sender's credentials */
                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
886
887static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 888 Manager *manager = userdata;
e82e8fa5
TG
889 struct udev_device *dev;
890 int r;
891
c0c6806b 892 assert(manager);
e82e8fa5 893
c0c6806b 894 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
895 if (dev) {
896 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 897 r = event_queue_insert(manager, dev);
e82e8fa5
TG
898 if (r < 0)
899 udev_device_unref(dev);
8302fe5a
TG
900 else
901 /* we have fresh events, try to schedule them */
902 event_queue_start(manager);
e82e8fa5
TG
903 }
904
905 return 1;
88f4b648
KS
906}
907
/* receive the udevd message from userspace */
/* Event-loop callback for the control socket (udevadm control): handles
 * log-level changes, queue start/stop, reload, ENV property set/unset,
 * children-max changes, ping and exit requests. Returns 1 always so the
 * event source stays enabled. */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                /* restart workers so they inherit the new level */
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str != NULL) {
                _cleanup_free_ char *key = NULL;

                key = strdup(str);
                if (key) {
                        char *val;

                        /* "KEY=value" sets, "KEY=" unsets the property */
                        val = strchr(key, '=');
                        if (val != NULL) {
                                val[0] = '\0';
                                val = &val[1];
                                if (val[0] == '\0') {
                                        log_debug("udevd message (ENV) received, unset '%s'", key);
                                        udev_list_entry_add(&manager->properties, key, NULL);
                                } else {
                                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
                                        udev_list_entry_add(&manager->properties, key, val);
                                }
                        } else
                                log_error("wrong key format '%s'", key);
                }
                /* workers copy the properties at fork time; restart them */
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}
4a231017 997
/* Synthesize "change" uevents for a device whose node was closed after writing.
 *
 * For whole-disk block devices (excluding device-mapper "dm-*" nodes) this first
 * tries to make the kernel re-read the partition table, which — if it succeeds
 * and partitions exist — already generates the needed events. Otherwise a
 * "change" is written to the uevent file of the disk and of each partition.
 * Returns 0 on success, negative errno-style error on enumeration failure. */
static int synthesize_change(struct udev_device *dev) {
        char filename[UTIL_PATH_SIZE];
        int r;

        if (streq_ptr("block", udev_device_get_subsystem(dev)) &&
            streq_ptr("disk", udev_device_get_devtype(dev)) &&
            !startswith(udev_device_get_sysname(dev), "dm-")) {
                bool part_table_read = false;
                bool has_partitions = false;
                int fd;
                struct udev *udev = udev_device_get_udev(dev);
                _cleanup_(udev_enumerate_unrefp) struct udev_enumerate *e = NULL;
                struct udev_list_entry *item;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(udev_device_get_devnode(dev), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        /* LOCK_NB: do not stall the event loop on a busy device */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                e = udev_enumerate_new(udev);
                if (!e)
                        return -ENOMEM;

                r = udev_enumerate_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = udev_enumerate_add_match_subsystem(e, "block");
                if (r < 0)
                        return r;

                r = udev_enumerate_scan_devices(e);
                if (r < 0)
                        return r;

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_(udev_device_unrefp) struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
                strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                udev_list_entry_foreach(item, udev_enumerate_get_list_entry(e)) {
                        _cleanup_(udev_device_unrefp) struct udev_device *d = NULL;

                        d = udev_device_new_from_syspath(udev, udev_list_entry_get_name(item));
                        if (!d)
                                continue;

                        if (!streq_ptr("partition", udev_device_get_devtype(d)))
                                continue;

                        log_debug("device %s closed, synthesising partition '%s' 'change'",
                                  udev_device_get_devnode(dev), udev_device_get_devnode(d));
                        strscpyl(filename, sizeof(filename), udev_device_get_syspath(d), "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        /* not a whole disk: just synthesize a single "change" for the device */
        log_debug("device %s closed, synthesising 'change'", udev_device_get_devnode(dev));
        strscpyl(filename, sizeof(filename), udev_device_get_syspath(dev), "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}
1101
/* sd-event IO callback for the inotify fd that watches device nodes.
 *
 * IN_CLOSE_WRITE on a watched node triggers synthesize_change(); IN_IGNORED
 * (watch removed by the kernel) drops our watch bookkeeping. Always returns 1
 * so the event source stays installed, except on a hard read error. */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                /* spurious wakeup or interrupted read: try again next time */
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(udev_device_unrefp) struct udev_device *dev = NULL;

                dev = udev_watch_lookup(manager->udev, e->wd);
                if (!dev)
                        continue;     /* watch descriptor no longer mapped to a device */

                log_debug("inotify event: %x for %s", e->mask, udev_device_get_devnode(dev));
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(manager->udev, dev);
        }

        return 1;
}
1141
0561329d 1142static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1143 Manager *manager = userdata;
1144
1145 assert(manager);
1146
62d43dac 1147 manager_exit(manager);
912541b0 1148
e82e8fa5
TG
1149 return 1;
1150}
912541b0 1151
0561329d 1152static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1153 Manager *manager = userdata;
1154
1155 assert(manager);
1156
62d43dac 1157 manager_reload(manager);
912541b0 1158
e82e8fa5
TG
1159 return 1;
1160}
912541b0 1161
/* Signal handler for SIGCHLD: reap all exited worker processes.
 *
 * Loops over waitpid(WNOHANG) until no more children are reportable. A worker
 * that exited non-zero or died from a signal has its in-flight event's on-disk
 * state dropped and the raw kernel uevent re-broadcast unamended, so listeners
 * still see the event even though rule processing failed. Stopped/continued
 * children are only logged, not reaped. */
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;      /* no more exited children right now */

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;   /* not dead, keep the worker entry */
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;   /* not dead, keep the worker entry */
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
1217
693d371d
TG
1218static int on_post(sd_event_source *s, void *userdata) {
1219 Manager *manager = userdata;
1220 int r;
1221
1222 assert(manager);
1223
40a57716 1224 if (LIST_IS_EMPTY(manager->events)) {
693d371d
TG
1225 /* no pending events */
1226 if (!hashmap_isempty(manager->workers)) {
1227 /* there are idle workers */
1228 log_debug("cleanup idle workers");
1229 manager_kill_workers(manager);
1230 } else {
1231 /* we are idle */
1232 if (manager->exit) {
1233 r = sd_event_exit(manager->event, 0);
1234 if (r < 0)
1235 return r;
1236 } else if (manager->cgroup)
1237 /* cleanup possible left-over processes in our cgroup */
1d98fef1 1238 cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
693d371d
TG
1239 }
1240 }
1241
1242 return 1;
1243}
1244
fcff1e72 1245static int listen_fds(int *rctrl, int *rnetlink) {
8e766630 1246 _cleanup_(udev_unrefp) struct udev *udev = NULL;
fcff1e72 1247 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1248 int fd, n, r;
912541b0 1249
fcff1e72
TG
1250 assert(rctrl);
1251 assert(rnetlink);
1252
912541b0 1253 n = sd_listen_fds(true);
fcff1e72
TG
1254 if (n < 0)
1255 return n;
912541b0
KS
1256
1257 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1258 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1259 if (ctrl_fd >= 0)
1260 return -EINVAL;
1261 ctrl_fd = fd;
912541b0
KS
1262 continue;
1263 }
1264
1265 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1266 if (netlink_fd >= 0)
1267 return -EINVAL;
1268 netlink_fd = fd;
912541b0
KS
1269 continue;
1270 }
1271
fcff1e72 1272 return -EINVAL;
912541b0
KS
1273 }
1274
f59118ec 1275 if (ctrl_fd < 0) {
8e766630 1276 _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;
f59118ec
TG
1277
1278 udev = udev_new();
1279 if (!udev)
1280 return -ENOMEM;
1281
1282 ctrl = udev_ctrl_new(udev);
1283 if (!ctrl)
1284 return log_error_errno(EINVAL, "error initializing udev control socket");
1285
1286 r = udev_ctrl_enable_receiving(ctrl);
1287 if (r < 0)
1288 return log_error_errno(EINVAL, "error binding udev control socket");
1289
1290 fd = udev_ctrl_get_fd(ctrl);
1291 if (fd < 0)
1292 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1293
f59118ec
TG
1294 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1295 if (ctrl_fd < 0)
1296 return log_error_errno(errno, "could not dup ctrl fd: %m");
1297 }
1298
1299 if (netlink_fd < 0) {
8e766630 1300 _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;
f59118ec
TG
1301
1302 if (!udev) {
1303 udev = udev_new();
1304 if (!udev)
1305 return -ENOMEM;
1306 }
1307
1308 monitor = udev_monitor_new_from_netlink(udev, "kernel");
1309 if (!monitor)
1310 return log_error_errno(EINVAL, "error initializing netlink socket");
1311
1312 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1313
1314 r = udev_monitor_enable_receiving(monitor);
1315 if (r < 0)
1316 return log_error_errno(EINVAL, "error binding netlink socket");
1317
1318 fd = udev_monitor_get_fd(monitor);
1319 if (fd < 0)
1320 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1321
1322 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1323 if (netlink_fd < 0)
f59118ec
TG
1324 return log_error_errno(errno, "could not dup netlink fd: %m");
1325 }
fcff1e72
TG
1326
1327 *rctrl = ctrl_fd;
1328 *rnetlink = netlink_fd;
912541b0 1329
912541b0 1330 return 0;
7459bcdc
KS
1331}
1332
/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_priority=<level>              syslog priority
 *   udev.children_max=<number of workers>  events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>    delay execution of every executed program
 *   udev.event_timeout=<number of seconds> seconds to wait before terminating an event
 */
/* proc_cmdline_parse() callback: invoked once per key(=value) pair.
 * Always returns 0 — malformed values are logged and ignored so an odd
 * kernel command line can never prevent udevd from starting. */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r = 0;

        assert(key);

        /* every option handled below requires a value */
        if (!value)
                return 0;

        if (proc_cmdline_key_streq(key, "udev.log_priority")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = util_log_priority(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r >= 0) {
                        arg_event_timeout_usec *= USEC_PER_SEC;
                        /* warn after one third of the timeout, but at least 1 usec */
                        arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                }

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atoi(value, &arg_exec_delay);

        } else if (startswith(key, "udev."))
                log_warning("Unknown udev kernel command line option \"%s\"", key);

        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}
1390
/* Print command line usage for udevd to stdout. */
static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               , program_invocation_short_name);
}
1405
bba7a484 1406static int parse_argv(int argc, char *argv[]) {
912541b0 1407 static const struct option options[] = {
bba7a484
TG
1408 { "daemon", no_argument, NULL, 'd' },
1409 { "debug", no_argument, NULL, 'D' },
1410 { "children-max", required_argument, NULL, 'c' },
1411 { "exec-delay", required_argument, NULL, 'e' },
1412 { "event-timeout", required_argument, NULL, 't' },
1413 { "resolve-names", required_argument, NULL, 'N' },
1414 { "help", no_argument, NULL, 'h' },
1415 { "version", no_argument, NULL, 'V' },
912541b0
KS
1416 {}
1417 };
689a97f5 1418
bba7a484 1419 int c;
689a97f5 1420
bba7a484
TG
1421 assert(argc >= 0);
1422 assert(argv);
912541b0 1423
e14b6f21 1424 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1425 int r;
912541b0 1426
bba7a484 1427 switch (c) {
912541b0 1428
912541b0 1429 case 'd':
bba7a484 1430 arg_daemonize = true;
912541b0
KS
1431 break;
1432 case 'c':
020328e1 1433 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1434 if (r < 0)
1435 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1436 break;
1437 case 'e':
6f5cf8a8
TG
1438 r = safe_atoi(optarg, &arg_exec_delay);
1439 if (r < 0)
1440 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1441 break;
9719859c 1442 case 't':
f1e8664e
TG
1443 r = safe_atou64(optarg, &arg_event_timeout_usec);
1444 if (r < 0)
65fea570 1445 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1446 else {
1447 arg_event_timeout_usec *= USEC_PER_SEC;
1448 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1449 }
9719859c 1450 break;
912541b0 1451 case 'D':
bba7a484 1452 arg_debug = true;
912541b0
KS
1453 break;
1454 case 'N':
090be865 1455 if (streq(optarg, "early")) {
bba7a484 1456 arg_resolve_names = 1;
090be865 1457 } else if (streq(optarg, "late")) {
bba7a484 1458 arg_resolve_names = 0;
090be865 1459 } else if (streq(optarg, "never")) {
bba7a484 1460 arg_resolve_names = -1;
912541b0 1461 } else {
9f6445e3 1462 log_error("resolve-names must be early, late or never");
bba7a484 1463 return 0;
912541b0
KS
1464 }
1465 break;
1466 case 'h':
ed216e1f 1467 help();
bba7a484 1468 return 0;
912541b0 1469 case 'V':
948aaa7c 1470 printf("%s\n", PACKAGE_VERSION);
bba7a484
TG
1471 return 0;
1472 case '?':
1473 return -EINVAL;
912541b0 1474 default:
bba7a484
TG
1475 assert_not_reached("Unhandled option");
1476
912541b0
KS
1477 }
1478 }
1479
bba7a484
TG
1480 return 1;
1481}
1482
b7f74dd4 1483static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1484 _cleanup_(manager_freep) Manager *manager = NULL;
11b1dd8c 1485 int r, fd_worker, one = 1;
c0c6806b
TG
1486
1487 assert(ret);
11b1dd8c
TG
1488 assert(fd_ctrl >= 0);
1489 assert(fd_uevent >= 0);
c0c6806b
TG
1490
1491 manager = new0(Manager, 1);
1492 if (!manager)
1493 return log_oom();
1494
e237d8cb
TG
1495 manager->fd_inotify = -1;
1496 manager->worker_watch[WRITE_END] = -1;
1497 manager->worker_watch[READ_END] = -1;
1498
c0c6806b
TG
1499 manager->udev = udev_new();
1500 if (!manager->udev)
1501 return log_error_errno(errno, "could not allocate udev context: %m");
1502
b2d21d93
TG
1503 udev_builtin_init(manager->udev);
1504
ecb17862
TG
1505 manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
1506 if (!manager->rules)
1507 return log_error_errno(ENOMEM, "error reading rules");
1508
40a57716 1509 LIST_HEAD_INIT(manager->events);
ecb17862
TG
1510 udev_list_init(manager->udev, &manager->properties, true);
1511
c26d1879
TG
1512 manager->cgroup = cgroup;
1513
f59118ec
TG
1514 manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
1515 if (!manager->ctrl)
1516 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1517
f59118ec
TG
1518 manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
1519 if (!manager->monitor)
1520 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1521
1522 /* unnamed socket from workers to the main daemon */
1523 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1524 if (r < 0)
1525 return log_error_errno(errno, "error creating socketpair: %m");
1526
693d371d 1527 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1528
693d371d 1529 r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
e237d8cb
TG
1530 if (r < 0)
1531 return log_error_errno(errno, "could not enable SO_PASSCRED: %m");
1532
1533 manager->fd_inotify = udev_watch_init(manager->udev);
1534 if (manager->fd_inotify < 0)
1535 return log_error_errno(ENOMEM, "error initializing inotify");
1536
1537 udev_watch_restore(manager->udev);
1538
1539 /* block and listen to all signals on signalfd */
72c0a2c2 1540 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1541
49f997f3
TG
1542 r = sd_event_default(&manager->event);
1543 if (r < 0)
709f6e46 1544 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1545
693d371d
TG
1546 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1547 if (r < 0)
1548 return log_error_errno(r, "error creating sigint event source: %m");
1549
1550 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1551 if (r < 0)
1552 return log_error_errno(r, "error creating sigterm event source: %m");
1553
1554 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1555 if (r < 0)
1556 return log_error_errno(r, "error creating sighup event source: %m");
1557
1558 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1559 if (r < 0)
1560 return log_error_errno(r, "error creating sigchld event source: %m");
1561
1562 r = sd_event_set_watchdog(manager->event, true);
1563 if (r < 0)
1564 return log_error_errno(r, "error creating watchdog event source: %m");
1565
11b1dd8c 1566 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1567 if (r < 0)
1568 return log_error_errno(r, "error creating ctrl event source: %m");
1569
1570 /* This needs to be after the inotify and uevent handling, to make sure
1571 * that the ping is send back after fully processing the pending uevents
1572 * (including the synthetic ones we may create due to inotify events).
1573 */
1574 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1575 if (r < 0)
1576 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1577
1578 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1579 if (r < 0)
1580 return log_error_errno(r, "error creating inotify event source: %m");
1581
11b1dd8c 1582 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1583 if (r < 0)
1584 return log_error_errno(r, "error creating uevent event source: %m");
1585
1586 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1587 if (r < 0)
1588 return log_error_errno(r, "error creating worker event source: %m");
1589
1590 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1591 if (r < 0)
1592 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1593
1cc6c93a 1594 *ret = TAKE_PTR(manager);
11b1dd8c 1595
86c3bece 1596 return 0;
c0c6806b
TG
1597}
1598
/* Build the Manager from the two listening fds, run the sd-event loop until
 * exit is requested, and tear the control socket down afterwards.
 * Returns the event-loop exit code, or a negative errno-style error. */
static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        /* best effort: a failure here is logged but does not abort startup */
        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notifyf(false,
                          "READY=1\n"
                          "STATUS=Processing with %u children at max", arg_children_max);

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}
1633
/* udevd entry point: parse configuration (argv, then kernel command line),
 * compute worker limits, prepare the runtime environment (/run/udev, /dev,
 * SELinux labelling, cgroup lookup), take over or create the listening
 * sockets, optionally daemonize, then hand off to run(). */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;      /* --help/--version (0) or option error (<0) */

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                goto exit;

        /* no explicit worker limit: derive one from CPU count and RAM */
        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                /* cap at one worker per 128 MiB of physical memory, floor of 10 */
                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        break;          /* child: continue as the daemon */
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* parent: report success and leave */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* protect the daemon from the OOM killer (best effort) */
                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}