]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
Merge pull request #10650 from yuwata/udevadm-trigger-use-write-string-file
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
810adae9
LP
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
7fafc032
KS
6 */
7
7fafc032 8#include <errno.h>
618234a5
LP
9#include <fcntl.h>
10#include <getopt.h>
11#include <signal.h>
12#include <stdbool.h>
13#include <stddef.h>
7fafc032
KS
14#include <stdio.h>
15#include <stdlib.h>
16#include <string.h>
618234a5 17#include <sys/epoll.h>
3ebdb81e 18#include <sys/file.h>
618234a5
LP
19#include <sys/inotify.h>
20#include <sys/ioctl.h>
21#include <sys/mount.h>
1e03b754 22#include <sys/prctl.h>
1e03b754 23#include <sys/signalfd.h>
618234a5 24#include <sys/socket.h>
dc117daa 25#include <sys/stat.h>
618234a5
LP
26#include <sys/time.h>
27#include <sys/wait.h>
28#include <unistd.h>
7fafc032 29
392ef7a2 30#include "sd-daemon.h"
693d371d 31#include "sd-event.h"
8314de1d 32
b5efdb8a 33#include "alloc-util.h"
194bbe33 34#include "cgroup-util.h"
618234a5 35#include "cpu-set-util.h"
5ba2dc25 36#include "dev-setup.h"
70068602 37#include "device-util.h"
3ffd4af2 38#include "fd-util.h"
a5c32cff 39#include "fileio.h"
f97b34a6 40#include "format-util.h"
f4f15635 41#include "fs-util.h"
a505965d 42#include "hashmap.h"
c004493c 43#include "io-util.h"
70068602 44#include "libudev-device-internal.h"
40a57716 45#include "list.h"
618234a5 46#include "netlink-util.h"
6bedfcbb 47#include "parse-util.h"
4e731273 48#include "proc-cmdline.h"
618234a5
LP
49#include "process-util.h"
50#include "selinux-util.h"
51#include "signal-util.h"
8f328d36 52#include "socket-util.h"
07630cea 53#include "string-util.h"
618234a5 54#include "terminal-util.h"
07a26e42 55#include "udev-builtin.h"
7d68eb1b 56#include "udev-ctrl.h"
618234a5 57#include "udev-util.h"
70068602 58#include "udev-watch.h"
618234a5 59#include "udev.h"
ee104e11 60#include "user-util.h"
7fafc032 61
bba7a484
TG
62static bool arg_debug = false;
63static int arg_daemonize = false;
64static int arg_resolve_names = 1;
020328e1 65static unsigned arg_children_max;
bba7a484
TG
66static int arg_exec_delay;
67static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
68static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
c0c6806b
TG
69
70typedef struct Manager {
693d371d 71 sd_event *event;
c0c6806b 72 Hashmap *workers;
40a57716 73 LIST_HEAD(struct event, events);
c26d1879 74 const char *cgroup;
cb49a4f2 75 pid_t pid; /* the process that originally allocated the manager object */
c0c6806b 76
ecb17862 77 struct udev_rules *rules;
9b5150b6 78 Hashmap *properties;
c0c6806b
TG
79
80 struct udev_monitor *monitor;
81 struct udev_ctrl *ctrl;
82 struct udev_ctrl_connection *ctrl_conn_blocking;
e237d8cb 83 int fd_inotify;
e237d8cb
TG
84 int worker_watch[2];
85
693d371d
TG
86 sd_event_source *ctrl_event;
87 sd_event_source *uevent_event;
88 sd_event_source *inotify_event;
89
7c4c7e89
TG
90 usec_t last_usec;
91
c0c6806b 92 bool stop_exec_queue:1;
c0c6806b
TG
93 bool exit:1;
94} Manager;
1e03b754 95
1e03b754 96enum event_state {
912541b0
KS
97 EVENT_UNDEF,
98 EVENT_QUEUED,
99 EVENT_RUNNING,
1e03b754
KS
100};
101
102struct event {
40a57716 103 LIST_FIELDS(struct event, event);
cb49a4f2 104 Manager *manager;
912541b0 105 struct udev_device *dev;
6969c349 106 struct udev_device *dev_kernel;
c6aa11f2 107 struct worker *worker;
912541b0 108 enum event_state state;
912541b0
KS
109 unsigned long long int delaying_seqnum;
110 unsigned long long int seqnum;
111 const char *devpath;
112 size_t devpath_len;
113 const char *devpath_old;
114 dev_t devnum;
912541b0 115 int ifindex;
ea6039a3 116 bool is_block;
693d371d
TG
117 sd_event_source *timeout_warning;
118 sd_event_source *timeout;
1e03b754
KS
119};
120
ecb17862 121static void event_queue_cleanup(Manager *manager, enum event_state type);
ff2c503d 122
1e03b754 123enum worker_state {
912541b0
KS
124 WORKER_UNDEF,
125 WORKER_RUNNING,
126 WORKER_IDLE,
127 WORKER_KILLED,
1e03b754
KS
128};
129
130struct worker {
c0c6806b 131 Manager *manager;
912541b0
KS
132 pid_t pid;
133 struct udev_monitor *monitor;
134 enum worker_state state;
135 struct event *event;
1e03b754
KS
136};
137
138/* passed from worker to main process */
139struct worker_message {
1e03b754
KS
140};
141
c6aa11f2 142static void event_free(struct event *event) {
cb49a4f2
TG
143 int r;
144
c6aa11f2
TG
145 if (!event)
146 return;
40a57716 147 assert(event->manager);
c6aa11f2 148
40a57716 149 LIST_REMOVE(event, event->manager->events, event);
912541b0 150 udev_device_unref(event->dev);
6969c349 151 udev_device_unref(event->dev_kernel);
c6aa11f2 152
693d371d
TG
153 sd_event_source_unref(event->timeout_warning);
154 sd_event_source_unref(event->timeout);
155
c6aa11f2
TG
156 if (event->worker)
157 event->worker->event = NULL;
158
40a57716 159 if (LIST_IS_EMPTY(event->manager->events)) {
cb49a4f2 160 /* only clean up the queue from the process that created it */
df0ff127 161 if (event->manager->pid == getpid_cached()) {
cb49a4f2
TG
162 r = unlink("/run/udev/queue");
163 if (r < 0)
164 log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
165 }
166 }
167
912541b0 168 free(event);
aa8734ff 169}
7a770250 170
c6aa11f2
TG
171static void worker_free(struct worker *worker) {
172 if (!worker)
173 return;
bc113de9 174
c0c6806b
TG
175 assert(worker->manager);
176
4a0b58c4 177 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
912541b0 178 udev_monitor_unref(worker->monitor);
c6aa11f2
TG
179 event_free(worker->event);
180
c6aa11f2 181 free(worker);
ff2c503d
KS
182}
183
c0c6806b 184static void manager_workers_free(Manager *manager) {
a505965d
TG
185 struct worker *worker;
186 Iterator i;
ff2c503d 187
c0c6806b
TG
188 assert(manager);
189
190 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 191 worker_free(worker);
a505965d 192
c0c6806b 193 manager->workers = hashmap_free(manager->workers);
fc465079
KS
194}
195
c0c6806b 196static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
a505965d
TG
197 _cleanup_free_ struct worker *worker = NULL;
198 int r;
3a19b32a
TG
199
200 assert(ret);
c0c6806b 201 assert(manager);
3a19b32a
TG
202 assert(worker_monitor);
203 assert(pid > 1);
204
205 worker = new0(struct worker, 1);
206 if (!worker)
207 return -ENOMEM;
208
c0c6806b 209 worker->manager = manager;
3a19b32a
TG
210 /* close monitor, but keep address around */
211 udev_monitor_disconnect(worker_monitor);
212 worker->monitor = udev_monitor_ref(worker_monitor);
213 worker->pid = pid;
a505965d 214
c0c6806b 215 r = hashmap_ensure_allocated(&manager->workers, NULL);
a505965d
TG
216 if (r < 0)
217 return r;
218
4a0b58c4 219 r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
a505965d
TG
220 if (r < 0)
221 return r;
222
ae2a15bc 223 *ret = TAKE_PTR(worker);
3a19b32a
TG
224
225 return 0;
226}
227
4fa4d885
TG
228static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
229 struct event *event = userdata;
230
231 assert(event);
232 assert(event->worker);
233
234 kill_and_sigcont(event->worker->pid, SIGKILL);
235 event->worker->state = WORKER_KILLED;
236
237 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
238
239 return 1;
240}
241
242static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
243 struct event *event = userdata;
244
245 assert(event);
246
247 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
248
249 return 1;
250}
251
39c19cf1 252static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
253 sd_event *e;
254 uint64_t usec;
693d371d 255
c6aa11f2 256 assert(worker);
693d371d 257 assert(worker->manager);
c6aa11f2
TG
258 assert(event);
259 assert(!event->worker);
260 assert(!worker->event);
261
39c19cf1 262 worker->state = WORKER_RUNNING;
39c19cf1
TG
263 worker->event = event;
264 event->state = EVENT_RUNNING;
c6aa11f2 265 event->worker = worker;
693d371d
TG
266
267 e = worker->manager->event;
268
3285baa8 269 assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 270
3285baa8 271 (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
693d371d
TG
272 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
273
3285baa8 274 (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
693d371d 275 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
276}
277
e237d8cb
TG
278static void manager_free(Manager *manager) {
279 if (!manager)
280 return;
281
2024ed61 282 udev_builtin_exit();
b2d21d93 283
693d371d
TG
284 sd_event_source_unref(manager->ctrl_event);
285 sd_event_source_unref(manager->uevent_event);
286 sd_event_source_unref(manager->inotify_event);
287
693d371d 288 sd_event_unref(manager->event);
e237d8cb
TG
289 manager_workers_free(manager);
290 event_queue_cleanup(manager, EVENT_UNDEF);
291
292 udev_monitor_unref(manager->monitor);
293 udev_ctrl_unref(manager->ctrl);
294 udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
295
9b5150b6 296 hashmap_free_free_free(manager->properties);
e237d8cb 297 udev_rules_unref(manager->rules);
e237d8cb 298
e237d8cb
TG
299 safe_close(manager->fd_inotify);
300 safe_close_pair(manager->worker_watch);
301
302 free(manager);
303}
304
305DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
306
9a73bd7c
TG
307static int worker_send_message(int fd) {
308 struct worker_message message = {};
309
310 return loop_write(fd, &message, sizeof(message), false);
311}
312
/* Whether event handling should take a BSD flock on the device node:
 * only block devices, and not dm-*, md* or drbd* nodes. */
static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);
        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}
c0c6806b 325static void worker_spawn(Manager *manager, struct event *event) {
8e766630 326 _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
912541b0 327 pid_t pid;
b6aab8ef 328 int r = 0;
912541b0
KS
329
330 /* listen for new events */
2024ed61 331 worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
912541b0
KS
332 if (worker_monitor == NULL)
333 return;
334 /* allow the main daemon netlink address to send devices to the worker */
c0c6806b 335 udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
b6aab8ef
TG
336 r = udev_monitor_enable_receiving(worker_monitor);
337 if (r < 0)
338 log_error_errno(r, "worker: could not enable receiving of device: %m");
912541b0 339
912541b0
KS
340 pid = fork();
341 switch (pid) {
342 case 0: {
343 struct udev_device *dev = NULL;
4afd3348 344 _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
912541b0 345 int fd_monitor;
e237d8cb 346 _cleanup_close_ int fd_signal = -1, fd_ep = -1;
2dd9f98d
TG
347 struct epoll_event ep_signal = { .events = EPOLLIN };
348 struct epoll_event ep_monitor = { .events = EPOLLIN };
912541b0 349 sigset_t mask;
912541b0 350
43095991 351 /* take initial device from queue */
1cc6c93a 352 dev = TAKE_PTR(event->dev);
912541b0 353
39fd2ca1
TG
354 unsetenv("NOTIFY_SOCKET");
355
c0c6806b 356 manager_workers_free(manager);
ecb17862 357 event_queue_cleanup(manager, EVENT_UNDEF);
6d1b1e0b 358
e237d8cb 359 manager->monitor = udev_monitor_unref(manager->monitor);
6d1b1e0b 360 manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
e237d8cb 361 manager->ctrl = udev_ctrl_unref(manager->ctrl);
e237d8cb 362 manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
912541b0 363
693d371d
TG
364 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
365 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
366 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
367
368 manager->event = sd_event_unref(manager->event);
369
912541b0
KS
370 sigfillset(&mask);
371 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
372 if (fd_signal < 0) {
6af5e6a4 373 r = log_error_errno(errno, "error creating signalfd %m");
912541b0
KS
374 goto out;
375 }
2dd9f98d
TG
376 ep_signal.data.fd = fd_signal;
377
378 fd_monitor = udev_monitor_get_fd(worker_monitor);
379 ep_monitor.data.fd = fd_monitor;
912541b0
KS
380
381 fd_ep = epoll_create1(EPOLL_CLOEXEC);
382 if (fd_ep < 0) {
6af5e6a4 383 r = log_error_errno(errno, "error creating epoll fd: %m");
912541b0
KS
384 goto out;
385 }
386
912541b0
KS
387 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
388 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
6af5e6a4 389 r = log_error_errno(errno, "fail to add fds to epoll: %m");
912541b0
KS
390 goto out;
391 }
392
045e00cf
ZJS
393 /* Request TERM signal if parent exits.
394 Ignore error, not much we can do in that case. */
395 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
912541b0 396
045e00cf 397 /* Reset OOM score, we only protect the main daemon. */
76cdddfb
YW
398 r = set_oom_score_adjust(0);
399 if (r < 0)
400 log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");
145dae7e 401
912541b0 402 for (;;) {
c1118ceb 403 _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
6af5e6a4 404 int fd_lock = -1;
912541b0 405
3b64e4d4
TG
406 assert(dev);
407
9f6445e3 408 log_debug("seq %llu running", udev_device_get_seqnum(dev));
912541b0
KS
409 udev_event = udev_event_new(dev);
410 if (udev_event == NULL) {
6af5e6a4 411 r = -ENOMEM;
912541b0
KS
412 goto out;
413 }
414
bba7a484
TG
415 if (arg_exec_delay > 0)
416 udev_event->exec_delay = arg_exec_delay;
912541b0 417
3ebdb81e 418 /*
2e5b17d0 419 * Take a shared lock on the device node; this establishes
3ebdb81e 420 * a concept of device "ownership" to serialize device
2e5b17d0 421 * access. External processes holding an exclusive lock will
3ebdb81e 422 * cause udev to skip the event handling; in the case udev
2e5b17d0 423 * acquired the lock, the external process can block until
3ebdb81e
KS
424 * udev has finished its event handling.
425 */
2e5b17d0 426 if (!streq_ptr(udev_device_get_action(dev), "remove") &&
fee854ee 427 shall_lock_device(dev)) {
3ebdb81e
KS
428 struct udev_device *d = dev;
429
430 if (streq_ptr("partition", udev_device_get_devtype(d)))
431 d = udev_device_get_parent(d);
432
433 if (d) {
434 fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
435 if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
56f64d95 436 log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
3d06f418 437 fd_lock = safe_close(fd_lock);
3ebdb81e
KS
438 goto skip;
439 }
440 }
441 }
442
4c83d994
TG
443 /* needed for renaming netifs */
444 udev_event->rtnl = rtnl;
445
912541b0 446 /* apply rules, create node, symlinks */
adeba500
KS
447 udev_event_execute_rules(udev_event,
448 arg_event_timeout_usec, arg_event_timeout_warn_usec,
9b5150b6 449 manager->properties,
8314de1d 450 manager->rules);
adeba500
KS
451
452 udev_event_execute_run(udev_event,
8314de1d 453 arg_event_timeout_usec, arg_event_timeout_warn_usec);
912541b0 454
523c620b
TG
455 if (udev_event->rtnl)
456 /* in case rtnl was initialized */
1c4baffc 457 rtnl = sd_netlink_ref(udev_event->rtnl);
4c83d994 458
912541b0 459 /* apply/restore inotify watch */
bf9bead1 460 if (udev_event->inotify_watch) {
70068602 461 udev_watch_begin(dev->device);
912541b0
KS
462 udev_device_update_db(dev);
463 }
464
3d06f418 465 safe_close(fd_lock);
3ebdb81e 466
912541b0
KS
467 /* send processed event back to libudev listeners */
468 udev_monitor_send_device(worker_monitor, NULL, dev);
469
3ebdb81e 470skip:
4914cb2d 471 log_debug("seq %llu processed", udev_device_get_seqnum(dev));
b66f29a1 472
912541b0 473 /* send udevd the result of the event execution */
e237d8cb 474 r = worker_send_message(manager->worker_watch[WRITE_END]);
b66f29a1 475 if (r < 0)
9a73bd7c 476 log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
b66f29a1 477 udev_device_get_seqnum(dev));
912541b0
KS
478
479 udev_device_unref(dev);
480 dev = NULL;
481
912541b0
KS
482 /* wait for more device messages from main udevd, or term signal */
483 while (dev == NULL) {
484 struct epoll_event ev[4];
485 int fdcount;
486 int i;
487
8fef0ff2 488 fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
912541b0
KS
489 if (fdcount < 0) {
490 if (errno == EINTR)
491 continue;
6af5e6a4 492 r = log_error_errno(errno, "failed to poll: %m");
912541b0
KS
493 goto out;
494 }
495
496 for (i = 0; i < fdcount; i++) {
497 if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
498 dev = udev_monitor_receive_device(worker_monitor);
499 break;
500 } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
501 struct signalfd_siginfo fdsi;
502 ssize_t size;
503
504 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
505 if (size != sizeof(struct signalfd_siginfo))
506 continue;
507 switch (fdsi.ssi_signo) {
508 case SIGTERM:
509 goto out;
510 }
511 }
512 }
513 }
514 }
82063a88 515out:
912541b0 516 udev_device_unref(dev);
e237d8cb 517 manager_free(manager);
baa30fbc 518 log_close();
8b46c3fc 519 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
912541b0
KS
520 }
521 case -1:
912541b0 522 event->state = EVENT_QUEUED;
56f64d95 523 log_error_errno(errno, "fork of child failed: %m");
912541b0
KS
524 break;
525 default:
e03c7cc2
TG
526 {
527 struct worker *worker;
528
c0c6806b 529 r = worker_new(&worker, manager, worker_monitor, pid);
3a19b32a 530 if (r < 0)
e03c7cc2 531 return;
e03c7cc2 532
39c19cf1
TG
533 worker_attach_event(worker, event);
534
1fa2f38f 535 log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
912541b0
KS
536 break;
537 }
e03c7cc2 538 }
7fafc032
KS
539}
540
c0c6806b 541static void event_run(Manager *manager, struct event *event) {
a505965d
TG
542 struct worker *worker;
543 Iterator i;
912541b0 544
c0c6806b
TG
545 assert(manager);
546 assert(event);
547
548 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
549 ssize_t count;
550
551 if (worker->state != WORKER_IDLE)
552 continue;
553
c0c6806b 554 count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
912541b0 555 if (count < 0) {
1fa2f38f
ZJS
556 log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
557 worker->pid, count);
cb542e84 558 (void) kill(worker->pid, SIGKILL);
912541b0
KS
559 worker->state = WORKER_KILLED;
560 continue;
561 }
39c19cf1 562 worker_attach_event(worker, event);
912541b0
KS
563 return;
564 }
565
c0c6806b 566 if (hashmap_size(manager->workers) >= arg_children_max) {
bba7a484 567 if (arg_children_max > 1)
c0c6806b 568 log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
912541b0
KS
569 return;
570 }
571
572 /* start new worker and pass initial device */
c0c6806b 573 worker_spawn(manager, event);
1e03b754
KS
574}
575
ecb17862 576static int event_queue_insert(Manager *manager, struct udev_device *dev) {
912541b0 577 struct event *event;
cb49a4f2 578 int r;
912541b0 579
ecb17862
TG
580 assert(manager);
581 assert(dev);
582
040e6896
TG
583 /* only one process can add events to the queue */
584 if (manager->pid == 0)
df0ff127 585 manager->pid = getpid_cached();
040e6896 586
df0ff127 587 assert(manager->pid == getpid_cached());
cb49a4f2 588
955d98c9 589 event = new0(struct event, 1);
cb49a4f2
TG
590 if (!event)
591 return -ENOMEM;
912541b0 592
cb49a4f2 593 event->manager = manager;
912541b0 594 event->dev = dev;
6969c349
TG
595 event->dev_kernel = udev_device_shallow_clone(dev);
596 udev_device_copy_properties(event->dev_kernel, dev);
912541b0
KS
597 event->seqnum = udev_device_get_seqnum(dev);
598 event->devpath = udev_device_get_devpath(dev);
599 event->devpath_len = strlen(event->devpath);
600 event->devpath_old = udev_device_get_devpath_old(dev);
601 event->devnum = udev_device_get_devnum(dev);
ea6039a3 602 event->is_block = streq("block", udev_device_get_subsystem(dev));
912541b0
KS
603 event->ifindex = udev_device_get_ifindex(dev);
604
9f6445e3 605 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
912541b0
KS
606 udev_device_get_action(dev), udev_device_get_subsystem(dev));
607
608 event->state = EVENT_QUEUED;
cb49a4f2 609
40a57716 610 if (LIST_IS_EMPTY(manager->events)) {
cb49a4f2
TG
611 r = touch("/run/udev/queue");
612 if (r < 0)
613 log_warning_errno(r, "could not touch /run/udev/queue: %m");
614 }
615
40a57716 616 LIST_APPEND(event, manager->events, event);
cb49a4f2 617
912541b0 618 return 0;
fc465079
KS
619}
620
c0c6806b 621static void manager_kill_workers(Manager *manager) {
a505965d
TG
622 struct worker *worker;
623 Iterator i;
1e03b754 624
c0c6806b
TG
625 assert(manager);
626
627 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
628 if (worker->state == WORKER_KILLED)
629 continue;
1e03b754 630
912541b0 631 worker->state = WORKER_KILLED;
cb542e84 632 (void) kill(worker->pid, SIGTERM);
912541b0 633 }
1e03b754
KS
634}
635
e3196993 636/* lookup event for identical, parent, child device */
ecb17862 637static bool is_devpath_busy(Manager *manager, struct event *event) {
40a57716 638 struct event *loop_event;
912541b0
KS
639 size_t common;
640
641 /* check if queue contains events we depend on */
40a57716 642 LIST_FOREACH(event, loop_event, manager->events) {
87ac8d99 643 /* we already found a later event, earlier cannot block us, no need to check again */
912541b0
KS
644 if (loop_event->seqnum < event->delaying_seqnum)
645 continue;
646
647 /* event we checked earlier still exists, no need to check again */
648 if (loop_event->seqnum == event->delaying_seqnum)
649 return true;
650
651 /* found ourself, no later event can block us */
652 if (loop_event->seqnum >= event->seqnum)
653 break;
654
655 /* check major/minor */
656 if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
657 return true;
658
659 /* check network device ifindex */
660 if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
661 return true;
662
663 /* check our old name */
090be865 664 if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
912541b0
KS
665 event->delaying_seqnum = loop_event->seqnum;
666 return true;
667 }
668
669 /* compare devpath */
670 common = MIN(loop_event->devpath_len, event->devpath_len);
671
672 /* one devpath is contained in the other? */
673 if (memcmp(loop_event->devpath, event->devpath, common) != 0)
674 continue;
675
676 /* identical device event found */
677 if (loop_event->devpath_len == event->devpath_len) {
678 /* devices names might have changed/swapped in the meantime */
679 if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
680 continue;
681 if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
682 continue;
683 event->delaying_seqnum = loop_event->seqnum;
684 return true;
685 }
686
687 /* parent device event found */
688 if (event->devpath[common] == '/') {
689 event->delaying_seqnum = loop_event->seqnum;
690 return true;
691 }
692
693 /* child device event found */
694 if (loop_event->devpath[common] == '/') {
695 event->delaying_seqnum = loop_event->seqnum;
696 return true;
697 }
698
699 /* no matching device */
700 continue;
701 }
702
703 return false;
7fafc032
KS
704}
705
693d371d
TG
706static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
707 Manager *manager = userdata;
708
709 assert(manager);
710
711 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
712
713 sd_event_exit(manager->event, -ETIMEDOUT);
714
715 return 1;
716}
717
62d43dac 718static void manager_exit(Manager *manager) {
693d371d
TG
719 uint64_t usec;
720 int r;
62d43dac
TG
721
722 assert(manager);
723
724 manager->exit = true;
725
b79aacbf
TG
726 sd_notify(false,
727 "STOPPING=1\n"
728 "STATUS=Starting shutdown...");
729
62d43dac 730 /* close sources of new events and discard buffered events */
693d371d 731 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
ab7854df 732 manager->ctrl = udev_ctrl_unref(manager->ctrl);
62d43dac 733
693d371d 734 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
ab7854df 735 manager->fd_inotify = safe_close(manager->fd_inotify);
62d43dac 736
693d371d 737 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
ab7854df 738 manager->monitor = udev_monitor_unref(manager->monitor);
62d43dac
TG
739
740 /* discard queued events and kill workers */
741 event_queue_cleanup(manager, EVENT_QUEUED);
742 manager_kill_workers(manager);
693d371d 743
3285baa8 744 assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 745
3285baa8 746 r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
693d371d
TG
747 usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
748 if (r < 0)
749 return;
62d43dac
TG
750}
751
752/* reload requested, HUP signal received, rules changed, builtin changed */
753static void manager_reload(Manager *manager) {
754
755 assert(manager);
756
b79aacbf
TG
757 sd_notify(false,
758 "RELOADING=1\n"
759 "STATUS=Flushing configuration...");
760
62d43dac
TG
761 manager_kill_workers(manager);
762 manager->rules = udev_rules_unref(manager->rules);
2024ed61 763 udev_builtin_exit();
b79aacbf 764
1ef72b55
MS
765 sd_notifyf(false,
766 "READY=1\n"
767 "STATUS=Processing with %u children at max", arg_children_max);
62d43dac
TG
768}
769
c0c6806b 770static void event_queue_start(Manager *manager) {
40a57716 771 struct event *event;
693d371d 772 usec_t usec;
8ab44e3f 773
c0c6806b
TG
774 assert(manager);
775
40a57716 776 if (LIST_IS_EMPTY(manager->events) ||
7c4c7e89
TG
777 manager->exit || manager->stop_exec_queue)
778 return;
779
3285baa8 780 assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
38a03f06
LP
781 /* check for changed config, every 3 seconds at most */
782 if (manager->last_usec == 0 ||
783 (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
784 if (udev_rules_check_timestamp(manager->rules) ||
2024ed61 785 udev_builtin_validate())
38a03f06 786 manager_reload(manager);
693d371d 787
38a03f06 788 manager->last_usec = usec;
7c4c7e89
TG
789 }
790
2024ed61 791 udev_builtin_init();
7c4c7e89
TG
792
793 if (!manager->rules) {
2024ed61 794 manager->rules = udev_rules_new(arg_resolve_names);
7c4c7e89
TG
795 if (!manager->rules)
796 return;
797 }
798
40a57716 799 LIST_FOREACH(event,event,manager->events) {
912541b0
KS
800 if (event->state != EVENT_QUEUED)
801 continue;
0bc74ea7 802
912541b0 803 /* do not start event if parent or child event is still running */
ecb17862 804 if (is_devpath_busy(manager, event))
912541b0 805 continue;
fc465079 806
c0c6806b 807 event_run(manager, event);
912541b0 808 }
1e03b754
KS
809}
810
ecb17862 811static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 812 struct event *event, *tmp;
ff2c503d 813
40a57716 814 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
815 if (match_type != EVENT_UNDEF && match_type != event->state)
816 continue;
ff2c503d 817
c6aa11f2 818 event_free(event);
912541b0 819 }
ff2c503d
KS
820}
821
e82e8fa5 822static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b
TG
823 Manager *manager = userdata;
824
825 assert(manager);
826
912541b0
KS
827 for (;;) {
828 struct worker_message msg;
979558f3
TG
829 struct iovec iovec = {
830 .iov_base = &msg,
831 .iov_len = sizeof(msg),
832 };
833 union {
834 struct cmsghdr cmsghdr;
835 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
836 } control = {};
837 struct msghdr msghdr = {
838 .msg_iov = &iovec,
839 .msg_iovlen = 1,
840 .msg_control = &control,
841 .msg_controllen = sizeof(control),
842 };
843 struct cmsghdr *cmsg;
912541b0 844 ssize_t size;
979558f3 845 struct ucred *ucred = NULL;
a505965d 846 struct worker *worker;
912541b0 847
e82e8fa5 848 size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
979558f3 849 if (size < 0) {
738a7907
TG
850 if (errno == EINTR)
851 continue;
852 else if (errno == EAGAIN)
853 /* nothing more to read */
854 break;
979558f3 855
e82e8fa5 856 return log_error_errno(errno, "failed to receive message: %m");
979558f3
TG
857 } else if (size != sizeof(struct worker_message)) {
858 log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
e82e8fa5 859 continue;
979558f3
TG
860 }
861
2a1288ff 862 CMSG_FOREACH(cmsg, &msghdr) {
979558f3
TG
863 if (cmsg->cmsg_level == SOL_SOCKET &&
864 cmsg->cmsg_type == SCM_CREDENTIALS &&
865 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
866 ucred = (struct ucred*) CMSG_DATA(cmsg);
867 }
868
869 if (!ucred || ucred->pid <= 0) {
870 log_warning_errno(EIO, "ignoring worker message without valid PID");
871 continue;
872 }
912541b0
KS
873
874 /* lookup worker who sent the signal */
4a0b58c4 875 worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
a505965d
TG
876 if (!worker) {
877 log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
878 continue;
912541b0 879 }
c0bbfd72 880
a505965d
TG
881 if (worker->state != WORKER_KILLED)
882 worker->state = WORKER_IDLE;
883
884 /* worker returned */
885 event_free(worker->event);
912541b0 886 }
e82e8fa5 887
8302fe5a
TG
888 /* we have free workers, try to schedule events */
889 event_queue_start(manager);
890
e82e8fa5
TG
891 return 1;
892}
893
894static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 895 Manager *manager = userdata;
e82e8fa5
TG
896 struct udev_device *dev;
897 int r;
898
c0c6806b 899 assert(manager);
e82e8fa5 900
c0c6806b 901 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
902 if (dev) {
903 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 904 r = event_queue_insert(manager, dev);
e82e8fa5
TG
905 if (r < 0)
906 udev_device_unref(dev);
8302fe5a
TG
907 else
908 /* we have fresh events, try to schedule them */
909 event_queue_start(manager);
e82e8fa5
TG
910 }
911
912 return 1;
88f4b648
KS
913}
914
3b47c739 915/* receive the udevd message from userspace */
e82e8fa5 916static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 917 Manager *manager = userdata;
8e766630
LP
918 _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
919 _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
912541b0 920 const char *str;
9b5150b6 921 int i, r;
912541b0 922
c0c6806b 923 assert(manager);
e4f66b77 924
c0c6806b 925 ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
e4f66b77 926 if (!ctrl_conn)
e82e8fa5 927 return 1;
912541b0
KS
928
929 ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
e4f66b77 930 if (!ctrl_msg)
e82e8fa5 931 return 1;
912541b0
KS
932
933 i = udev_ctrl_get_set_log_level(ctrl_msg);
934 if (i >= 0) {
ed14edc0 935 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
baa30fbc 936 log_set_max_level(i);
c0c6806b 937 manager_kill_workers(manager);
912541b0
KS
938 }
939
940 if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
9f6445e3 941 log_debug("udevd message (STOP_EXEC_QUEUE) received");
c0c6806b 942 manager->stop_exec_queue = true;
912541b0
KS
943 }
944
945 if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
9f6445e3 946 log_debug("udevd message (START_EXEC_QUEUE) received");
c0c6806b 947 manager->stop_exec_queue = false;
8302fe5a 948 event_queue_start(manager);
912541b0
KS
949 }
950
951 if (udev_ctrl_get_reload(ctrl_msg) > 0) {
9f6445e3 952 log_debug("udevd message (RELOAD) received");
62d43dac 953 manager_reload(manager);
912541b0
KS
954 }
955
956 str = udev_ctrl_get_set_env(ctrl_msg);
9b5150b6
YW
957 if (str) {
958 _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
959 char *eq;
960
961 eq = strchr(str, '=');
962 if (!eq) {
963 log_error("Invalid key format '%s'", str);
964 return 1;
965 }
966
967 key = strndup(str, eq - str);
968 if (!key) {
969 log_oom();
970 return 1;
971 }
972
973 old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);
974
975 r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
976 if (r < 0) {
977 log_oom();
978 return 1;
912541b0 979 }
9b5150b6
YW
980
981 eq++;
982 if (!isempty(eq)) {
983 log_debug("udevd message (ENV) received, unset '%s'", key);
984
985 r = hashmap_put(manager->properties, key, NULL);
986 if (r < 0) {
987 log_oom();
988 return 1;
989 }
990 } else {
991 val = strdup(eq);
992 if (!val) {
993 log_oom();
994 return 1;
995 }
996
997 log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
998
999 r = hashmap_put(manager->properties, key, val);
1000 if (r < 0) {
1001 log_oom();
1002 return 1;
1003 }
1004 }
1005
1006 key = val = NULL;
c0c6806b 1007 manager_kill_workers(manager);
912541b0
KS
1008 }
1009
1010 i = udev_ctrl_get_set_children_max(ctrl_msg);
1011 if (i >= 0) {
9f6445e3 1012 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
bba7a484 1013 arg_children_max = i;
1ef72b55
MS
1014
1015 (void) sd_notifyf(false,
1016 "READY=1\n"
1017 "STATUS=Processing with %u children at max", arg_children_max);
912541b0
KS
1018 }
1019
cb49a4f2 1020 if (udev_ctrl_get_ping(ctrl_msg) > 0)
9f6445e3 1021 log_debug("udevd message (SYNC) received");
912541b0
KS
1022
1023 if (udev_ctrl_get_exit(ctrl_msg) > 0) {
9f6445e3 1024 log_debug("udevd message (EXIT) received");
62d43dac 1025 manager_exit(manager);
c0c6806b
TG
1026 /* keep reference to block the client until we exit
1027 TODO: deal with several blocking exit requests */
1028 manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
912541b0 1029 }
e4f66b77 1030
e82e8fa5 1031 return 1;
88f4b648 1032}
4a231017 1033
/* Write "change" to the uevent file of a closed device (and, for whole disks,
 * of its partitions) so the kernel re-emits a "change" uevent and interested
 * userspace re-probes the device. Returns 0 on success, negative errno if the
 * device properties cannot be queried. */
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devname, *syspath, *devtype;
        char filename[PATH_MAX];
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &devname);
        if (r < 0)
                return r;

        r = sd_device_get_syspath(dev, &syspath);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        /* whole block devices get special treatment; device-mapper ("dm-*")
         * devices are excluded */
        if (streq_ptr("block", subsystem) &&
            streq_ptr("disk", devtype) &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        /* LOCK_NB: never stall the event loop on a held device lock */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("Device '%s' is closed, synthesising 'change'", devname);
                strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
                /* best effort; a failed write is deliberately ignored */
                write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

                FOREACH_DEVICE(e, d) {
                        const char *t, *n, *s;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        if (sd_device_get_devname(d, &n) < 0 ||
                            sd_device_get_syspath(d, &s) < 0)
                                continue;

                        log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
                        strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
                }

                return 0;
        }

        /* all other devices: synthesize "change" for the device itself only */
        log_debug("Device %s is closed, synthesising 'change'", devname);
        strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

        return 0;
}
1153
/* sd-event callback for the inotify fd watching open device nodes. */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                /* spurious wakeup or interrupted read: just try again later */
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                /* map the watch descriptor back to the watched device */
                if (udev_watch_lookup(e->wd, &dev) <= 0)
                        continue;

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE) {
                        /* a writer closed the node: ask the kernel to re-announce it */
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        /* the watch went away; drop our bookkeeping for it */
                        udev_watch_end(dev);
        }

        return 1;
}
1196
0561329d 1197static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1198 Manager *manager = userdata;
1199
1200 assert(manager);
1201
62d43dac 1202 manager_exit(manager);
912541b0 1203
e82e8fa5
TG
1204 return 1;
1205}
912541b0 1206
0561329d 1207static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1208 Manager *manager = userdata;
1209
1210 assert(manager);
1211
62d43dac 1212 manager_reload(manager);
912541b0 1213
e82e8fa5
TG
1214 return 1;
1215}
912541b0 1216
/* SIGCHLD handler: reap all dead worker processes, clean up after the ones
 * that failed, and kick the event queue since worker slots freed up. */
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        /* one SIGCHLD may stand for several exited children: loop until
         * waitpid() has nothing more to report */
        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        /* stopped/continued workers are still alive: keep their state */
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                /* also frees worker->event if still attached */
                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
1272
693d371d
TG
1273static int on_post(sd_event_source *s, void *userdata) {
1274 Manager *manager = userdata;
1275 int r;
1276
1277 assert(manager);
1278
40a57716 1279 if (LIST_IS_EMPTY(manager->events)) {
693d371d
TG
1280 /* no pending events */
1281 if (!hashmap_isempty(manager->workers)) {
1282 /* there are idle workers */
1283 log_debug("cleanup idle workers");
1284 manager_kill_workers(manager);
1285 } else {
1286 /* we are idle */
1287 if (manager->exit) {
1288 r = sd_event_exit(manager->event, 0);
1289 if (r < 0)
1290 return r;
1291 } else if (manager->cgroup)
1292 /* cleanup possible left-over processes in our cgroup */
1d98fef1 1293 cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
693d371d
TG
1294 }
1295 }
1296
1297 return 1;
1298}
1299
fcff1e72
TG
1300static int listen_fds(int *rctrl, int *rnetlink) {
1301 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1302 int fd, n, r;
912541b0 1303
fcff1e72
TG
1304 assert(rctrl);
1305 assert(rnetlink);
1306
912541b0 1307 n = sd_listen_fds(true);
fcff1e72
TG
1308 if (n < 0)
1309 return n;
912541b0
KS
1310
1311 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1312 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1313 if (ctrl_fd >= 0)
1314 return -EINVAL;
1315 ctrl_fd = fd;
912541b0
KS
1316 continue;
1317 }
1318
1319 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1320 if (netlink_fd >= 0)
1321 return -EINVAL;
1322 netlink_fd = fd;
912541b0
KS
1323 continue;
1324 }
1325
fcff1e72 1326 return -EINVAL;
912541b0
KS
1327 }
1328
f59118ec 1329 if (ctrl_fd < 0) {
8e766630 1330 _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;
f59118ec 1331
2024ed61 1332 ctrl = udev_ctrl_new();
f59118ec
TG
1333 if (!ctrl)
1334 return log_error_errno(EINVAL, "error initializing udev control socket");
1335
1336 r = udev_ctrl_enable_receiving(ctrl);
1337 if (r < 0)
1338 return log_error_errno(EINVAL, "error binding udev control socket");
1339
1340 fd = udev_ctrl_get_fd(ctrl);
1341 if (fd < 0)
1342 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1343
f59118ec
TG
1344 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1345 if (ctrl_fd < 0)
1346 return log_error_errno(errno, "could not dup ctrl fd: %m");
1347 }
1348
1349 if (netlink_fd < 0) {
8e766630 1350 _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;
f59118ec 1351
2024ed61 1352 monitor = udev_monitor_new_from_netlink(NULL, "kernel");
f59118ec
TG
1353 if (!monitor)
1354 return log_error_errno(EINVAL, "error initializing netlink socket");
1355
1356 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1357
1358 r = udev_monitor_enable_receiving(monitor);
1359 if (r < 0)
1360 return log_error_errno(EINVAL, "error binding netlink socket");
1361
1362 fd = udev_monitor_get_fd(monitor);
1363 if (fd < 0)
1364 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1365
1366 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1367 if (netlink_fd < 0)
f59118ec
TG
1368 return log_error_errno(errno, "could not dup netlink fd: %m");
1369 }
fcff1e72
TG
1370
1371 *rctrl = ctrl_fd;
1372 *rnetlink = netlink_fd;
912541b0 1373
912541b0 1374 return 0;
7459bcdc
KS
1375}
1376
e6f86cac 1377/*
3f85ef0f 1378 * read the kernel command line, in case we need to get into debug mode
1d84ad94
LP
1379 * udev.log_priority=<level> syslog priority
1380 * udev.children_max=<number of workers> events are fully serialized if set to 1
1381 * udev.exec_delay=<number of seconds> delay execution of every executed program
1382 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1383 */
96287a49 1384static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
92e72467 1385 int r = 0;
e6f86cac 1386
614a823c 1387 assert(key);
e6f86cac 1388
614a823c
TG
1389 if (!value)
1390 return 0;
e6f86cac 1391
1d84ad94
LP
1392 if (proc_cmdline_key_streq(key, "udev.log_priority")) {
1393
1394 if (proc_cmdline_value_missing(key, value))
1395 return 0;
1396
92e72467
ZJS
1397 r = util_log_priority(value);
1398 if (r >= 0)
1399 log_set_max_level(r);
1d84ad94
LP
1400
1401 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1402
1403 if (proc_cmdline_value_missing(key, value))
1404 return 0;
1405
92e72467
ZJS
1406 r = safe_atou64(value, &arg_event_timeout_usec);
1407 if (r >= 0) {
1408 arg_event_timeout_usec *= USEC_PER_SEC;
1409 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1410 }
1d84ad94
LP
1411
1412 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1413
1414 if (proc_cmdline_value_missing(key, value))
1415 return 0;
1416
020328e1 1417 r = safe_atou(value, &arg_children_max);
1d84ad94
LP
1418
1419 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1420
1421 if (proc_cmdline_value_missing(key, value))
1422 return 0;
1423
614a823c 1424 r = safe_atoi(value, &arg_exec_delay);
1d84ad94
LP
1425
1426 } else if (startswith(key, "udev."))
92e72467 1427 log_warning("Unknown udev kernel command line option \"%s\"", key);
614a823c 1428
92e72467
ZJS
1429 if (r < 0)
1430 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1d84ad94 1431
614a823c 1432 return 0;
e6f86cac
KS
1433}
1434
37ec0fdd
LP
1435static int help(void) {
1436 _cleanup_free_ char *link = NULL;
1437 int r;
1438
1439 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1440 if (r < 0)
1441 return log_oom();
1442
ed216e1f
TG
1443 printf("%s [OPTIONS...]\n\n"
1444 "Manages devices.\n\n"
5ac0162c 1445 " -h --help Print this message\n"
2d19c17e
MF
1446 " -V --version Print version of the program\n"
1447 " -d --daemon Detach and run in the background\n"
1448 " -D --debug Enable debug output\n"
1449 " -c --children-max=INT Set maximum number of workers\n"
1450 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1451 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1452 " -N --resolve-names=early|late|never\n"
5ac0162c 1453 " When to resolve users and groups\n"
37ec0fdd
LP
1454 "\nSee the %s for details.\n"
1455 , program_invocation_short_name
1456 , link
1457 );
1458
1459 return 0;
ed216e1f
TG
1460}
1461
bba7a484 1462static int parse_argv(int argc, char *argv[]) {
912541b0 1463 static const struct option options[] = {
bba7a484
TG
1464 { "daemon", no_argument, NULL, 'd' },
1465 { "debug", no_argument, NULL, 'D' },
1466 { "children-max", required_argument, NULL, 'c' },
1467 { "exec-delay", required_argument, NULL, 'e' },
1468 { "event-timeout", required_argument, NULL, 't' },
1469 { "resolve-names", required_argument, NULL, 'N' },
1470 { "help", no_argument, NULL, 'h' },
1471 { "version", no_argument, NULL, 'V' },
912541b0
KS
1472 {}
1473 };
689a97f5 1474
bba7a484 1475 int c;
689a97f5 1476
bba7a484
TG
1477 assert(argc >= 0);
1478 assert(argv);
912541b0 1479
e14b6f21 1480 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1481 int r;
912541b0 1482
bba7a484 1483 switch (c) {
912541b0 1484
912541b0 1485 case 'd':
bba7a484 1486 arg_daemonize = true;
912541b0
KS
1487 break;
1488 case 'c':
020328e1 1489 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1490 if (r < 0)
1491 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1492 break;
1493 case 'e':
6f5cf8a8
TG
1494 r = safe_atoi(optarg, &arg_exec_delay);
1495 if (r < 0)
1496 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1497 break;
9719859c 1498 case 't':
f1e8664e
TG
1499 r = safe_atou64(optarg, &arg_event_timeout_usec);
1500 if (r < 0)
65fea570 1501 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1502 else {
1503 arg_event_timeout_usec *= USEC_PER_SEC;
1504 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1505 }
9719859c 1506 break;
912541b0 1507 case 'D':
bba7a484 1508 arg_debug = true;
912541b0
KS
1509 break;
1510 case 'N':
090be865 1511 if (streq(optarg, "early")) {
bba7a484 1512 arg_resolve_names = 1;
090be865 1513 } else if (streq(optarg, "late")) {
bba7a484 1514 arg_resolve_names = 0;
090be865 1515 } else if (streq(optarg, "never")) {
bba7a484 1516 arg_resolve_names = -1;
912541b0 1517 } else {
9f6445e3 1518 log_error("resolve-names must be early, late or never");
bba7a484 1519 return 0;
912541b0
KS
1520 }
1521 break;
1522 case 'h':
37ec0fdd 1523 return help();
912541b0 1524 case 'V':
948aaa7c 1525 printf("%s\n", PACKAGE_VERSION);
bba7a484
TG
1526 return 0;
1527 case '?':
1528 return -EINVAL;
912541b0 1529 default:
bba7a484
TG
1530 assert_not_reached("Unhandled option");
1531
912541b0
KS
1532 }
1533 }
1534
bba7a484
TG
1535 return 1;
1536}
1537
b7f74dd4 1538static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1539 _cleanup_(manager_freep) Manager *manager = NULL;
6d5e65f6 1540 int r, fd_worker;
c0c6806b
TG
1541
1542 assert(ret);
11b1dd8c
TG
1543 assert(fd_ctrl >= 0);
1544 assert(fd_uevent >= 0);
c0c6806b
TG
1545
1546 manager = new0(Manager, 1);
1547 if (!manager)
1548 return log_oom();
1549
e237d8cb
TG
1550 manager->fd_inotify = -1;
1551 manager->worker_watch[WRITE_END] = -1;
1552 manager->worker_watch[READ_END] = -1;
1553
2024ed61 1554 udev_builtin_init();
b2d21d93 1555
2024ed61 1556 manager->rules = udev_rules_new(arg_resolve_names);
ecb17862
TG
1557 if (!manager->rules)
1558 return log_error_errno(ENOMEM, "error reading rules");
1559
40a57716 1560 LIST_HEAD_INIT(manager->events);
ecb17862 1561
c26d1879
TG
1562 manager->cgroup = cgroup;
1563
2024ed61 1564 manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
f59118ec
TG
1565 if (!manager->ctrl)
1566 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1567
2024ed61 1568 manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
f59118ec
TG
1569 if (!manager->monitor)
1570 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1571
1572 /* unnamed socket from workers to the main daemon */
1573 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1574 if (r < 0)
1575 return log_error_errno(errno, "error creating socketpair: %m");
1576
693d371d 1577 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1578
2ff48e98 1579 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
e237d8cb 1580 if (r < 0)
2ff48e98 1581 return log_error_errno(r, "could not enable SO_PASSCRED: %m");
e237d8cb 1582
b7759e04
YW
1583 r = udev_watch_init();
1584 if (r < 0)
1585 return log_error_errno(r, "Failed to create inotify descriptor: %m");
1586 manager->fd_inotify = r;
e237d8cb 1587
2024ed61 1588 udev_watch_restore();
e237d8cb
TG
1589
1590 /* block and listen to all signals on signalfd */
72c0a2c2 1591 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1592
49f997f3
TG
1593 r = sd_event_default(&manager->event);
1594 if (r < 0)
709f6e46 1595 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1596
693d371d
TG
1597 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1598 if (r < 0)
1599 return log_error_errno(r, "error creating sigint event source: %m");
1600
1601 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1602 if (r < 0)
1603 return log_error_errno(r, "error creating sigterm event source: %m");
1604
1605 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1606 if (r < 0)
1607 return log_error_errno(r, "error creating sighup event source: %m");
1608
1609 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1610 if (r < 0)
1611 return log_error_errno(r, "error creating sigchld event source: %m");
1612
1613 r = sd_event_set_watchdog(manager->event, true);
1614 if (r < 0)
1615 return log_error_errno(r, "error creating watchdog event source: %m");
1616
11b1dd8c 1617 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1618 if (r < 0)
1619 return log_error_errno(r, "error creating ctrl event source: %m");
1620
1621 /* This needs to be after the inotify and uevent handling, to make sure
1622 * that the ping is send back after fully processing the pending uevents
1623 * (including the synthetic ones we may create due to inotify events).
1624 */
1625 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1626 if (r < 0)
1627 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1628
1629 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1630 if (r < 0)
1631 return log_error_errno(r, "error creating inotify event source: %m");
1632
11b1dd8c 1633 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1634 if (r < 0)
1635 return log_error_errno(r, "error creating uevent event source: %m");
1636
1637 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1638 if (r < 0)
1639 return log_error_errno(r, "error creating worker event source: %m");
1640
1641 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1642 if (r < 0)
1643 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1644
1cc6c93a 1645 *ret = TAKE_PTR(manager);
11b1dd8c 1646
86c3bece 1647 return 0;
c0c6806b
TG
1648}
1649
/* Set up the Manager and run the event loop until exit is requested.
 * Returns the exit code set via sd_event_exit(), or a negative errno on
 * setup/loop failure. */
static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        /* best effort: a failure here is logged but does not abort startup */
        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        /* tell the service manager we are ready to process events */
        (void) sd_notifyf(false,
                          "READY=1\n"
                          "STATUS=Processing with %u children at max", arg_children_max);

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        /* pick up the exit code that on_post()/manager_exit() stored */
        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}
1684
/* Daemon entry point: parse configuration and options, prepare the runtime
 * environment (/run/udev, /dev, SELinux labelling), collect or create the
 * listening sockets, optionally daemonize, then hand off to run(). */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        /* r <= 0 means --help/--version handled or bad arguments */
        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                goto exit;

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                /* derive a default worker count from CPUs, capped by memory */
                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        /* child continues as the daemon */
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* parent exits, leaving the child detached */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* protect the daemon from the OOM killer; best effort */
                r = set_oom_score_adjust(-1000);
                if (r < 0)
                        log_debug_errno(r, "Failed to adjust OOM score, ignoring: %m");
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}