/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright © 2009 Canonical Ltd.
 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "device-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "libudev-device-internal.h"
#include "list.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "proc-cmdline.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-builtin.h"
#include "udev-ctrl.h"
#include "udev-util.h"
#include "udev-watch.h"
#include "udev.h"
#include "user-util.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

typedef struct Manager {
        sd_event *event;
        Hashmap *workers;
        LIST_HEAD(struct event, events);
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        Hashmap *properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;
        sd_event_source *kill_workers_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

static void event_free(struct event *event) {
        int r;

        if (!event)
                return;
        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        if (LIST_IS_EMPTY(event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid_cached()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit();

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);
        sd_event_source_unref(manager->kill_workers_event);

        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        hashmap_free_free_free(manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);
        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}

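/* fork a worker to handle one event; the child runs the rules for the device,
 * reports the result back over the worker socket, then waits for further devices
 * from the main daemon, while the parent registers the new worker */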
static void worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                r = set_oom_score_adjust(0);
                if (r < 0)
                        log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

                for (;;) {
                        _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            shall_lock_device(dev)) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(dev->device);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

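/* pass the event to an idle worker, or fork a new one as long as children_max is not reached */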
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                (void) kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit();

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}

static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_debug("Cleanup idle workers");
        manager_kill_workers(manager);

        return 1;
}

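/* arm a one-shot timer that cleans up idle workers 3 seconds from now,
 * reusing the existing event source where possible */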
static int manager_enable_kill_workers_event(Manager *manager) {
        int enabled, r;

        assert(manager);

        if (!manager->kill_workers_event)
                goto create_new;

        r = sd_event_source_get_enabled(manager->kill_workers_event, &enabled);
        if (r < 0) {
                log_debug_errno(r, "Failed to query whether event source for killing idle workers is enabled or not, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        if (enabled == SD_EVENT_ONESHOT)
                return 0;

        r = sd_event_source_set_time(manager->kill_workers_event, now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC);
        if (r < 0) {
                log_debug_errno(r, "Failed to set time to event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_ONESHOT);
        if (r < 0) {
                log_debug_errno(r, "Failed to enable event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        return 0;

create_new:
        r = sd_event_add_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
                              now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC, on_kill_workers_event, manager);
        if (r < 0)
                return log_warning_errno(r, "Failed to create timer event for killing idle workers: %m");

        return 0;
}

static int manager_disable_kill_workers_event(Manager *manager) {
        int r;

        if (!manager->kill_workers_event)
                return 0;

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
        if (r < 0)
                return log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        return 0;
}

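/* start all queued events that are not blocked by a related device event */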
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        (void) manager_disable_kill_workers_event(manager);

        udev_builtin_init();

        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct event *event, *tmp;

        LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

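/* a worker sent an (empty, credential-checked) message back: mark it idle again and free its event */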
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i, r;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str) {
                _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
                char *eq;

                eq = strchr(str, '=');
                if (!eq) {
                        log_error("Invalid key format '%s'", str);
                        return 1;
                }

                key = strndup(str, eq - str);
                if (!key) {
                        log_oom();
                        return 1;
                }

                old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);

                r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
                if (r < 0) {
                        log_oom();
                        return 1;
                }

                eq++;
                if (isempty(eq)) {
                        log_debug("udevd message (ENV) received, unset '%s'", key);

                        r = hashmap_put(manager->properties, key, NULL);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                } else {
                        val = strdup(eq);
                        if (!val) {
                                log_oom();
                                return 1;
                        }

                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);

                        r = hashmap_put(manager->properties, key, val);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                }

                key = val = NULL;
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

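/* a watched device node was closed after writing; synthesize "change" uevents,
 * for whole disks by first trying to re-read the partition table */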
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devname, *syspath, *devtype;
        char filename[PATH_MAX];
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &devname);
        if (r < 0)
                return r;

        r = sd_device_get_syspath(dev, &syspath);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        if (streq_ptr("block", subsystem) &&
            streq_ptr("disk", devtype) &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("Device '%s' is closed, synthesising 'change'", devname);
                strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

                FOREACH_DEVICE(e, d) {
                        const char *t, *n, *s;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        if (sd_device_get_devname(d, &n) < 0 ||
                            sd_device_get_syspath(d, &s) < 0)
                                continue;

                        log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
                        strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
                }

                return 0;
        }

        log_debug("Device %s is closed, synthesising 'change'", devname);
        strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

        return 0;
}

static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        (void) manager_disable_kill_workers_event(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                if (udev_watch_lookup(e->wd, &dev) <= 0)
                        continue;

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE)
                        synthesize_change(dev);
                else if (e->mask & IN_IGNORED)
                        udev_watch_end(dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

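/* reap worker processes; on abnormal exit drop the device state from disk and
 * forward the original kernel event unmodified */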
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
                        log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                        /* delete state from disk */
                        udev_device_delete_db(worker->event->dev);
                        udev_device_tag_index(worker->event->dev, NULL, false);
                        /* forward kernel event without amending it */
                        udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers) && manager->kill_workers_event)
                (void) sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);

        return 1;
}

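/* runs after each event loop iteration; with an empty queue, either arm the
 * idle-worker cleanup timer, exit if requested, or clean up leftovers in our cgroup */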
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        if (!LIST_IS_EMPTY(manager->events))
                return 1;

        /* There are no pending events. Let's clean up idle processes. */

        if (!hashmap_isempty(manager->workers)) {
                /* There are idle workers */
                (void) manager_enable_kill_workers_event(manager);
                return 1;
        }

        /* There are no idle workers. */

        if (manager->exit)
                return sd_event_exit(manager->event, 0);

        if (manager->cgroup)
                /* cleanup possible left-over processes in our cgroup */
                (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);

        return 1;
}

1370static int listen_fds(int *rctrl, int *rnetlink) {
1371 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1372 int fd, n, r;
912541b0 1373
fcff1e72
TG
1374 assert(rctrl);
1375 assert(rnetlink);
1376
912541b0 1377 n = sd_listen_fds(true);
fcff1e72
TG
1378 if (n < 0)
1379 return n;
912541b0
KS
1380
1381 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1382 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1383 if (ctrl_fd >= 0)
1384 return -EINVAL;
1385 ctrl_fd = fd;
912541b0
KS
1386 continue;
1387 }
1388
1389 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1390 if (netlink_fd >= 0)
1391 return -EINVAL;
1392 netlink_fd = fd;
912541b0
KS
1393 continue;
1394 }
1395
fcff1e72 1396 return -EINVAL;
912541b0
KS
1397 }
1398
f59118ec 1399 if (ctrl_fd < 0) {
8e766630 1400 _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;
f59118ec 1401
2024ed61 1402 ctrl = udev_ctrl_new();
f59118ec
TG
1403 if (!ctrl)
1404 return log_error_errno(EINVAL, "error initializing udev control socket");
1405
1406 r = udev_ctrl_enable_receiving(ctrl);
1407 if (r < 0)
1408 return log_error_errno(EINVAL, "error binding udev control socket");
1409
1410 fd = udev_ctrl_get_fd(ctrl);
1411 if (fd < 0)
1412 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1413
f59118ec
TG
1414 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1415 if (ctrl_fd < 0)
1416 return log_error_errno(errno, "could not dup ctrl fd: %m");
1417 }
1418
1419 if (netlink_fd < 0) {
8e766630 1420 _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;
f59118ec 1421
2024ed61 1422 monitor = udev_monitor_new_from_netlink(NULL, "kernel");
f59118ec
TG
1423 if (!monitor)
1424 return log_error_errno(EINVAL, "error initializing netlink socket");
1425
1426 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1427
1428 r = udev_monitor_enable_receiving(monitor);
1429 if (r < 0)
1430 return log_error_errno(EINVAL, "error binding netlink socket");
1431
1432 fd = udev_monitor_get_fd(monitor);
1433 if (fd < 0)
1434 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1435
1436 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1437 if (netlink_fd < 0)
f59118ec
TG
1438 return log_error_errno(errno, "could not dup netlink fd: %m");
1439 }
fcff1e72
TG
1440
1441 *rctrl = ctrl_fd;
1442 *rnetlink = netlink_fd;
912541b0 1443
912541b0 1444 return 0;
7459bcdc
KS
1445}
1446
e6f86cac 1447/*
3f85ef0f 1448 * read the kernel command line, in case we need to get into debug mode
1d84ad94
LP
1449 * udev.log_priority=<level> syslog priority
1450 * udev.children_max=<number of workers> events are fully serialized if set to 1
1451 * udev.exec_delay=<number of seconds> delay execution of every executed program
1452 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1453 */
96287a49 1454static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
92e72467 1455 int r = 0;
e6f86cac 1456
614a823c 1457 assert(key);
e6f86cac 1458
614a823c
TG
1459 if (!value)
1460 return 0;
e6f86cac 1461
1d84ad94
LP
1462 if (proc_cmdline_key_streq(key, "udev.log_priority")) {
1463
1464 if (proc_cmdline_value_missing(key, value))
1465 return 0;
1466
92e72467
ZJS
1467 r = util_log_priority(value);
1468 if (r >= 0)
1469 log_set_max_level(r);
1d84ad94
LP
1470
1471 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1472
1473 if (proc_cmdline_value_missing(key, value))
1474 return 0;
1475
92e72467
ZJS
1476 r = safe_atou64(value, &arg_event_timeout_usec);
1477 if (r >= 0) {
1478 arg_event_timeout_usec *= USEC_PER_SEC;
1479 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1480 }
1d84ad94
LP
1481
1482 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1483
1484 if (proc_cmdline_value_missing(key, value))
1485 return 0;
1486
020328e1 1487 r = safe_atou(value, &arg_children_max);
1d84ad94
LP
1488
1489 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1490
1491 if (proc_cmdline_value_missing(key, value))
1492 return 0;
1493
614a823c 1494 r = safe_atoi(value, &arg_exec_delay);
1d84ad94
LP
1495
1496 } else if (startswith(key, "udev."))
92e72467 1497 log_warning("Unknown udev kernel command line option \"%s\"", key);
614a823c 1498
92e72467
ZJS
1499 if (r < 0)
1500 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1d84ad94 1501
614a823c 1502 return 0;
e6f86cac
KS
1503}
1504
37ec0fdd
LP
1505static int help(void) {
1506 _cleanup_free_ char *link = NULL;
1507 int r;
1508
1509 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1510 if (r < 0)
1511 return log_oom();
1512
ed216e1f
TG
1513 printf("%s [OPTIONS...]\n\n"
1514 "Manages devices.\n\n"
5ac0162c 1515 " -h --help Print this message\n"
2d19c17e
MF
1516 " -V --version Print version of the program\n"
1517 " -d --daemon Detach and run in the background\n"
1518 " -D --debug Enable debug output\n"
1519 " -c --children-max=INT Set maximum number of workers\n"
1520 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1521 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1522 " -N --resolve-names=early|late|never\n"
5ac0162c 1523 " When to resolve users and groups\n"
37ec0fdd
LP
1524 "\nSee the %s for details.\n"
1525 , program_invocation_short_name
1526 , link
1527 );
1528
1529 return 0;
ed216e1f
TG
1530}
1531
bba7a484 1532static int parse_argv(int argc, char *argv[]) {
912541b0 1533 static const struct option options[] = {
bba7a484
TG
1534 { "daemon", no_argument, NULL, 'd' },
1535 { "debug", no_argument, NULL, 'D' },
1536 { "children-max", required_argument, NULL, 'c' },
1537 { "exec-delay", required_argument, NULL, 'e' },
1538 { "event-timeout", required_argument, NULL, 't' },
1539 { "resolve-names", required_argument, NULL, 'N' },
1540 { "help", no_argument, NULL, 'h' },
1541 { "version", no_argument, NULL, 'V' },
912541b0
KS
1542 {}
1543 };
689a97f5 1544
bba7a484 1545 int c;
689a97f5 1546
bba7a484
TG
1547 assert(argc >= 0);
1548 assert(argv);
912541b0 1549
e14b6f21 1550 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1551 int r;
912541b0 1552
bba7a484 1553 switch (c) {
912541b0 1554
912541b0 1555 case 'd':
bba7a484 1556 arg_daemonize = true;
912541b0
KS
1557 break;
1558 case 'c':
020328e1 1559 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1560 if (r < 0)
1561 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1562 break;
1563 case 'e':
6f5cf8a8
TG
1564 r = safe_atoi(optarg, &arg_exec_delay);
1565 if (r < 0)
1566 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1567 break;
9719859c 1568 case 't':
f1e8664e
TG
1569 r = safe_atou64(optarg, &arg_event_timeout_usec);
1570 if (r < 0)
65fea570 1571 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1572 else {
1573 arg_event_timeout_usec *= USEC_PER_SEC;
1574 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1575 }
9719859c 1576 break;
912541b0 1577 case 'D':
bba7a484 1578 arg_debug = true;
912541b0
KS
1579 break;
1580 case 'N':
090be865 1581 if (streq(optarg, "early")) {
bba7a484 1582 arg_resolve_names = 1;
090be865 1583 } else if (streq(optarg, "late")) {
bba7a484 1584 arg_resolve_names = 0;
090be865 1585 } else if (streq(optarg, "never")) {
bba7a484 1586 arg_resolve_names = -1;
912541b0 1587 } else {
9f6445e3 1588 log_error("resolve-names must be early, late or never");
bba7a484 1589 return 0;
912541b0
KS
1590 }
1591 break;
1592 case 'h':
37ec0fdd 1593 return help();
912541b0 1594 case 'V':
948aaa7c 1595 printf("%s\n", PACKAGE_VERSION);
bba7a484
TG
1596 return 0;
1597 case '?':
1598 return -EINVAL;
912541b0 1599 default:
bba7a484
TG
1600 assert_not_reached("Unhandled option");
1601
912541b0
KS
1602 }
1603 }
1604
bba7a484
TG
1605 return 1;
1606}
1607
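/* allocate the manager and attach the signal, control, uevent, inotify and worker
 * event sources to a single sd-event loop */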
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        udev_builtin_init();

        manager->rules = udev_rules_new(arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        LIST_HEAD_INIT(manager->events);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
        if (r < 0)
                return log_error_errno(r, "could not enable SO_PASSCRED: %m");

        r = udev_watch_init();
        if (r < 0)
                return log_error_errno(r, "Failed to create inotify descriptor: %m");
        manager->fd_inotify = r;

        udev_watch_restore();

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = TAKE_PTR(manager);

        return 0;
}

1719
077fc5e2 1720static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1721 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1722 int r;
1723
1724 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1725 if (r < 0) {
1726 r = log_error_errno(r, "failed to allocate manager object: %m");
1727 goto exit;
1728 }
1729
1730 r = udev_rules_apply_static_dev_perms(manager->rules);
1731 if (r < 0)
1732 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1733
1ef72b55
MS
1734 (void) sd_notifyf(false,
1735 "READY=1\n"
1736 "STATUS=Processing with %u children at max", arg_children_max);
077fc5e2
DH
1737
1738 r = sd_event_loop(manager->event);
1739 if (r < 0) {
1740 log_error_errno(r, "event loop failed: %m");
1741 goto exit;
1742 }
1743
1744 sd_event_get_exit_code(manager->event, &r);
1745
1746exit:
1747 sd_notify(false,
1748 "STOPPING=1\n"
1749 "STATUS=Shutting down...");
1750 if (manager)
1751 udev_ctrl_cleanup(manager->ctrl);
1752 return r;
1753}
1754
1755int main(int argc, char *argv[]) {
c26d1879 1756 _cleanup_free_ char *cgroup = NULL;
efa1606e 1757 int fd_ctrl = -1, fd_uevent = -1;
e5d7bce1 1758 int r;
bba7a484 1759
bba7a484 1760 log_set_target(LOG_TARGET_AUTO);
b237a168 1761 udev_parse_config();
bba7a484
TG
1762 log_parse_environment();
1763 log_open();
1764
bba7a484
TG
1765 r = parse_argv(argc, argv);
1766 if (r <= 0)
1767 goto exit;
1768
1d84ad94 1769 r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
614a823c
TG
1770 if (r < 0)
1771 log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");
912541b0 1772
78d3e041
KS
1773 if (arg_debug) {
1774 log_set_target(LOG_TARGET_CONSOLE);
bba7a484 1775 log_set_max_level(LOG_DEBUG);
78d3e041 1776 }
bba7a484 1777
fba868fa
LP
1778 r = must_be_root();
1779 if (r < 0)
912541b0 1780 goto exit;
912541b0 1781
712cebf1
TG
1782 if (arg_children_max == 0) {
1783 cpu_set_t cpu_set;
e438c57a 1784 unsigned long mem_limit;
ebc164ef 1785
712cebf1 1786 arg_children_max = 8;
d457ff83 1787
ece174c5 1788 if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
920b52e4 1789 arg_children_max += CPU_COUNT(&cpu_set) * 2;
912541b0 1790
e438c57a
MW
1791 mem_limit = physical_memory() / (128LU*1024*1024);
1792 arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));
1793
712cebf1 1794 log_debug("set children_max to %u", arg_children_max);
d457ff83 1795 }
912541b0 1796
712cebf1
TG
1797 /* set umask before creating any file/directory */
1798 r = chdir("/");
1799 if (r < 0) {
1800 r = log_error_errno(errno, "could not change dir to /: %m");
1801 goto exit;
1802 }
194bbe33 1803
712cebf1 1804 umask(022);
912541b0 1805
c3dacc8b 1806 r = mac_selinux_init();
712cebf1
TG
1807 if (r < 0) {
1808 log_error_errno(r, "could not initialize labelling: %m");
1809 goto exit;
912541b0
KS
1810 }
1811
dae8b82e
ZJS
1812 r = mkdir_errno_wrapper("/run/udev", 0755);
1813 if (r < 0 && r != -EEXIST) {
1814 log_error_errno(r, "could not create /run/udev: %m");
712cebf1
TG
1815 goto exit;
1816 }
1817
03cfe0d5 1818 dev_setup(NULL, UID_INVALID, GID_INVALID);
912541b0 1819
c26d1879
TG
1820 if (getppid() == 1) {
1821 /* get our own cgroup, we regularly kill everything udev has left behind
1822 we only do this on systemd systems, and only if we are directly spawned
1823 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1824 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
11b9fb15 1825 if (r < 0) {
a2d61f07 1826 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
11b9fb15
TG
1827 log_debug_errno(r, "did not find dedicated cgroup: %m");
1828 else
1829 log_warning_errno(r, "failed to get cgroup: %m");
1830 }
c26d1879
TG
1831 }
1832
b7f74dd4
TG
1833 r = listen_fds(&fd_ctrl, &fd_uevent);
1834 if (r < 0) {
1835 r = log_error_errno(r, "could not listen on fds: %m");
1836 goto exit;
1837 }
1838
bba7a484 1839 if (arg_daemonize) {
912541b0 1840 pid_t pid;
912541b0 1841
948aaa7c 1842 log_info("starting version " PACKAGE_VERSION);
3cbb2057 1843
40e749b5 1844 /* connect /dev/null to stdin, stdout, stderr */
c76cf844
AK
1845 if (log_get_max_level() < LOG_DEBUG) {
1846 r = make_null_stdio();
1847 if (r < 0)
1848 log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
1849 }
1850
912541b0
KS
1851 pid = fork();
1852 switch (pid) {
1853 case 0:
1854 break;
1855 case -1:
6af5e6a4 1856 r = log_error_errno(errno, "fork of daemon failed: %m");
912541b0
KS
1857 goto exit;
1858 default:
f53d1fcd
TG
1859 mac_selinux_finish();
1860 log_close();
1861 _exit(EXIT_SUCCESS);
912541b0
KS
1862 }
1863
1864 setsid();
1865
76cdddfb
YW
1866 r = set_oom_score_adjust(-1000);
1867 if (r < 0)
1868 log_debug_errno(r, "Failed to adjust OOM score, ignoring: %m");
7500cd5e 1869 }
912541b0 1870
077fc5e2 1871 r = run(fd_ctrl, fd_uevent, cgroup);
693d371d 1872
53921bfa 1873exit:
cc56fafe 1874 mac_selinux_finish();
baa30fbc 1875 log_close();
6af5e6a4 1876 return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
7fafc032 1877}