]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
udev-event: make udev_event_new() take exec_delay
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
810adae9
LP
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
7fafc032
KS
6 */
7
7fafc032 8#include <errno.h>
618234a5
LP
9#include <fcntl.h>
10#include <getopt.h>
11#include <signal.h>
12#include <stdbool.h>
13#include <stddef.h>
7fafc032
KS
14#include <stdio.h>
15#include <stdlib.h>
16#include <string.h>
618234a5 17#include <sys/epoll.h>
3ebdb81e 18#include <sys/file.h>
618234a5
LP
19#include <sys/inotify.h>
20#include <sys/ioctl.h>
21#include <sys/mount.h>
1e03b754 22#include <sys/prctl.h>
1e03b754 23#include <sys/signalfd.h>
618234a5 24#include <sys/socket.h>
dc117daa 25#include <sys/stat.h>
618234a5
LP
26#include <sys/time.h>
27#include <sys/wait.h>
28#include <unistd.h>
7fafc032 29
392ef7a2 30#include "sd-daemon.h"
693d371d 31#include "sd-event.h"
8314de1d 32
b5efdb8a 33#include "alloc-util.h"
194bbe33 34#include "cgroup-util.h"
618234a5 35#include "cpu-set-util.h"
5ba2dc25 36#include "dev-setup.h"
70068602 37#include "device-util.h"
3ffd4af2 38#include "fd-util.h"
a5c32cff 39#include "fileio.h"
f97b34a6 40#include "format-util.h"
f4f15635 41#include "fs-util.h"
a505965d 42#include "hashmap.h"
c004493c 43#include "io-util.h"
70068602 44#include "libudev-device-internal.h"
40a57716 45#include "list.h"
618234a5 46#include "netlink-util.h"
6bedfcbb 47#include "parse-util.h"
4e731273 48#include "proc-cmdline.h"
618234a5
LP
49#include "process-util.h"
50#include "selinux-util.h"
51#include "signal-util.h"
8f328d36 52#include "socket-util.h"
07630cea 53#include "string-util.h"
618234a5 54#include "terminal-util.h"
07a26e42 55#include "udev-builtin.h"
7d68eb1b 56#include "udev-ctrl.h"
618234a5 57#include "udev-util.h"
70068602 58#include "udev-watch.h"
618234a5 59#include "udev.h"
ee104e11 60#include "user-util.h"
7fafc032 61
bba7a484
TG
62static bool arg_debug = false;
63static int arg_daemonize = false;
64static int arg_resolve_names = 1;
020328e1 65static unsigned arg_children_max;
bba7a484
TG
66static int arg_exec_delay;
67static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
68static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
c0c6806b
TG
69
/* Global daemon state: event loop, worker bookkeeping, queued events and
 * the various sockets/fds the main process listens on. */
typedef struct Manager {
        sd_event *event;
        Hashmap *workers;                /* pid (PID_TO_PTR) -> struct worker */
        LIST_HEAD(struct event, events); /* queue of pending/running events */
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        Hashmap *properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2]; /* socketpair: workers write results, main reads in on_worker() */

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;
        sd_event_source *kill_workers_event;

        usec_t last_usec; /* time of the last config-change check, see event_queue_start() */

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;
1e03b754 96
enum event_state {
        EVENT_UNDEF,   /* not (or no longer) a queue member; also used as wildcard in event_queue_cleanup() */
        EVENT_QUEUED,  /* waiting for a free worker */
        EVENT_RUNNING, /* currently being processed by a worker */
};

/* One queued uevent, linked into Manager.events. */
struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev_device *dev;
        struct udev_device *dev_kernel; /* clone of the originally received device, see event_queue_insert() */
        struct worker *worker;          /* worker processing this event, if any */
        enum event_state state;

        unsigned long long int delaying_seqnum; /* seqnum of the event blocking us, cached by is_devpath_busy() */
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;

        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};
121
ecb17862 122static void event_queue_cleanup(Manager *manager, enum event_state type);
ff2c503d 123
enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING, /* busy processing an event */
        WORKER_IDLE,    /* waiting to be handed a new event */
        WORKER_KILLED,  /* signal sent, waiting for the process to go away */
};

/* One forked worker process, tracked in Manager.workers. */
struct worker {
        Manager *manager;
        pid_t pid;
        struct udev_monitor *monitor; /* unicast netlink monitor used to pass devices to this worker */
        enum worker_state state;
        struct event *event;          /* event currently being processed, if any */
};

/* passed from worker to main process; an empty message, the sender's
 * identity is carried via SCM_CREDENTIALS (see on_worker()) */
struct worker_message {
};
142
/* Remove an event from the queue and release everything attached to it.
 * If this empties the queue, the /run/udev/queue flag file is removed --
 * but only by the process that created the queue. */
static void event_free(struct event *event) {
        int r;

        if (!event)
                return;
        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        /* detach from the worker that was processing us, if any */
        if (event->worker)
                event->worker->event = NULL;

        if (LIST_IS_EMPTY(event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid_cached()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}
7a770250 171
c6aa11f2
TG
/* Release a worker: drop it from the manager's worker hashmap, unref its
 * monitor and free any event it was still processing. Does not kill the
 * worker process itself. */
static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}
184
c0c6806b 185static void manager_workers_free(Manager *manager) {
a505965d
TG
186 struct worker *worker;
187 Iterator i;
ff2c503d 188
c0c6806b
TG
189 assert(manager);
190
191 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 192 worker_free(worker);
a505965d 193
c0c6806b 194 manager->workers = hashmap_free(manager->workers);
fc465079
KS
195}
196
/* Register a freshly forked worker process with the manager.
 *
 * Takes a reference on worker_monitor (after disconnecting it in this
 * process) and stores the worker in manager->workers keyed by pid.
 * On success ownership of the allocation is transferred to *ret.
 * Returns 0 on success, negative errno-style error otherwise. */
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}
228
4fa4d885
TG
229static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
230 struct event *event = userdata;
231
232 assert(event);
233 assert(event->worker);
234
235 kill_and_sigcont(event->worker->pid, SIGKILL);
236 event->worker->state = WORKER_KILLED;
237
238 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
239
240 return 1;
241}
242
243static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
244 struct event *event = userdata;
245
246 assert(event);
247
248 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
249
250 return 1;
251}
252
/* Hand an event to a worker: link the two together, mark both as running
 * and arm the per-event warning and kill timers relative to now. Timer
 * setup errors are deliberately ignored. */
static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}
278
e237d8cb
TG
/* Tear down the complete manager state; also used in the forked worker
 * (via worker_spawn()) to drop the inherited main-process state. */
static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit();

        /* event sources must go before the event loop itself */
        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);
        sd_event_source_unref(manager->kill_workers_event);

        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        hashmap_free_free_free(manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}
306
307DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
308
9a73bd7c
TG
309static int worker_send_message(int fd) {
310 struct worker_message message = {};
311
312 return loop_write(fd, &message, sizeof(message), false);
313}
314
fee854ee
RK
315static bool shall_lock_device(struct udev_device *dev) {
316 const char *sysname;
317
318 if (!streq_ptr("block", udev_device_get_subsystem(dev)))
319 return false;
320
321 sysname = udev_device_get_sysname(dev);
322 return !startswith(sysname, "dm-") &&
323 !startswith(sysname, "md") &&
324 !startswith(sysname, "drbd");
325}
326
/* Fork a new worker process and hand it the given event.
 *
 * The parent registers the child via worker_new() and attaches the event.
 * The child drops all inherited main-process state, processes the initial
 * event, then loops waiting (via epoll on its unicast monitor and a
 * signalfd) for further devices from the main daemon until SIGTERM. */
static void worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                /* child: the worker process */
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                /* drop the main-process state inherited across fork() */
                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);

                manager->event = sd_event_unref(manager->event);

                /* block all signals and receive them through a signalfd instead */
                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "fail to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                r = set_oom_score_adjust(0);
                if (r < 0)
                        log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

                for (;;) {
                        _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev, arg_exec_delay);
                        if (!udev_event) {
                                r = -ENOMEM;
                                goto out;
                        }

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            shall_lock_device(dev)) {
                                struct udev_device *d = dev;

                                /* lock the whole disk, not the partition node */
                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(dev->device);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                /* fork failed: re-queue the event so it can be retried */
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                /* parent: track the new worker and attach the event to it */
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}
540
c0c6806b 541static void event_run(Manager *manager, struct event *event) {
a505965d
TG
542 struct worker *worker;
543 Iterator i;
912541b0 544
c0c6806b
TG
545 assert(manager);
546 assert(event);
547
548 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
549 ssize_t count;
550
551 if (worker->state != WORKER_IDLE)
552 continue;
553
c0c6806b 554 count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
912541b0 555 if (count < 0) {
1fa2f38f
ZJS
556 log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
557 worker->pid, count);
cb542e84 558 (void) kill(worker->pid, SIGKILL);
912541b0
KS
559 worker->state = WORKER_KILLED;
560 continue;
561 }
39c19cf1 562 worker_attach_event(worker, event);
912541b0
KS
563 return;
564 }
565
c0c6806b 566 if (hashmap_size(manager->workers) >= arg_children_max) {
bba7a484 567 if (arg_children_max > 1)
c0c6806b 568 log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
912541b0
KS
569 return;
570 }
571
572 /* start new worker and pass initial device */
c0c6806b 573 worker_spawn(manager, event);
1e03b754
KS
574}
575
/* Allocate a queue entry for a received device and append it to the event
 * list. Takes ownership of dev on success (caller must unref on failure).
 * Creates the /run/udev/queue flag file when the queue becomes non-empty.
 * Returns 0 on success, -ENOMEM otherwise. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->manager = manager;
        event->dev = dev;
        /* keep a copy of the device as received from the kernel */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        /* cache the fields is_devpath_busy() needs for dependency checks */
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}
620
c0c6806b 621static void manager_kill_workers(Manager *manager) {
a505965d
TG
622 struct worker *worker;
623 Iterator i;
1e03b754 624
c0c6806b
TG
625 assert(manager);
626
627 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
628 if (worker->state == WORKER_KILLED)
629 continue;
1e03b754 630
912541b0 631 worker->state = WORKER_KILLED;
cb542e84 632 (void) kill(worker->pid, SIGTERM);
912541b0 633 }
1e03b754
KS
634}
635
/* lookup event for identical, parent, child device */
/* Returns true if an earlier-queued event must finish before this one may
 * run (same device, same devnum/ifindex, old name, or parent/child devpath).
 * The blocking event's seqnum is cached in event->delaying_seqnum so
 * subsequent calls can short-circuit. */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}
705
693d371d
TG
706static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
707 Manager *manager = userdata;
708
709 assert(manager);
710
711 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
712
713 sd_event_exit(manager->event, -ETIMEDOUT);
714
715 return 1;
716}
717
/* Begin shutdown: stop accepting new events, flush the queue, ask workers
 * to terminate and arm a 30s timeout so we never wait forever for them. */
static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        /* force the loop to exit via on_exit_timeout() if workers linger */
        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}
751
/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        /* drop workers and cached rules; rules are re-loaded lazily in
         * event_queue_start() */
        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit();

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}
769
eca195ec
YW
770static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
771 Manager *manager = userdata;
772
773 assert(manager);
774
775 log_debug("Cleanup idle workers");
776 manager_kill_workers(manager);
777
778 return 1;
779}
780
/* Arm (or re-arm) the one-shot timer that kills idle workers 3 seconds from
 * now. Reuses the existing event source when possible; on any failure to
 * reconfigure it, the source is dropped and created afresh.
 * Returns 0 on success, negative errno-style error otherwise. */
static int manager_enable_kill_workers_event(Manager *manager) {
        int enabled, r;

        assert(manager);

        if (!manager->kill_workers_event)
                goto create_new;

        r = sd_event_source_get_enabled(manager->kill_workers_event, &enabled);
        if (r < 0) {
                log_debug_errno(r, "Failed to query whether event source for killing idle workers is enabled or not, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        /* already armed as one-shot, nothing to do */
        if (enabled == SD_EVENT_ONESHOT)
                return 0;

        r = sd_event_source_set_time(manager->kill_workers_event, now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC);
        if (r < 0) {
                log_debug_errno(r, "Failed to set time to event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_ONESHOT);
        if (r < 0) {
                log_debug_errno(r, "Failed to enable event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        return 0;

create_new:
        r = sd_event_add_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
                              now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC, on_kill_workers_event, manager);
        if (r < 0)
                return log_warning_errno(r, "Failed to create timer event for killing idle workers: %m");

        return 0;
}
823
824static int manager_disable_kill_workers_event(Manager *manager) {
825 int r;
826
827 if (!manager->kill_workers_event)
828 return 0;
829
830 r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
831 if (r < 0)
832 return log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
833
834 return 0;
835}
836
/* Try to dispatch all queued events whose dependencies are satisfied.
 * Also re-checks rule/builtin timestamps (at most every 3 seconds) and
 * triggers a reload when the configuration changed on disk. */
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        /* we are busy again, do not reap idle workers for now */
        (void) manager_disable_kill_workers_event(manager);

        udev_builtin_init();

        /* rules are loaded lazily, e.g. after manager_reload() dropped them */
        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}
879
ecb17862 880static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 881 struct event *event, *tmp;
ff2c503d 882
40a57716 883 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
884 if (match_type != EVENT_UNDEF && match_type != event->state)
885 continue;
ff2c503d 886
c6aa11f2 887 event_free(event);
912541b0 888 }
ff2c503d
KS
889}
890
e82e8fa5 891static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b
TG
892 Manager *manager = userdata;
893
894 assert(manager);
895
912541b0
KS
896 for (;;) {
897 struct worker_message msg;
979558f3
TG
898 struct iovec iovec = {
899 .iov_base = &msg,
900 .iov_len = sizeof(msg),
901 };
902 union {
903 struct cmsghdr cmsghdr;
904 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
905 } control = {};
906 struct msghdr msghdr = {
907 .msg_iov = &iovec,
908 .msg_iovlen = 1,
909 .msg_control = &control,
910 .msg_controllen = sizeof(control),
911 };
912 struct cmsghdr *cmsg;
912541b0 913 ssize_t size;
979558f3 914 struct ucred *ucred = NULL;
a505965d 915 struct worker *worker;
912541b0 916
e82e8fa5 917 size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
979558f3 918 if (size < 0) {
738a7907
TG
919 if (errno == EINTR)
920 continue;
921 else if (errno == EAGAIN)
922 /* nothing more to read */
923 break;
979558f3 924
e82e8fa5 925 return log_error_errno(errno, "failed to receive message: %m");
979558f3
TG
926 } else if (size != sizeof(struct worker_message)) {
927 log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
e82e8fa5 928 continue;
979558f3
TG
929 }
930
2a1288ff 931 CMSG_FOREACH(cmsg, &msghdr) {
979558f3
TG
932 if (cmsg->cmsg_level == SOL_SOCKET &&
933 cmsg->cmsg_type == SCM_CREDENTIALS &&
934 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
935 ucred = (struct ucred*) CMSG_DATA(cmsg);
936 }
937
938 if (!ucred || ucred->pid <= 0) {
939 log_warning_errno(EIO, "ignoring worker message without valid PID");
940 continue;
941 }
912541b0
KS
942
943 /* lookup worker who sent the signal */
4a0b58c4 944 worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
a505965d
TG
945 if (!worker) {
946 log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
947 continue;
912541b0 948 }
c0bbfd72 949
a505965d
TG
950 if (worker->state != WORKER_KILLED)
951 worker->state = WORKER_IDLE;
952
953 /* worker returned */
954 event_free(worker->event);
912541b0 955 }
e82e8fa5 956
8302fe5a
TG
957 /* we have free workers, try to schedule events */
958 event_queue_start(manager);
959
e82e8fa5
TG
960 return 1;
961}
962
963static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 964 Manager *manager = userdata;
e82e8fa5
TG
965 struct udev_device *dev;
966 int r;
967
c0c6806b 968 assert(manager);
e82e8fa5 969
c0c6806b 970 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
971 if (dev) {
972 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 973 r = event_queue_insert(manager, dev);
e82e8fa5
TG
974 if (r < 0)
975 udev_device_unref(dev);
8302fe5a
TG
976 else
977 /* we have fresh events, try to schedule them */
978 event_queue_start(manager);
e82e8fa5
TG
979 }
980
981 return 1;
88f4b648
KS
982}
983
3b47c739 984/* receive the udevd message from userspace */
e82e8fa5 985static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 986 Manager *manager = userdata;
8e766630
LP
987 _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
988 _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
912541b0 989 const char *str;
9b5150b6 990 int i, r;
912541b0 991
c0c6806b 992 assert(manager);
e4f66b77 993
c0c6806b 994 ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
e4f66b77 995 if (!ctrl_conn)
e82e8fa5 996 return 1;
912541b0
KS
997
998 ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
e4f66b77 999 if (!ctrl_msg)
e82e8fa5 1000 return 1;
912541b0
KS
1001
1002 i = udev_ctrl_get_set_log_level(ctrl_msg);
1003 if (i >= 0) {
ed14edc0 1004 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
baa30fbc 1005 log_set_max_level(i);
c0c6806b 1006 manager_kill_workers(manager);
912541b0
KS
1007 }
1008
1009 if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
9f6445e3 1010 log_debug("udevd message (STOP_EXEC_QUEUE) received");
c0c6806b 1011 manager->stop_exec_queue = true;
912541b0
KS
1012 }
1013
1014 if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
9f6445e3 1015 log_debug("udevd message (START_EXEC_QUEUE) received");
c0c6806b 1016 manager->stop_exec_queue = false;
8302fe5a 1017 event_queue_start(manager);
912541b0
KS
1018 }
1019
1020 if (udev_ctrl_get_reload(ctrl_msg) > 0) {
9f6445e3 1021 log_debug("udevd message (RELOAD) received");
62d43dac 1022 manager_reload(manager);
912541b0
KS
1023 }
1024
1025 str = udev_ctrl_get_set_env(ctrl_msg);
9b5150b6
YW
1026 if (str) {
1027 _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
1028 char *eq;
1029
1030 eq = strchr(str, '=');
1031 if (!eq) {
1032 log_error("Invalid key format '%s'", str);
1033 return 1;
1034 }
1035
1036 key = strndup(str, eq - str);
1037 if (!key) {
1038 log_oom();
1039 return 1;
1040 }
1041
1042 old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);
1043
1044 r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
1045 if (r < 0) {
1046 log_oom();
1047 return 1;
912541b0 1048 }
9b5150b6
YW
1049
1050 eq++;
1051 if (!isempty(eq)) {
1052 log_debug("udevd message (ENV) received, unset '%s'", key);
1053
1054 r = hashmap_put(manager->properties, key, NULL);
1055 if (r < 0) {
1056 log_oom();
1057 return 1;
1058 }
1059 } else {
1060 val = strdup(eq);
1061 if (!val) {
1062 log_oom();
1063 return 1;
1064 }
1065
1066 log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
1067
1068 r = hashmap_put(manager->properties, key, val);
1069 if (r < 0) {
1070 log_oom();
1071 return 1;
1072 }
1073 }
1074
1075 key = val = NULL;
c0c6806b 1076 manager_kill_workers(manager);
912541b0
KS
1077 }
1078
1079 i = udev_ctrl_get_set_children_max(ctrl_msg);
1080 if (i >= 0) {
9f6445e3 1081 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
bba7a484 1082 arg_children_max = i;
1ef72b55
MS
1083
1084 (void) sd_notifyf(false,
1085 "READY=1\n"
1086 "STATUS=Processing with %u children at max", arg_children_max);
912541b0
KS
1087 }
1088
cb49a4f2 1089 if (udev_ctrl_get_ping(ctrl_msg) > 0)
9f6445e3 1090 log_debug("udevd message (SYNC) received");
912541b0
KS
1091
1092 if (udev_ctrl_get_exit(ctrl_msg) > 0) {
9f6445e3 1093 log_debug("udevd message (EXIT) received");
62d43dac 1094 manager_exit(manager);
c0c6806b
TG
1095 /* keep reference to block the client until we exit
1096 TODO: deal with several blocking exit requests */
1097 manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
912541b0 1098 }
e4f66b77 1099
e82e8fa5 1100 return 1;
88f4b648 1101}
4a231017 1102
/* Synthesize "change" uevents for a device whose node was closed after being
 * opened for writing.
 *
 * For whole-disk block devices (excluding device-mapper "dm-*" nodes) we
 * first try to make the kernel re-read the partition table; if that works
 * and partitions exist, the kernel itself emits the necessary events. In
 * all other cases we write "change" into the relevant uevent file(s) in
 * sysfs, for the disk and each of its partitions. Returns 0 on success or
 * a negative errno if basic device properties cannot be determined. */
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devname, *syspath, *devtype;
        char filename[PATH_MAX];
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &devname);
        if (r < 0)
                return r;

        r = sd_device_get_syspath(dev, &syspath);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        /* Whole disks (not dm devices) get special partition handling. */
        if (streq_ptr("block", subsystem) &&
            streq_ptr("disk", devtype) &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        /* LOCK_NB: never block on a device someone else holds. */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("Device '%s' is closed, synthesising 'change'", devname);
                strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
                /* Best effort: a failed write only means no synthetic event. */
                write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

                FOREACH_DEVICE(e, d) {
                        const char *t, *n, *s;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        if (sd_device_get_devname(d, &n) < 0 ||
                            sd_device_get_syspath(d, &s) < 0)
                                continue;

                        log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
                        strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
                }

                return 0;
        }

        /* Non-disk device: a single synthetic "change" event is enough. */
        log_debug("Device %s is closed, synthesising 'change'", devname);
        strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

        return 0;
}
1222
/* Event-loop callback for the inotify fd watching device nodes.
 *
 * A writable-close (IN_CLOSE_WRITE) on a watched node triggers a synthetic
 * "change" uevent via synthesize_change(); IN_IGNORED means the watch went
 * away and is dropped from the watch table. Always returns 1. */
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        /* Activity means we are not idle: cancel the pending worker cleanup. */
        (void) manager_disable_kill_workers_event(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                /* Map the watch descriptor back to the device it guards. */
                if (udev_watch_lookup(e->wd, &dev) <= 0)
                        continue;

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE)
                        synthesize_change(dev);
                else if (e->mask & IN_IGNORED)
                        udev_watch_end(dev);
        }

        return 1;
}
1260
0561329d 1261static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1262 Manager *manager = userdata;
1263
1264 assert(manager);
1265
62d43dac 1266 manager_exit(manager);
912541b0 1267
e82e8fa5
TG
1268 return 1;
1269}
912541b0 1270
0561329d 1271static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1272 Manager *manager = userdata;
1273
1274 assert(manager);
1275
62d43dac 1276 manager_reload(manager);
912541b0 1277
e82e8fa5
TG
1278 return 1;
1279}
912541b0 1280
/* SIGCHLD handler: reap every terminated worker process.
 *
 * For each reaped PID the corresponding tracked worker is looked up and
 * freed; if the worker died abnormally while handling an event, the
 * on-disk state for that device is removed and the raw kernel event is
 * forwarded unmodified so listeners still see it. Afterwards the queue is
 * rescheduled, and the idle-worker cleanup timer is disabled when no
 * workers remain. Always returns 1. */
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                /* WNOHANG: collect everything that already exited, never block. */
                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        /* Stopped/continued workers are still alive: keep tracking. */
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
                        log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                        /* delete state from disk */
                        udev_device_delete_db(worker->event->dev);
                        udev_device_tag_index(worker->event->dev, NULL, false);
                        /* forward kernel event without amending it */
                        udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers) && manager->kill_workers_event)
                (void) sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);

        return 1;
}
1338
693d371d
TG
1339static int on_post(sd_event_source *s, void *userdata) {
1340 Manager *manager = userdata;
693d371d
TG
1341
1342 assert(manager);
1343
b6107f01
YW
1344 if (!LIST_IS_EMPTY(manager->events))
1345 return 1;
1346
1347 /* There are no pending events. Let's cleanup idle process. */
1348
1349 if (!hashmap_isempty(manager->workers)) {
1350 /* There are idle workers */
eca195ec 1351 (void) manager_enable_kill_workers_event(manager);
b6107f01 1352 return 1;
693d371d
TG
1353 }
1354
b6107f01
YW
1355 /* There are no idle workers. */
1356
1357 if (manager->exit)
1358 return sd_event_exit(manager->event, 0);
1359
1360 if (manager->cgroup)
1361 /* cleanup possible left-over processes in our cgroup */
1362 (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
1363
693d371d
TG
1364 return 1;
1365}
1366
fcff1e72
TG
1367static int listen_fds(int *rctrl, int *rnetlink) {
1368 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1369 int fd, n, r;
912541b0 1370
fcff1e72
TG
1371 assert(rctrl);
1372 assert(rnetlink);
1373
912541b0 1374 n = sd_listen_fds(true);
fcff1e72
TG
1375 if (n < 0)
1376 return n;
912541b0
KS
1377
1378 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1379 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1380 if (ctrl_fd >= 0)
1381 return -EINVAL;
1382 ctrl_fd = fd;
912541b0
KS
1383 continue;
1384 }
1385
1386 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1387 if (netlink_fd >= 0)
1388 return -EINVAL;
1389 netlink_fd = fd;
912541b0
KS
1390 continue;
1391 }
1392
fcff1e72 1393 return -EINVAL;
912541b0
KS
1394 }
1395
f59118ec 1396 if (ctrl_fd < 0) {
8e766630 1397 _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;
f59118ec 1398
2024ed61 1399 ctrl = udev_ctrl_new();
f59118ec
TG
1400 if (!ctrl)
1401 return log_error_errno(EINVAL, "error initializing udev control socket");
1402
1403 r = udev_ctrl_enable_receiving(ctrl);
1404 if (r < 0)
1405 return log_error_errno(EINVAL, "error binding udev control socket");
1406
1407 fd = udev_ctrl_get_fd(ctrl);
1408 if (fd < 0)
1409 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1410
f59118ec
TG
1411 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1412 if (ctrl_fd < 0)
1413 return log_error_errno(errno, "could not dup ctrl fd: %m");
1414 }
1415
1416 if (netlink_fd < 0) {
8e766630 1417 _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;
f59118ec 1418
2024ed61 1419 monitor = udev_monitor_new_from_netlink(NULL, "kernel");
f59118ec
TG
1420 if (!monitor)
1421 return log_error_errno(EINVAL, "error initializing netlink socket");
1422
1423 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1424
1425 r = udev_monitor_enable_receiving(monitor);
1426 if (r < 0)
1427 return log_error_errno(EINVAL, "error binding netlink socket");
1428
1429 fd = udev_monitor_get_fd(monitor);
1430 if (fd < 0)
1431 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1432
1433 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1434 if (netlink_fd < 0)
f59118ec
TG
1435 return log_error_errno(errno, "could not dup netlink fd: %m");
1436 }
fcff1e72
TG
1437
1438 *rctrl = ctrl_fd;
1439 *rnetlink = netlink_fd;
912541b0 1440
912541b0 1441 return 0;
7459bcdc
KS
1442}
1443
/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_priority=<level>                 syslog priority
 *   udev.children_max=<number of workers>     events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>       delay execution of every executed program
 *   udev.event_timeout=<number of seconds>    seconds to wait before terminating an event
 */
/* proc_cmdline_parse() callback: handles one key[=value] pair. Unknown
 * "udev." keys are warned about; parse failures are logged and ignored.
 * Always returns 0 so parsing continues. */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r = 0;

        assert(key);

        /* All options we understand require a value. */
        if (!value)
                return 0;

        if (proc_cmdline_key_streq(key, "udev.log_priority")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = util_log_priority(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r >= 0) {
                        /* Value is given in seconds; warn at one third of it. */
                        arg_event_timeout_usec *= USEC_PER_SEC;
                        arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                }

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atoi(value, &arg_exec_delay);

        } else if (startswith(key, "udev."))
                log_warning("Unknown udev kernel command line option \"%s\"", key);

        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}
1501
/* Print the usage summary for --help, with a terminal-clickable link to
 * the systemd-udevd.service(8) man page. Returns 0, or -ENOMEM via
 * log_oom() if the link cannot be allocated. */
static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-udevd.service", "8", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               "\nSee the %s for details.\n"
               , program_invocation_short_name
               , link
        );

        return 0;
}
1528
/* Parse command-line options into the arg_* globals.
 *
 * Returns > 0 to continue startup, 0 to exit successfully (--help,
 * --version, or an invalid --resolve-names value), and -EINVAL on an
 * unknown option. Invalid numeric arguments are warned about and ignored
 * rather than treated as fatal. */
static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd' },
                { "debug",              no_argument,            NULL, 'D' },
                { "children-max",       required_argument,      NULL, 'c' },
                { "exec-delay",         required_argument,      NULL, 'e' },
                { "event-timeout",      required_argument,      NULL, 't' },
                { "resolve-names",      required_argument,      NULL, 'N' },
                { "help",               no_argument,            NULL, 'h' },
                { "version",            no_argument,            NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                /* Seconds on the command line; warn at 1/3 of the timeout. */
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        /* early = 1, late = 0, never = -1 */
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        return help();
                case 'V':
                        printf("%s\n", PACKAGE_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}
1604
b7f74dd4 1605static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1606 _cleanup_(manager_freep) Manager *manager = NULL;
6d5e65f6 1607 int r, fd_worker;
c0c6806b
TG
1608
1609 assert(ret);
11b1dd8c
TG
1610 assert(fd_ctrl >= 0);
1611 assert(fd_uevent >= 0);
c0c6806b
TG
1612
1613 manager = new0(Manager, 1);
1614 if (!manager)
1615 return log_oom();
1616
e237d8cb
TG
1617 manager->fd_inotify = -1;
1618 manager->worker_watch[WRITE_END] = -1;
1619 manager->worker_watch[READ_END] = -1;
1620
2024ed61 1621 udev_builtin_init();
b2d21d93 1622
2024ed61 1623 manager->rules = udev_rules_new(arg_resolve_names);
ecb17862
TG
1624 if (!manager->rules)
1625 return log_error_errno(ENOMEM, "error reading rules");
1626
40a57716 1627 LIST_HEAD_INIT(manager->events);
ecb17862 1628
c26d1879
TG
1629 manager->cgroup = cgroup;
1630
2024ed61 1631 manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
f59118ec
TG
1632 if (!manager->ctrl)
1633 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1634
2024ed61 1635 manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
f59118ec
TG
1636 if (!manager->monitor)
1637 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1638
1639 /* unnamed socket from workers to the main daemon */
1640 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1641 if (r < 0)
1642 return log_error_errno(errno, "error creating socketpair: %m");
1643
693d371d 1644 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1645
2ff48e98 1646 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
e237d8cb 1647 if (r < 0)
2ff48e98 1648 return log_error_errno(r, "could not enable SO_PASSCRED: %m");
e237d8cb 1649
b7759e04
YW
1650 r = udev_watch_init();
1651 if (r < 0)
1652 return log_error_errno(r, "Failed to create inotify descriptor: %m");
1653 manager->fd_inotify = r;
e237d8cb 1654
2024ed61 1655 udev_watch_restore();
e237d8cb
TG
1656
1657 /* block and listen to all signals on signalfd */
72c0a2c2 1658 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1659
49f997f3
TG
1660 r = sd_event_default(&manager->event);
1661 if (r < 0)
709f6e46 1662 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1663
693d371d
TG
1664 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1665 if (r < 0)
1666 return log_error_errno(r, "error creating sigint event source: %m");
1667
1668 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1669 if (r < 0)
1670 return log_error_errno(r, "error creating sigterm event source: %m");
1671
1672 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1673 if (r < 0)
1674 return log_error_errno(r, "error creating sighup event source: %m");
1675
1676 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1677 if (r < 0)
1678 return log_error_errno(r, "error creating sigchld event source: %m");
1679
1680 r = sd_event_set_watchdog(manager->event, true);
1681 if (r < 0)
1682 return log_error_errno(r, "error creating watchdog event source: %m");
1683
11b1dd8c 1684 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1685 if (r < 0)
1686 return log_error_errno(r, "error creating ctrl event source: %m");
1687
1688 /* This needs to be after the inotify and uevent handling, to make sure
1689 * that the ping is send back after fully processing the pending uevents
1690 * (including the synthetic ones we may create due to inotify events).
1691 */
1692 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1693 if (r < 0)
1694 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1695
1696 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1697 if (r < 0)
1698 return log_error_errno(r, "error creating inotify event source: %m");
1699
11b1dd8c 1700 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1701 if (r < 0)
1702 return log_error_errno(r, "error creating uevent event source: %m");
1703
1704 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1705 if (r < 0)
1706 return log_error_errno(r, "error creating worker event source: %m");
1707
1708 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1709 if (r < 0)
1710 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1711
1cc6c93a 1712 *ret = TAKE_PTR(manager);
11b1dd8c 1713
86c3bece 1714 return 0;
c0c6806b
TG
1715}
1716
077fc5e2 1717static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1718 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1719 int r;
1720
1721 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1722 if (r < 0) {
1723 r = log_error_errno(r, "failed to allocate manager object: %m");
1724 goto exit;
1725 }
1726
1727 r = udev_rules_apply_static_dev_perms(manager->rules);
1728 if (r < 0)
1729 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1730
1ef72b55
MS
1731 (void) sd_notifyf(false,
1732 "READY=1\n"
1733 "STATUS=Processing with %u children at max", arg_children_max);
077fc5e2
DH
1734
1735 r = sd_event_loop(manager->event);
1736 if (r < 0) {
1737 log_error_errno(r, "event loop failed: %m");
1738 goto exit;
1739 }
1740
1741 sd_event_get_exit_code(manager->event, &r);
1742
1743exit:
1744 sd_notify(false,
1745 "STOPPING=1\n"
1746 "STATUS=Shutting down...");
1747 if (manager)
1748 udev_ctrl_cleanup(manager->ctrl);
1749 return r;
1750}
1751
/* Daemon entry point.
 *
 * Sets up logging, parses argv and the kernel command line, derives a
 * default worker count from CPU and memory when none is given, prepares
 * the environment (/, umask, SELinux, /run/udev, /dev), discovers or
 * creates the listening sockets, optionally daemonizes, and then hands
 * control to run(). Returns EXIT_SUCCESS/EXIT_FAILURE. */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        log_set_max_level_realm(LOG_REALM_SYSTEMD, log_get_max_level());

        r = must_be_root();
        if (r < 0)
                goto exit;

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                /* Default: 8 workers, plus 8 per CPU, capped by available
                 * memory (one worker per 128 MiB), but never below 10. */
                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 8;

                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        /* Child carries on as the daemon. */
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* Parent exits immediately. */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* Protect the daemon itself from the OOM killer. */
                r = set_oom_score_adjust(-1000);
                if (r < 0)
                        log_debug_errno(r, "Failed to adjust OOM score, ignoring: %m");
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}