/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright © 2009 Canonical Ltd.
 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "device-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "libudev-device-internal.h"
#include "list.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "proc-cmdline.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-builtin.h"
#include "udev-ctrl.h"
#include "udev-util.h"
#include "udev-watch.h"
#include "udev.h"
#include "user-util.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
static unsigned arg_children_max = 0;
static usec_t arg_exec_delay_usec = 0;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

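/* Global state of the main daemon: the event loop, the set of worker processes,
 * the queue of pending uevents, and the sockets/event sources the loop drives. */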
typedef struct Manager {
        sd_event *event;
        Hashmap *workers;
        LIST_HEAD(struct event, events);
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        Hashmap *properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;
        sd_event_source *kill_workers_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

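/* Drop an event from the queue and release its resources; removing the last queued
 * event also removes /run/udev/queue (only in the process that created the queue). */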
static void event_free(struct event *event) {
        int r;

        if (!event)
                return;
        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        if (LIST_IS_EMPTY(event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid_cached()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

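/* Register a freshly forked worker: take over its monitor and track it by PID in manager->workers. */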
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

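/* Mark the worker busy with the given event and arm the per-event warning and kill timers. */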
static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit();

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);
        sd_event_source_unref(manager->kill_workers_event);

        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        hashmap_free_free_free(manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);
        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}

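/* Fork a new worker for the given event. The child drops the manager state it inherited,
 * then loops: handle the device, report the result over worker_watch, and wait for the
 * next device from the main daemon or for SIGTERM. */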
static void worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                _cleanup_(udev_device_unrefp) struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                r = set_oom_score_adjust(0);
                if (r < 0)
                        log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

                for (;;) {
                        _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev->device, arg_exec_delay_usec, rtnl);
                        if (!udev_event) {
                                r = -ENOMEM;
                                goto out;
                        }

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            shall_lock_device(dev)) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (!rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(dev->device);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        dev = udev_device_unref(dev);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

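/* Hand the event to an idle worker, or fork a new one if none is idle and we are
 * still below arg_children_max. */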
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

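/* Append a freshly received uevent to the queue; creating the first entry also
 * creates /run/udev/queue, so tools like "udevadm settle" can tell that work is pending. */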
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                (void) kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourselves, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex > 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* device names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 || event->ifindex > 0)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit();

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}

static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_debug("Cleanup idle workers");
        manager_kill_workers(manager);

        return 1;
}

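/* Arm (or re-arm) a one-shot timer that kills idle workers about 3 seconds after the
 * queue went empty; a new event source is created if the existing one is unusable. */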
static int manager_enable_kill_workers_event(Manager *manager) {
        int enabled, r;

        assert(manager);

        if (!manager->kill_workers_event)
                goto create_new;

        r = sd_event_source_get_enabled(manager->kill_workers_event, &enabled);
        if (r < 0) {
                log_debug_errno(r, "Failed to query whether event source for killing idle workers is enabled or not, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        if (enabled == SD_EVENT_ONESHOT)
                return 0;

        r = sd_event_source_set_time(manager->kill_workers_event, now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC);
        if (r < 0) {
                log_debug_errno(r, "Failed to set time to event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_ONESHOT);
        if (r < 0) {
                log_debug_errno(r, "Failed to enable event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        return 0;

create_new:
        r = sd_event_add_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
                              now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC, on_kill_workers_event, manager);
        if (r < 0)
                return log_warning_errno(r, "Failed to create timer event for killing idle workers: %m");

        return 0;
}

static int manager_disable_kill_workers_event(Manager *manager) {
        int r;

        if (!manager->kill_workers_event)
                return 0;

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
        if (r < 0)
                return log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        return 0;
}

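/* Walk the queue and dispatch every queued event whose devpath is not blocked by an
 * earlier event; rules are reloaded first if they changed (checked at most every 3s). */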
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        (void) manager_disable_kill_workers_event(manager);

        udev_builtin_init();

        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_name_timing);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct event *event, *tmp;

        LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

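/* Called when a worker writes its "done" message to the socketpair: mark the worker
 * idle, free its event, and try to schedule more queued events. */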
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i, r;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str) {
                _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
                char *eq;

                eq = strchr(str, '=');
                if (!eq) {
                        log_error("Invalid key format '%s'", str);
                        return 1;
                }

                key = strndup(str, eq - str);
                if (!key) {
                        log_oom();
                        return 1;
                }

                old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);

                r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
                if (r < 0) {
                        log_oom();
                        return 1;
                }

                eq++;
                if (isempty(eq)) {
                        log_debug("udevd message (ENV) received, unset '%s'", key);

                        r = hashmap_put(manager->properties, key, NULL);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                } else {
                        val = strdup(eq);
                        if (!val) {
                                log_oom();
                                return 1;
                        }

                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);

                        r = hashmap_put(manager->properties, key, val);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                }

                key = val = NULL;
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

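/* A watched device node was closed after writing: write "change" to the relevant
 * uevent file(s), trying to re-read the partition table of whole disks first. */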
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devname, *syspath, *devtype;
        char filename[PATH_MAX];
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &devname);
        if (r < 0)
                return r;

        r = sd_device_get_syspath(dev, &syspath);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        if (streq_ptr("block", subsystem) &&
            streq_ptr("disk", devtype) &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("Device '%s' is closed, synthesising 'change'", devname);
                strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

                FOREACH_DEVICE(e, d) {
                        const char *t, *n, *s;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        if (sd_device_get_devname(d, &n) < 0 ||
                            sd_device_get_syspath(d, &s) < 0)
                                continue;

                        log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
                        strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
                }

                return 0;
        }

        log_debug("Device %s is closed, synthesising 'change'", devname);
        strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

        return 0;
}

static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        (void) manager_disable_kill_workers_event(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                if (udev_watch_lookup(e->wd, &dev) <= 0)
                        continue;

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE)
                        synthesize_change(dev);
                else if (e->mask & IN_IGNORED)
                        udev_watch_end(dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

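/* Reap exited workers; for a worker that failed, drop the database entry of its event,
 * forward the original kernel event unamended, and then schedule the next queued events. */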
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exited with status 0x%04x", pid, status);

                if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
                        log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                        /* delete state from disk */
                        udev_device_delete_db(worker->event->dev);
                        udev_device_tag_index(worker->event->dev, NULL, false);
                        /* forward kernel event without amending it */
                        udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers) && manager->kill_workers_event)
                (void) sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);

        return 1;
}

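/* Runs after each event-loop iteration: with an empty queue, either arm the idle-worker
 * cleanup timer, exit if requested, or kill leftover processes in our cgroup. */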
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        if (!LIST_IS_EMPTY(manager->events))
                return 1;

        /* There are no pending events. Let's clean up idle processes. */

        if (!hashmap_isempty(manager->workers)) {
                /* There are idle workers */
                (void) manager_enable_kill_workers_event(manager);
                return 1;
        }

        /* There are no idle workers. */

        if (manager->exit)
                return sd_event_exit(manager->event, 0);

        if (manager->cgroup)
                /* cleanup possible left-over processes in our cgroup */
                (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);

        return 1;
}

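/* Take the control and netlink sockets from systemd socket activation if they were
 * passed in, otherwise create and bind them here. */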
static int listen_fds(int *rctrl, int *rnetlink) {
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;

                ctrl = udev_ctrl_new();
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;

                monitor = udev_monitor_new_from_netlink(NULL, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(netlink_fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (netlink_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_priority=<level>                 syslog priority
 *   udev.children_max=<number of workers>     events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>       delay execution of every executed program
 *   udev.event_timeout=<number of seconds>    seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r = 0;

        assert(key);

        if (!value)
                return 0;

        if (proc_cmdline_key_streq(key, "udev.log_priority")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = util_log_priority(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r >= 0) {
                        arg_event_timeout_usec *= USEC_PER_SEC;
                        arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                }

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_exec_delay_usec);

        } else if (startswith(key, "udev."))
                log_warning("Unknown udev kernel command line option \"%s\"", key);

        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}

static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-udevd.service", "8", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               "\nSee the %s for details.\n"
               , program_invocation_short_name
               , link
        );

        return 0;
}

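/* Parse the command line, e.g. (illustrative): "systemd-udevd --daemon --children-max=16". */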
static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd' },
                { "debug",              no_argument,            NULL, 'D' },
                { "children-max",       required_argument,      NULL, 'c' },
                { "exec-delay",         required_argument,      NULL, 'e' },
                { "event-timeout",      required_argument,      NULL, 't' },
                { "resolve-names",      required_argument,      NULL, 'N' },
                { "help",               no_argument,            NULL, 'h' },
                { "version",            no_argument,            NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
                        break;
                case 'e':
                        r = parse_sec(optarg, &arg_exec_delay_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N': {
                        ResolveNameTiming t;

                        t = resolve_name_timing_from_string(optarg);
                        if (t < 0)
                                log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
                        else
                                arg_resolve_name_timing = t;
                        break;
                }
                case 'h':
                        return help();
                case 'V':
                        printf("%s\n", PACKAGE_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

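/* Allocate the Manager, take over the passed-in sockets, and wire up all sd-event
 * sources (signals, control socket, uevents, inotify, worker messages, post callback). */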
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        udev_builtin_init();

        manager->rules = udev_rules_new(arg_resolve_name_timing);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        LIST_HEAD_INIT(manager->events);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
        if (r < 0)
                return log_error_errno(r, "could not enable SO_PASSCRED: %m");

        r = udev_watch_init();
        if (r < 0)
                return log_error_errno(r, "Failed to create inotify descriptor: %m");
        manager->fd_inotify = r;

        udev_watch_restore();

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = TAKE_PTR(manager);

        return 0;
}

static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notifyf(false,
                          "READY=1\n"
                          "STATUS=Processing with %u children at max", arg_children_max);

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        log_set_max_level_realm(LOG_REALM_SYSTEMD, log_get_max_level());

        r = must_be_root();
        if (r < 0)
                goto exit;

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 8;

                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                r = set_oom_score_adjust(-1000);
                if (r < 0)
                        log_debug_errno(r, "Failed to adjust OOM score, ignoring: %m");
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}