/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright © 2009 Canonical Ltd.
 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "device-monitor-private.h"
#include "device-private.h"
#include "device-util.h"
#include "event-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "limits-util.h"
#include "list.h"
#include "main-func.h"
#include "mkdir.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "pretty-print.h"
#include "proc-cmdline.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "strv.h"
#include "strxcpyx.h"
#include "syslog-util.h"
#include "udevd.h"
#include "udev-builtin.h"
#include "udev-ctrl.h"
#include "udev-event.h"
#include "udev-util.h"
#include "udev-watch.h"
#include "user-util.h"
#include "version.h"

#define WORKER_NUM_MAX 2048U

static bool arg_debug = false;
static int arg_daemonize = false;
static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
static unsigned arg_children_max = 0;
static usec_t arg_exec_delay_usec = 0;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static int arg_timeout_signal = SIGKILL;
static bool arg_blockdev_read_only = false;

typedef struct Manager {
        sd_event *event;
        Hashmap *workers;
        LIST_HEAD(struct event, events);
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */
        int log_level;

        UdevRules *rules;
        Hashmap *properties;

        sd_netlink *rtnl;

        sd_device_monitor *monitor;
        struct udev_ctrl *ctrl;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *inotify_event;
        sd_event_source *kill_workers_event;

        usec_t last_usec;

        bool stop_exec_queue;
        bool exit;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        Manager *manager;
        struct worker *worker;
        enum event_state state;

        sd_device *dev;
        sd_device *dev_kernel; /* clone of originally received device */

        uint64_t seqnum;
        uint64_t delaying_seqnum;

        sd_event_source *timeout_warning_event;
        sd_event_source *timeout_event;

        LIST_FIELDS(struct event, event);
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
        WORKER_KILLING,
};

struct worker {
        Manager *manager;
        pid_t pid;
        sd_device_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

static void event_free(struct event *event) {
        if (!event)
                return;

        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        sd_device_unref(event->dev);
        sd_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning_event);
        sd_event_source_unref(event->timeout_event);

        if (event->worker)
                event->worker->event = NULL;

        /* only clean up the queue from the process that created it */
        if (LIST_IS_EMPTY(event->manager->events) &&
            event->manager->pid == getpid_cached())
                if (unlink("/run/udev/queue") < 0)
                        log_warning_errno(errno, "Failed to unlink /run/udev/queue: %m");

        free(event);
}

static struct worker* worker_free(struct worker *worker) {
        if (!worker)
                return NULL;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        sd_device_monitor_unref(worker->monitor);
        event_free(worker->event);

        return mfree(worker);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(struct worker *, worker_free);
DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(worker_hash_op, void, trivial_hash_func, trivial_compare_func, struct worker, worker_free);

static int worker_new(struct worker **ret, Manager *manager, sd_device_monitor *worker_monitor, pid_t pid) {
        _cleanup_(worker_freep) struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        /* close monitor, but keep address around */
        device_monitor_disconnect(worker_monitor);

        worker = new(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        *worker = (struct worker) {
                .manager = manager,
                .monitor = sd_device_monitor_ref(worker_monitor),
                .pid = pid,
        };

        r = hashmap_ensure_put(&manager->workers, &worker_hash_op, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, arg_timeout_signal);
        event->worker->state = WORKER_KILLED;

        log_device_error(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" killed", event->worker->pid, event->seqnum);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        log_device_warning(event->dev, "Worker ["PID_FMT"] processing SEQNUM=%"PRIu64" is taking a long time", event->worker->pid, event->seqnum);

        return 1;
}

static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        (void) sd_event_add_time_relative(e, &event->timeout_warning_event, CLOCK_MONOTONIC,
                                          udev_warn_timeout(arg_event_timeout_usec), USEC_PER_SEC,
                                          on_event_timeout_warning, event);

        (void) sd_event_add_time_relative(e, &event->timeout_event, CLOCK_MONOTONIC,
                                          arg_event_timeout_usec, USEC_PER_SEC,
                                          on_event_timeout, event);
}

static void manager_clear_for_worker(Manager *manager) {
        assert(manager);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);

        manager->event = sd_event_unref(manager->event);

        manager->workers = hashmap_free(manager->workers);
        event_queue_cleanup(manager, EVENT_UNDEF);

        manager->monitor = sd_device_monitor_unref(manager->monitor);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
}

static Manager* manager_free(Manager *manager) {
        if (!manager)
                return NULL;

        udev_builtin_exit();

        if (manager->pid == getpid_cached())
                udev_ctrl_cleanup(manager->ctrl);

        manager_clear_for_worker(manager);

        sd_netlink_unref(manager->rtnl);

        hashmap_free_free_free(manager->properties);
        udev_rules_free(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        return mfree(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

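/* Tell the main daemon that this worker has finished handling an event. The message itself is
 * empty; the main process identifies the sender via the SCM_CREDENTIALS data on the socketpair. */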
static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

static int worker_lock_block_device(sd_device *dev, int *ret_fd) {
        _cleanup_close_ int fd = -1;
        const char *val;
        int r;

        assert(dev);
        assert(ret_fd);

        /* Take a shared lock on the device node; this establishes a concept of device "ownership" to
         * serialize device access. External processes holding an exclusive lock will cause udev to skip the
         * event handling; in the case udev acquired the lock, the external process can block until udev has
         * finished its event handling. */

        if (device_for_action(dev, SD_DEVICE_REMOVE))
                return 0;

        r = sd_device_get_subsystem(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");

        if (!streq(val, "block"))
                return 0;

        r = sd_device_get_sysname(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get sysname: %m");

        if (STARTSWITH_SET(val, "dm-", "md", "drbd"))
                return 0;

        r = sd_device_get_devtype(dev, &val);
        if (r < 0 && r != -ENOENT)
                return log_device_debug_errno(dev, r, "Failed to get devtype: %m");
        if (r >= 0 && streq(val, "partition")) {
                r = sd_device_get_parent(dev, &dev);
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to get parent device: %m");
        }

        r = sd_device_get_devname(dev, &val);
        if (r == -ENOENT)
                return 0;
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devname: %m");

        fd = open(val, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
        if (fd < 0) {
                log_device_debug_errno(dev, errno, "Failed to open '%s', ignoring: %m", val);
                return 0;
        }

        if (flock(fd, LOCK_SH|LOCK_NB) < 0)
                return log_device_debug_errno(dev, errno, "Failed to flock(%s): %m", val);

        *ret_fd = TAKE_FD(fd);
        return 1;
}

static int worker_mark_block_device_read_only(sd_device *dev) {
        _cleanup_close_ int fd = -1;
        const char *val;
        int state = 1, r;

        assert(dev);

        if (!arg_blockdev_read_only)
                return 0;

        /* Do this only once, when the block device is new. If the device is later retriggered let's not
         * toggle the bit again, so that people can boot up with full read-only mode and then unset the bit
         * for specific devices only. */
        if (!device_for_action(dev, SD_DEVICE_ADD))
                return 0;

        r = sd_device_get_subsystem(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get subsystem: %m");

        if (!streq(val, "block"))
                return 0;

        r = sd_device_get_sysname(dev, &val);
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get sysname: %m");

        /* Exclude synthetic devices for now, this is supposed to be a safety feature to avoid modification
         * of physical devices, and what sits on top of those doesn't really matter if we don't allow the
         * underlying block devices to receive changes. */
        if (STARTSWITH_SET(val, "dm-", "md", "drbd", "loop", "nbd", "zram"))
                return 0;

        r = sd_device_get_devname(dev, &val);
        if (r == -ENOENT)
                return 0;
        if (r < 0)
                return log_device_debug_errno(dev, r, "Failed to get devname: %m");

        fd = open(val, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
        if (fd < 0)
                return log_device_debug_errno(dev, errno, "Failed to open '%s', ignoring: %m", val);

        if (ioctl(fd, BLKROSET, &state) < 0)
                return log_device_warning_errno(dev, errno, "Failed to mark block device '%s' read-only: %m", val);

        log_device_info(dev, "Successfully marked block device '%s' read-only.", val);
        return 0;
}

static int worker_process_device(Manager *manager, sd_device *dev) {
        _cleanup_(udev_event_freep) UdevEvent *udev_event = NULL;
        _cleanup_close_ int fd_lock = -1;
        int r;

        assert(manager);
        assert(dev);

        log_device_uevent(dev, "Processing device");

        udev_event = udev_event_new(dev, arg_exec_delay_usec, manager->rtnl, manager->log_level);
        if (!udev_event)
                return -ENOMEM;

        r = worker_lock_block_device(dev, &fd_lock);
        if (r == -EAGAIN) {
                /* So this is a block device and the device is locked currently via the BSD advisory locks —
                 * someone else is exclusively using it. This means we don't run our udev rules now, to not
                 * interfere. However we want to know when the device is unlocked again, and retrigger the
                 * device again then, so that the rules are run eventually. For that we use IN_CLOSE_WRITE
                 * inotify watches (which isn't exactly the same as waiting for the BSD locks to release, but
                 * not totally off, as long as unlock+close() is done together, as it usually is).
                 *
                 * (The user-facing side of this: https://systemd.io/BLOCK_DEVICE_LOCKING)
                 *
                 * There's a bit of a chicken and egg problem here for this however: inotify watching is
                 * supposed to be enabled via an option set via udev rules (OPTIONS+="watch"). If we skip the
                 * udev rules here however (as we just said we do), we would thus never see that specific
                 * udev rule, and thus never turn on inotify watching. But in order to catch up eventually
                 * and run them we need the inotify watching: hence a classic chicken and egg problem.
                 *
                 * Our way out here: if we see the block device locked, unconditionally watch the device via
                 * inotify, regardless of any explicit request via OPTIONS+="watch". Thus, a device that is
                 * currently locked via the BSD file locks will be treated as if we ran a single udev rule
                 * only for it: the one that turns on inotify watching for it. If we eventually see the
                 * inotify IN_CLOSE_WRITE event, and then run the rules after all and we then realize that
                 * this wasn't actually requested (i.e. no OPTIONS+="watch" set) we'll simply turn off the
                 * watching again (see below). Effectively this means: inotify watching is now enabled either
                 * a) when the udev rules say so, or b) while the device is locked.
                 *
                 * Worst case scenario hence: in the (unlikely) case someone locked the device and we clash
                 * with that we might do inotify watching for a brief moment for a device where we actually
                 * weren't supposed to. But that shouldn't be too bad, in particular as BSD locks being taken
                 * on a block device is kinda an indication that the inotify logic is desired too, to some
                 * degree — they go hand-in-hand after all. */

                log_device_debug(dev, "Block device is currently locked, installing watch to wait until the lock is released.");
                (void) udev_watch_begin(dev);

                /* Now the watch is installed, let's lock the device again, maybe in the meantime things changed */
                r = worker_lock_block_device(dev, &fd_lock);
        }
        if (r < 0)
                return r;

        (void) worker_mark_block_device_read_only(dev);

        /* apply rules, create node, symlinks */
        r = udev_event_execute_rules(udev_event, arg_event_timeout_usec, arg_timeout_signal, manager->properties, manager->rules);
        if (r < 0)
                return r;

        udev_event_execute_run(udev_event, arg_event_timeout_usec, arg_timeout_signal);

        if (!manager->rtnl)
                /* in case rtnl was initialized */
                manager->rtnl = sd_netlink_ref(udev_event->rtnl);

        /* apply/restore/end inotify watch */
        if (udev_event->inotify_watch) {
                (void) udev_watch_begin(dev);
                r = device_update_db(dev);
                if (r < 0)
                        return log_device_debug_errno(dev, r, "Failed to update database under /run/udev/data/: %m");
        } else
                (void) udev_watch_end(dev);

        log_device_uevent(dev, "Device processed");
        return 0;
}

static int worker_device_monitor_handler(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(dev);
        assert(manager);

        r = worker_process_device(manager, dev);
        if (r == -EAGAIN)
                /* if we couldn't acquire the flock(), then proceed quietly */
                log_device_debug_errno(dev, r, "Device currently locked, not processing.");
        else {
                if (r < 0)
                        log_device_warning_errno(dev, r, "Failed to process device, ignoring: %m");

                /* send processed event back to libudev listeners */
                r = device_monitor_send_device(monitor, NULL, dev);
                if (r < 0)
                        log_device_warning_errno(dev, r, "Failed to send device, ignoring: %m");
        }

        /* send udevd the result of the event execution */
        r = worker_send_message(manager->worker_watch[WRITE_END]);
        if (r < 0)
                log_device_warning_errno(dev, r, "Failed to send signal to main daemon, ignoring: %m");

        /* Reset the log level, as it might be changed by "OPTIONS=log_level=". */
        log_set_max_level(manager->log_level);

        return 1;
}

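/* Entry point of a worker process: handle the device it was forked for, then keep accepting
 * further devices from the main daemon on its own monitor until told to exit. */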
static int worker_main(Manager *_manager, sd_device_monitor *monitor, sd_device *first_device) {
        _cleanup_(sd_device_unrefp) sd_device *dev = first_device;
        _cleanup_(manager_freep) Manager *manager = _manager;
        int r;

        assert(manager);
        assert(monitor);
        assert(dev);

        assert_se(unsetenv("NOTIFY_SOCKET") == 0);

        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, -1) >= 0);

        /* Reset OOM score, we only protect the main daemon. */
        r = set_oom_score_adjust(0);
        if (r < 0)
                log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

        /* Clear unnecessary data in Manager object. */
        manager_clear_for_worker(manager);

        r = sd_event_new(&manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, NULL, NULL);
        if (r < 0)
                return log_error_errno(r, "Failed to set SIGTERM event: %m");

        r = sd_device_monitor_attach_event(monitor, manager->event);
        if (r < 0)
                return log_error_errno(r, "Failed to attach event loop to device monitor: %m");

        r = sd_device_monitor_start(monitor, worker_device_monitor_handler, manager);
        if (r < 0)
                return log_error_errno(r, "Failed to start device monitor: %m");

        (void) sd_event_source_set_description(sd_device_monitor_get_event_source(monitor), "worker-device-monitor");

        /* Process first device */
        (void) worker_device_monitor_handler(monitor, dev, manager);

        r = sd_event_loop(manager->event);
        if (r < 0)
                return log_error_errno(r, "Event loop failed: %m");

        return 0;
}

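/* Fork a new worker process and hand it the event's device. */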
static int worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(sd_device_monitor_unrefp) sd_device_monitor *worker_monitor = NULL;
        struct worker *worker;
        pid_t pid;
        int r;

        /* listen for new events */
        r = device_monitor_new_full(&worker_monitor, MONITOR_GROUP_NONE, -1);
        if (r < 0)
                return r;

        /* allow the main daemon netlink address to send devices to the worker */
        r = device_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        if (r < 0)
                return log_error_errno(r, "Worker: Failed to set unicast sender: %m");

        r = device_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                return log_error_errno(r, "Worker: Failed to enable receiving of device: %m");

        r = safe_fork(NULL, FORK_DEATHSIG, &pid);
        if (r < 0) {
                event->state = EVENT_QUEUED;
                return log_error_errno(r, "Failed to fork() worker: %m");
        }
        if (r == 0) {
                /* Worker process */
                r = worker_main(manager, worker_monitor, sd_device_ref(event->dev));
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }

        r = worker_new(&worker, manager, worker_monitor, pid);
        if (r < 0)
                return log_error_errno(r, "Failed to create worker object: %m");

        worker_attach_event(worker, event);

        log_device_debug(event->dev, "Worker ["PID_FMT"] is forked for processing SEQNUM=%"PRIu64".", pid, event->seqnum);
        return 0;
}

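/* Dispatch an event to an idle worker, or fork a new worker if none is idle and the limit allows it. */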
static void event_run(Manager *manager, struct event *event) {
        static bool log_children_max_reached = true;
        struct worker *worker;
        int r;

        assert(manager);
        assert(event);

        log_device_uevent(event->dev, "Device ready for processing");

        HASHMAP_FOREACH(worker, manager->workers) {
                if (worker->state != WORKER_IDLE)
                        continue;

                r = device_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (r < 0) {
                        log_device_error_errno(event->dev, r, "Worker ["PID_FMT"] did not accept message, killing the worker: %m",
                                               worker->pid);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {

                /* Avoid spamming the debug logs if the limit is already reached and
                 * many events still need to be processed */
                if (log_children_max_reached && arg_children_max > 1) {
                        log_debug("Maximum number (%u) of children reached.", hashmap_size(manager->workers));
                        log_children_max_reached = false;
                }
                return;
        }

        /* Re-enable the debug message for the next batch of events */
        log_children_max_reached = true;

        /* fork with up-to-date SELinux label database, so the child inherits the up-to-date db
         * and, until the next SELinux policy changes, we save further reloads in future children */
        mac_selinux_maybe_reload();

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

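/* Queue a freshly received device event; /run/udev/queue exists while events are pending. */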
static int event_queue_insert(Manager *manager, sd_device *dev) {
        _cleanup_(sd_device_unrefp) sd_device *clone = NULL;
        struct event *event;
        uint64_t seqnum;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        assert(manager->pid == getpid_cached());

        /* We only accept devices received by device monitor. */
        r = sd_device_get_seqnum(dev, &seqnum);
        if (r < 0)
                return r;

        /* Save original device to restore the state on failures. */
        r = device_shallow_clone(dev, &clone);
        if (r < 0)
                return r;

        r = device_copy_properties(clone, dev);
        if (r < 0)
                return r;

        event = new(struct event, 1);
        if (!event)
                return -ENOMEM;

        *event = (struct event) {
                .manager = manager,
                .dev = sd_device_ref(dev),
                .dev_kernel = TAKE_PTR(clone),
                .seqnum = seqnum,
                .state = EVENT_QUEUED,
        };

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "Failed to touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        log_device_uevent(dev, "Device is queued");

        return 0;
}

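/* Terminate workers; unless 'force' is set, workers currently running an event are only marked
 * WORKER_KILLING and are terminated once they report back. */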
static void manager_kill_workers(Manager *manager, bool force) {
        struct worker *worker;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers) {
                if (worker->state == WORKER_KILLED)
                        continue;

                if (worker->state == WORKER_RUNNING && !force) {
                        worker->state = WORKER_KILLING;
                        continue;
                }

                worker->state = WORKER_KILLED;
                (void) kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static int is_device_busy(Manager *manager, struct event *event) {
        const char *subsystem, *devpath, *devpath_old = NULL;
        dev_t devnum = makedev(0, 0);
        struct event *loop_event;
        size_t devpath_len;
        int r, ifindex = 0;
        bool is_block;

        r = sd_device_get_subsystem(event->dev, &subsystem);
        if (r < 0)
                return r;

        is_block = streq(subsystem, "block");

        r = sd_device_get_devpath(event->dev, &devpath);
        if (r < 0)
                return r;

        devpath_len = strlen(devpath);

        r = sd_device_get_property_value(event->dev, "DEVPATH_OLD", &devpath_old);
        if (r < 0 && r != -ENOENT)
                return r;

        r = sd_device_get_devnum(event->dev, &devnum);
        if (r < 0 && r != -ENOENT)
                return r;

        r = sd_device_get_ifindex(event->dev, &ifindex);
        if (r < 0 && r != -ENOENT)
                return r;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                size_t loop_devpath_len, common;
                const char *loop_devpath;

                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(devnum) != 0) {
                        const char *s;
                        dev_t d;

                        if (sd_device_get_subsystem(loop_event->dev, &s) < 0)
                                continue;

                        if (sd_device_get_devnum(loop_event->dev, &d) >= 0 &&
                            devnum == d && is_block == streq(s, "block"))
                                goto set_delaying_seqnum;
                }

                /* check network device ifindex */
                if (ifindex > 0) {
                        int i;

                        if (sd_device_get_ifindex(loop_event->dev, &i) >= 0 &&
                            ifindex == i)
                                goto set_delaying_seqnum;
                }

                if (sd_device_get_devpath(loop_event->dev, &loop_devpath) < 0)
                        continue;

                /* check our old name */
                if (devpath_old && streq(devpath_old, loop_devpath))
                        goto set_delaying_seqnum;

                loop_devpath_len = strlen(loop_devpath);

                /* compare devpath */
                common = MIN(devpath_len, loop_devpath_len);

                /* one devpath is contained in the other? */
                if (!strneq(devpath, loop_devpath, common))
                        continue;

                /* identical device event found */
                if (devpath_len == loop_devpath_len)
                        goto set_delaying_seqnum;

                /* parent device event found */
                if (devpath[common] == '/')
                        goto set_delaying_seqnum;

                /* child device event found */
                if (loop_devpath[common] == '/')
                        goto set_delaying_seqnum;
        }

        return false;

set_delaying_seqnum:
        log_device_debug(event->dev, "SEQNUM=%" PRIu64 " blocked by SEQNUM=%" PRIu64,
                         event->seqnum, loop_event->seqnum);

        event->delaying_seqnum = loop_event->seqnum;
        return true;
}

static void manager_exit(Manager *manager) {
        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->monitor = sd_device_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager, true);
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager, false);
        manager->rules = udev_rules_free(manager->rules);
        udev_builtin_exit();

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}

static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_debug("Cleanup idle workers");
        manager_kill_workers(manager, false);

        return 1;
}

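/* Walk the queue and start every queued event that is not blocked by a related device's event. */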
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;
        int r;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            usec - manager->last_usec > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        r = event_source_disable(manager->kill_workers_event);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        udev_builtin_init();

        if (!manager->rules) {
                r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
                if (r < 0) {
                        log_warning_errno(r, "Failed to read udev rules: %m");
                        return;
                }
        }

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_device_busy(manager, event) != 0)
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct event *event, *tmp;

        LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

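/* Called when a worker reports the result of an event over the worker socketpair. */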
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred))) control;
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                ssize_t size;
                struct ucred *ucred;
                struct worker *worker;

                size = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT);
                if (size == -EINTR)
                        continue;
                if (size == -EAGAIN)
                        /* nothing more to read */
                        break;
                if (size < 0)
                        return log_error_errno(size, "Failed to receive message: %m");

                cmsg_close_all(&msghdr);

                if (size != sizeof(struct worker_message)) {
                        log_warning("Ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred);
                if (!ucred || ucred->pid <= 0) {
                        log_warning("Ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("Worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state == WORKER_KILLING) {
                        worker->state = WORKER_KILLED;
                        (void) kill(worker->pid, SIGTERM);
                } else if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

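/* Called for every uevent received from the kernel via the netlink monitor. */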
static int on_uevent(sd_device_monitor *monitor, sd_device *dev, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        device_ensure_usec_initialized(dev, NULL);

        r = event_queue_insert(manager, dev);
        if (r < 0) {
                log_device_error_errno(dev, r, "Failed to insert device into event queue: %m");
                return 1;
        }

        /* we have fresh events, try to schedule them */
        event_queue_start(manager);

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(struct udev_ctrl *uctrl, enum udev_ctrl_msg_type type, const union udev_ctrl_msg_value *value, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(value);
        assert(manager);

        switch (type) {
        case UDEV_CTRL_SET_LOG_LEVEL:
                log_debug("Received udev control message (SET_LOG_LEVEL), setting log_level=%i", value->intval);
                log_set_max_level(value->intval);
                manager->log_level = value->intval;
                manager_kill_workers(manager, false);
                break;
        case UDEV_CTRL_STOP_EXEC_QUEUE:
                log_debug("Received udev control message (STOP_EXEC_QUEUE)");
                manager->stop_exec_queue = true;
                break;
        case UDEV_CTRL_START_EXEC_QUEUE:
                log_debug("Received udev control message (START_EXEC_QUEUE)");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
                break;
        case UDEV_CTRL_RELOAD:
                log_debug("Received udev control message (RELOAD)");
                manager_reload(manager);
                break;
        case UDEV_CTRL_SET_ENV: {
                _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
                const char *eq;

                eq = strchr(value->buf, '=');
                if (!eq) {
                        log_error("Invalid key format '%s'", value->buf);
                        return 1;
                }

                key = strndup(value->buf, eq - value->buf);
                if (!key) {
                        log_oom();
                        return 1;
                }

                old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);

                r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
                if (r < 0) {
                        log_oom();
                        return 1;
                }

                eq++;
                if (isempty(eq)) {
                        log_debug("Received udev control message (ENV), unsetting '%s'", key);

                        r = hashmap_put(manager->properties, key, NULL);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                } else {
                        val = strdup(eq);
                        if (!val) {
                                log_oom();
                                return 1;
                        }

                        log_debug("Received udev control message (ENV), setting '%s=%s'", key, val);

                        r = hashmap_put(manager->properties, key, val);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                }

                key = val = NULL;
                manager_kill_workers(manager, false);
                break;
        }
        case UDEV_CTRL_SET_CHILDREN_MAX:
                if (value->intval <= 0) {
                        log_debug("Received invalid udev control message (SET_MAX_CHILDREN, %i), ignoring.", value->intval);
                        return 0;
                }

                log_debug("Received udev control message (SET_MAX_CHILDREN), setting children_max=%i", value->intval);
                arg_children_max = value->intval;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
                break;
        case UDEV_CTRL_PING:
                log_debug("Received udev control message (PING)");
                break;
        case UDEV_CTRL_EXIT:
                log_debug("Received udev control message (EXIT)");
                manager_exit(manager);
                break;
        default:
                log_debug("Received unknown udev control message, ignoring");
        }

        return 1;
}

static int synthesize_change_one(sd_device *dev, sd_device *target) {
        int r;

        if (DEBUG_LOGGING) {
                const char *syspath = NULL;
                (void) sd_device_get_syspath(target, &syspath);
                log_device_debug(dev, "device is closed, synthesising 'change' on %s", strna(syspath));
        }

        r = sd_device_trigger(target, SD_DEVICE_CHANGE);
        if (r < 0)
                return log_device_debug_errno(target, r, "Failed to trigger 'change' uevent: %m");

        return 0;
}

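/* Synthesize "change" events after a device node was closed; for whole disks this also tries to
 * re-read the partition table and covers the partitions. */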
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devtype;
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        if (streq_ptr(subsystem, "block") &&
            streq_ptr(devtype, "disk") &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                const char *devname;
                sd_device *d;
                int fd;

                r = sd_device_get_devname(dev, &devname);
                if (r < 0)
                        return r;

                /* Try to re-read the partition table. This only succeeds if none of the devices is
                 * busy. The kernel returns 0 if no partition table is found, and we will not get an
                 * event for the disk. */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
                                continue;

                        has_partitions = true;
                        break;
                }

                /* We have partitions and re-read the table, the kernel already sent out a "change"
                 * event for the disk, and "remove/add" for all partitions. */
                if (part_table_read && has_partitions)
                        return 0;

                /* We have partitions but re-reading the partition table did not work, synthesize
                 * "change" for the disk and all partitions. */
                (void) synthesize_change_one(dev, dev);

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 || !streq(t, "partition"))
                                continue;

                        (void) synthesize_change_one(dev, d);
                }

        } else
                (void) synthesize_change_one(dev, dev);

        return 0;
}

static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;
        int r;

        assert(manager);

        r = event_source_disable(manager->kill_workers_event);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                if (udev_watch_lookup(e->wd, &dev) <= 0)
                        continue;

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE)
                        synthesize_change(dev);
                else if (e->mask & IN_IGNORED)
                        udev_watch_end(dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

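/* Reap exited workers; if one failed while handling an event, drop its database entry and forward
 * the original kernel event unmodified. */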
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("Worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("Worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("Worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status))
                        log_warning("Worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                else if (WIFSTOPPED(status)) {
                        log_info("Worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("Worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("Worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
                        log_device_error(worker->event->dev, "Worker ["PID_FMT"] failed", pid);

                        /* delete state from disk */
                        device_delete_db(worker->event->dev);
                        device_tag_index(worker->event->dev, NULL, false);

                        if (manager->monitor) {
                                /* forward kernel event without amending it */
                                r = device_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                                if (r < 0)
                                        log_device_error_errno(worker->event->dev_kernel, r, "Failed to send back device to kernel: %m");
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers)) {
                r = event_source_disable(manager->kill_workers_event);
                if (r < 0)
                        log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
        }

        return 1;
}

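/* Runs at the end of each event loop iteration: schedule idle-worker cleanup, exit when requested,
 * and kill left-over processes in our cgroup. */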
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        if (!LIST_IS_EMPTY(manager->events))
                return 1;

        /* There are no pending events. Let's cleanup idle process. */

        if (!hashmap_isempty(manager->workers)) {
                /* There are idle workers */
                (void) event_reset_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
                                        now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC,
                                        on_kill_workers_event, manager, 0, "kill-workers-event", false);
                return 1;
        }

        /* There are no idle workers. */

        if (manager->exit)
                return sd_event_exit(manager->event, 0);

        if (manager->cgroup)
                /* cleanup possible left-over processes in our cgroup */
                (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);

        return 1;
}

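/* Pick up the control and netlink sockets passed in via socket activation, if any. */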
c4b69e99 1436static int listen_fds(int *ret_ctrl, int *ret_netlink) {
fcff1e72 1437 int ctrl_fd = -1, netlink_fd = -1;
c4b69e99 1438 int fd, n;
912541b0 1439
c4b69e99
YW
1440 assert(ret_ctrl);
1441 assert(ret_netlink);
fcff1e72 1442
912541b0 1443 n = sd_listen_fds(true);
fcff1e72
TG
1444 if (n < 0)
1445 return n;
912541b0
KS
1446
1447 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
c52cff07 1448 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1) > 0) {
fcff1e72
TG
1449 if (ctrl_fd >= 0)
1450 return -EINVAL;
1451 ctrl_fd = fd;
912541b0
KS
1452 continue;
1453 }
1454
c52cff07 1455 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
fcff1e72
TG
1456 if (netlink_fd >= 0)
1457 return -EINVAL;
1458 netlink_fd = fd;
912541b0
KS
1459 continue;
1460 }
1461
fcff1e72 1462 return -EINVAL;
912541b0
KS
1463 }
1464
c4b69e99
YW
1465 *ret_ctrl = ctrl_fd;
1466 *ret_netlink = netlink_fd;
912541b0 1467
912541b0 1468 return 0;
7459bcdc
KS
1469}
1470
e6f86cac 1471/*
3f85ef0f 1472 * read the kernel command line, in case we need to get into debug mode
64a3494c 1473 * udev.log_level=<level> syslog priority
1d84ad94
LP
1474 * udev.children_max=<number of workers> events are fully serialized if set to 1
1475 * udev.exec_delay=<number of seconds> delay execution of every executed program
1476 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
95ac5230 1477 * udev.blockdev_read_only<=bool> mark all block devices read-only when they appear
e6f86cac 1478 */
96287a49 1479static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
95ac5230 1480 int r;
e6f86cac 1481
614a823c 1482 assert(key);
e6f86cac 1483
64a3494c
FB
1484 if (proc_cmdline_key_streq(key, "udev.log_level") ||
1485 proc_cmdline_key_streq(key, "udev.log_priority")) { /* kept for backward compatibility */
1d84ad94
LP
1486
1487 if (proc_cmdline_value_missing(key, value))
1488 return 0;
1489
46f0fbd8 1490 r = log_level_from_string(value);
92e72467
ZJS
1491 if (r >= 0)
1492 log_set_max_level(r);
1d84ad94
LP
1493
1494 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1495
1496 if (proc_cmdline_value_missing(key, value))
1497 return 0;
1498
9d9264ba 1499 r = parse_sec(value, &arg_event_timeout_usec);
1d84ad94
LP
1500
1501 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1502
1503 if (proc_cmdline_value_missing(key, value))
1504 return 0;
1505
020328e1 1506 r = safe_atou(value, &arg_children_max);
1d84ad94
LP
1507
1508 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1509
1510 if (proc_cmdline_value_missing(key, value))
1511 return 0;
1512
6b92f429 1513 r = parse_sec(value, &arg_exec_delay_usec);
1d84ad94 1514
e2099267 1515 } else if (proc_cmdline_key_streq(key, "udev.timeout_signal")) {
95ac5230 1516
e2099267
MS
1517 if (proc_cmdline_value_missing(key, value))
1518 return 0;
1519
1520 r = signal_from_string(value);
1521 if (r > 0)
1522 arg_timeout_signal = r;
95ac5230
LP
1523
1524 } else if (proc_cmdline_key_streq(key, "udev.blockdev_read_only")) {
1525
1526 if (!value)
1527 arg_blockdev_read_only = true;
1528 else {
1529 r = parse_boolean(value);
1530 if (r < 0)
1531 log_warning_errno(r, "Failed to parse udev.blockdev-read-only argument, ignoring: %s", value);
1532 else
1533 arg_blockdev_read_only = r;
1534 }
1535
1536 if (arg_blockdev_read_only)
1537 log_notice("All physical block devices will be marked read-only.");
1538
1539 return 0;
1540
1541 } else {
1542 if (startswith(key, "udev."))
1543 log_warning("Unknown udev kernel command line option \"%s\", ignoring.", key);
1544
1545 return 0;
1546 }
614a823c 1547
92e72467
ZJS
1548 if (r < 0)
1549 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1d84ad94 1550
614a823c 1551 return 0;
e6f86cac
KS
1552}
1553
37ec0fdd
LP
1554static int help(void) {
1555 _cleanup_free_ char *link = NULL;
1556 int r;
1557
1558 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1559 if (r < 0)
1560 return log_oom();
1561
ed216e1f 1562 printf("%s [OPTIONS...]\n\n"
d1109e12 1563 "Rule-based manager for device events and files.\n\n"
5ac0162c 1564 " -h --help Print this message\n"
2d19c17e
MF
1565 " -V --version Print version of the program\n"
1566 " -d --daemon Detach and run in the background\n"
1567 " -D --debug Enable debug output\n"
1568 " -c --children-max=INT Set maximum number of workers\n"
1569 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1570 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1571 " -N --resolve-names=early|late|never\n"
5ac0162c 1572 " When to resolve users and groups\n"
bc556335
DDM
1573 "\nSee the %s for details.\n",
1574 program_invocation_short_name,
1575 link);
37ec0fdd
LP
1576
1577 return 0;
ed216e1f
TG
1578}
1579
bba7a484 1580static int parse_argv(int argc, char *argv[]) {
e2099267
MS
1581 enum {
1582 ARG_TIMEOUT_SIGNAL,
1583 };
1584
912541b0 1585 static const struct option options[] = {
e2099267
MS
1586 { "daemon", no_argument, NULL, 'd' },
1587 { "debug", no_argument, NULL, 'D' },
1588 { "children-max", required_argument, NULL, 'c' },
1589 { "exec-delay", required_argument, NULL, 'e' },
1590 { "event-timeout", required_argument, NULL, 't' },
1591 { "resolve-names", required_argument, NULL, 'N' },
1592 { "help", no_argument, NULL, 'h' },
1593 { "version", no_argument, NULL, 'V' },
1594 { "timeout-signal", required_argument, NULL, ARG_TIMEOUT_SIGNAL },
912541b0
KS
1595 {}
1596 };
689a97f5 1597
044497e2 1598 int c, r;
689a97f5 1599
bba7a484
TG
1600 assert(argc >= 0);
1601 assert(argv);
912541b0 1602
e14b6f21 1603 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
bba7a484 1604 switch (c) {
912541b0 1605
912541b0 1606 case 'd':
bba7a484 1607 arg_daemonize = true;
912541b0
KS
1608 break;
1609 case 'c':
020328e1 1610 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8 1611 if (r < 0)
389f9bf2 1612 log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
912541b0
KS
1613 break;
1614 case 'e':
6b92f429 1615 r = parse_sec(optarg, &arg_exec_delay_usec);
6f5cf8a8 1616 if (r < 0)
6b92f429 1617 log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
912541b0 1618 break;
e2099267
MS
1619 case ARG_TIMEOUT_SIGNAL:
1620 r = signal_from_string(optarg);
1621 if (r <= 0)
1622 log_warning_errno(r, "Failed to parse --timeout-signal= value '%s', ignoring: %m", optarg);
1623 else
1624 arg_timeout_signal = r;
1625
1626 break;
9719859c 1627 case 't':
9d9264ba 1628 r = parse_sec(optarg, &arg_event_timeout_usec);
f1e8664e 1629 if (r < 0)
9d9264ba 1630 log_warning_errno(r, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg);
9719859c 1631 break;
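/* Note: parse_sec() accepts plain seconds as well as suffixed time spans, e.g. "30", "30s" or "3min". */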
912541b0 1632 case 'D':
bba7a484 1633 arg_debug = true;
912541b0 1634 break;
c4d44cba
YW
1635 case 'N': {
1636 ResolveNameTiming t;
1637
1638 t = resolve_name_timing_from_string(optarg);
1639 if (t < 0)
1640 log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
1641 else
1642 arg_resolve_name_timing = t;
912541b0 1643 break;
c4d44cba 1644 }
912541b0 1645 case 'h':
37ec0fdd 1646 return help();
912541b0 1647 case 'V':
681bd2c5 1648 printf("%s\n", GIT_VERSION);
bba7a484
TG
1649 return 0;
1650 case '?':
1651 return -EINVAL;
912541b0 1652 default:
bba7a484
TG
1653 assert_not_reached("Unhandled option");
1654
912541b0
KS
1655 }
1656 }
1657
bba7a484
TG
1658 return 1;
1659}
1660
b7f74dd4 1661static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1662 _cleanup_(manager_freep) Manager *manager = NULL;
b5af8c8c 1663 int r;
c0c6806b
TG
1664
1665 assert(ret);
1666
6f19b42f 1667 manager = new(Manager, 1);
c0c6806b
TG
1668 if (!manager)
1669 return log_oom();
1670
6f19b42f
YW
1671 *manager = (Manager) {
1672 .fd_inotify = -1,
1673 .worker_watch = { -1, -1 },
1674 .cgroup = cgroup,
1675 };
e237d8cb 1676
100bc5bf
YW
1677 r = udev_ctrl_new_from_fd(&manager->ctrl, fd_ctrl);
1678 if (r < 0)
1679 return log_error_errno(r, "Failed to initialize udev control socket: %m");
e237d8cb 1680
b5af8c8c
ZJS
1681 r = udev_ctrl_enable_receiving(manager->ctrl);
1682 if (r < 0)
1683 return log_error_errno(r, "Failed to bind udev control socket: %m");
c4b69e99 1684
7f2e3a14
YW
1685 r = device_monitor_new_full(&manager->monitor, MONITOR_GROUP_KERNEL, fd_uevent);
1686 if (r < 0)
1687 return log_error_errno(r, "Failed to initialize device monitor: %m");
e237d8cb 1688
1ffadeaa
FB
1689 /* Bump receiver buffer, but only if we are not called via socket activation, as in that
1690 * case systemd sets the receive buffer size for us, and the value in the .socket unit
1691 * should take full effect. */
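/* Note: the size passed to sd_device_monitor_set_receive_buffer_size() below is in bytes,
 * i.e. 128 * 1024 * 1024 = 128 MiB. */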
e77f52e5
YW
1692 if (fd_uevent < 0) {
1693 r = sd_device_monitor_set_receive_buffer_size(manager->monitor, 128 * 1024 * 1024);
1694 if (r < 0)
1695 log_warning_errno(r, "Failed to set receive buffer size for device monitor, ignoring: %m");
1696 }
c4b69e99 1697
b5af8c8c
ZJS
1698 r = device_monitor_enable_receiving(manager->monitor);
1699 if (r < 0)
1700 return log_error_errno(r, "Failed to bind netlink socket: %m");
1701
1a0bd015
YW
1702 manager->log_level = log_get_max_level();
1703
b5af8c8c
ZJS
1704 *ret = TAKE_PTR(manager);
1705
1706 return 0;
1707}
1708
1709static int main_loop(Manager *manager) {
d02c6f54 1710 int fd_worker, r;
b5af8c8c 1711
76e62a4d
YW
1712 manager->pid = getpid_cached();
1713
e237d8cb
TG
1714 /* unnamed socket from workers to the main daemon */
1715 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1716 if (r < 0)
044497e2 1717 return log_error_errno(errno, "Failed to create socketpair for communicating with workers: %m");
e237d8cb 1718
693d371d 1719 fd_worker = manager->worker_watch[READ_END];
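/* Workers report event completion on worker_watch[WRITE_END]; the main process reads those
 * messages from READ_END, dispatched through on_worker() further below. */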
e237d8cb 1720
2ff48e98 1721 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
e237d8cb 1722 if (r < 0)
044497e2 1723 return log_error_errno(r, "Failed to enable SO_PASSCRED: %m");
e237d8cb 1724
b7759e04
YW
1725 r = udev_watch_init();
1726 if (r < 0)
1727 return log_error_errno(r, "Failed to create inotify descriptor: %m");
1728 manager->fd_inotify = r;
e237d8cb 1729
2024ed61 1730 udev_watch_restore();
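/* Re-arm any inotify watches a previous daemon instance persisted under /run/udev/watch, so that
 * device nodes opened for writing keep being watched across a daemon restart. */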
e237d8cb
TG
1731
1732 /* Block SIGTERM, SIGINT, SIGHUP and SIGCHLD; sd-event dispatches them via signalfd below. */
72c0a2c2 1733 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1734
49f997f3
TG
1735 r = sd_event_default(&manager->event);
1736 if (r < 0)
044497e2 1737 return log_error_errno(r, "Failed to allocate event loop: %m");
49f997f3 1738
693d371d
TG
1739 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1740 if (r < 0)
044497e2 1741 return log_error_errno(r, "Failed to create SIGINT event source: %m");
693d371d
TG
1742
1743 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1744 if (r < 0)
044497e2 1745 return log_error_errno(r, "Failed to create SIGTERM event source: %m");
693d371d
TG
1746
1747 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1748 if (r < 0)
044497e2 1749 return log_error_errno(r, "Failed to create SIGHUP event source: %m");
693d371d
TG
1750
1751 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1752 if (r < 0)
044497e2 1753 return log_error_errno(r, "Failed to create SIGCHLD event source: %m");
693d371d
TG
1754
1755 r = sd_event_set_watchdog(manager->event, true);
1756 if (r < 0)
044497e2 1757 return log_error_errno(r, "Failed to create watchdog event source: %m");
693d371d 1758
d02c6f54
YW
1759 r = udev_ctrl_attach_event(manager->ctrl, manager->event);
1760 if (r < 0)
1761 return log_error_errno(r, "Failed to attach event to udev control: %m");
b5af8c8c 1762
d02c6f54 1763 r = udev_ctrl_start(manager->ctrl, on_ctrl_msg, manager);
693d371d 1764 if (r < 0)
d02c6f54 1765 return log_error_errno(r, "Failed to start udev control: %m");
693d371d
TG
1766
1767 /* This needs to be after the inotify and uevent handling, to make sure
1768 * that the ping is sent back after fully processing the pending uevents
1769 * (including the synthetic ones we may create due to inotify events).
1770 */
d02c6f54 1771 r = sd_event_source_set_priority(udev_ctrl_get_event_source(manager->ctrl), SD_EVENT_PRIORITY_IDLE);
693d371d 1772 if (r < 0)
044497e2 1773 return log_error_errno(r, "Failed to set IDLE event priority for udev control event source: %m");
693d371d
TG
1774
1775 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1776 if (r < 0)
044497e2 1777 return log_error_errno(r, "Failed to create inotify event source: %m");
693d371d 1778
f00d2b6d
YW
1779 r = sd_device_monitor_attach_event(manager->monitor, manager->event);
1780 if (r < 0)
1781 return log_error_errno(r, "Failed to attach event to device monitor: %m");
1782
1783 r = sd_device_monitor_start(manager->monitor, on_uevent, manager);
693d371d 1784 if (r < 0)
f00d2b6d
YW
1785 return log_error_errno(r, "Failed to start device monitor: %m");
1786
1787 (void) sd_event_source_set_description(sd_device_monitor_get_event_source(manager->monitor), "device-monitor");
693d371d
TG
1788
1789 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1790 if (r < 0)
044497e2 1791 return log_error_errno(r, "Failed to create worker event source: %m");
693d371d
TG
1792
1793 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1794 if (r < 0)
044497e2 1795 return log_error_errno(r, "Failed to create post event source: %m");
e237d8cb 1796
b5af8c8c 1797 udev_builtin_init();
077fc5e2 1798
c238a1f5 1799 r = udev_rules_load(&manager->rules, arg_resolve_name_timing);
b5af8c8c
ZJS
1800 if (r < 0)
1801 return log_error_errno(r, "Failed to read udev rules: %m");
077fc5e2
DH
1802
1803 r = udev_rules_apply_static_dev_perms(manager->rules);
1804 if (r < 0)
044497e2 1805 log_error_errno(r, "Failed to apply permissions on static device nodes: %m");
077fc5e2 1806
1ef72b55
MS
1807 (void) sd_notifyf(false,
1808 "READY=1\n"
1809 "STATUS=Processing with %u children at max", arg_children_max);
077fc5e2
DH
1810
1811 r = sd_event_loop(manager->event);
44dcf454 1812 if (r < 0)
044497e2 1813 log_error_errno(r, "Event loop failed: %m");
077fc5e2 1814
077fc5e2
DH
1815 sd_notify(false,
1816 "STOPPING=1\n"
1817 "STATUS=Shutting down...");
077fc5e2
DH
1818 return r;
1819}
1820
63e2d171 1821int run_udevd(int argc, char *argv[]) {
c26d1879 1822 _cleanup_free_ char *cgroup = NULL;
b5af8c8c 1823 _cleanup_(manager_freep) Manager *manager = NULL;
efa1606e 1824 int fd_ctrl = -1, fd_uevent = -1;
e5d7bce1 1825 int r;
bba7a484 1826
bba7a484 1827 log_set_target(LOG_TARGET_AUTO);
6b413782 1828 log_open();
e2099267 1829 udev_parse_config_full(&arg_children_max, &arg_exec_delay_usec, &arg_event_timeout_usec, &arg_resolve_name_timing, &arg_timeout_signal);
bba7a484 1830 log_parse_environment();
6b413782 1831 log_open(); /* Done again to update after reading configuration. */
bba7a484 1832
bba7a484
TG
1833 r = parse_argv(argc, argv);
1834 if (r <= 0)
0c5a109a 1835 return r;
bba7a484 1836
1d84ad94 1837 r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
614a823c 1838 if (r < 0)
044497e2 1839 log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");
912541b0 1840
78d3e041
KS
1841 if (arg_debug) {
1842 log_set_target(LOG_TARGET_CONSOLE);
bba7a484 1843 log_set_max_level(LOG_DEBUG);
78d3e041 1844 }
bba7a484 1845
fba868fa
LP
1846 r = must_be_root();
1847 if (r < 0)
0c5a109a 1848 return r;
912541b0 1849
712cebf1 1850 if (arg_children_max == 0) {
fe56acd8 1851 unsigned long cpu_limit, mem_limit, cpu_count = 1;
d457ff83 1852
fe56acd8
LP
1853 r = cpus_in_affinity_mask();
1854 if (r < 0)
1855 log_warning_errno(r, "Failed to determine number of local CPUs, ignoring: %m");
1856 else
1857 cpu_count = r;
88bd5a32
FB
1858
1859 cpu_limit = cpu_count * 2 + 16;
1860 mem_limit = MAX(physical_memory() / (128UL*1024*1024), 10U);
912541b0 1861
88bd5a32
FB
1862 arg_children_max = MIN(cpu_limit, mem_limit);
1863 arg_children_max = MIN(WORKER_NUM_MAX, arg_children_max);
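/* Illustrative example: on a machine with 4 CPUs in the affinity mask and 8 GiB of RAM,
 * cpu_limit = 4 * 2 + 16 = 24 and mem_limit = MAX(8 GiB / 128 MiB, 10) = 64,
 * so arg_children_max ends up as MIN(24, 64, WORKER_NUM_MAX) = 24. */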
e438c57a 1864
044497e2 1865 log_debug("Set children_max to %u", arg_children_max);
d457ff83 1866 }
912541b0 1867
712cebf1 1868 /* set umask before creating any file/directory */
712cebf1 1869 umask(022);
912541b0 1870
c3dacc8b 1871 r = mac_selinux_init();
0c5a109a 1872 if (r < 0)
a9ba0e32 1873 return r;
912541b0 1874
dae8b82e 1875 r = mkdir_errno_wrapper("/run/udev", 0755);
0c5a109a
ZJS
1876 if (r < 0 && r != -EEXIST)
1877 return log_error_errno(r, "Failed to create /run/udev: %m");
712cebf1 1878
31cbd202
YW
1879 if (getppid() == 1 && sd_booted() > 0) {
1880 /* Get our own cgroup, we regularly kill everything udev has left behind.
1881 * We only do this on systemd systems, and only if we are directly spawned
1882 * by PID1. Otherwise we are not guaranteed to have a dedicated cgroup. */
c26d1879 1883 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
11b9fb15 1884 if (r < 0) {
a2d61f07 1885 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
044497e2 1886 log_debug_errno(r, "Dedicated cgroup not found: %m");
11b9fb15 1887 else
044497e2 1888 log_warning_errno(r, "Failed to get cgroup: %m");
11b9fb15 1889 }
c26d1879
TG
1890 }
1891
b7f74dd4 1892 r = listen_fds(&fd_ctrl, &fd_uevent);
0c5a109a
ZJS
1893 if (r < 0)
1894 return log_error_errno(r, "Failed to listen on fds: %m");
b7f74dd4 1895
b5af8c8c
ZJS
1896 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1897 if (r < 0)
1898 return log_error_errno(r, "Failed to create manager: %m");
1899
bba7a484 1900 if (arg_daemonize) {
912541b0 1901 pid_t pid;
912541b0 1902
b5af8c8c 1903 log_info("Starting version " GIT_VERSION);
3cbb2057 1904
40e749b5 1905 /* connect /dev/null to stdin, stdout, stderr */
c76cf844
AK
1906 if (log_get_max_level() < LOG_DEBUG) {
1907 r = make_null_stdio();
1908 if (r < 0)
1909 log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
1910 }
1911
912541b0 1912 pid = fork();
0c5a109a
ZJS
1913 if (pid < 0)
1914 return log_error_errno(errno, "Failed to fork daemon: %m");
1915 if (pid > 0)
1916 /* parent */
1917 return 0;
912541b0 1918
0c5a109a 1919 /* child */
ece0fe12 1920 (void) setsid();
7500cd5e 1921 }
912541b0 1922
76e62a4d 1923 return main_loop(manager);
7fafc032 1924}