]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
udev-rules: replace udev_device by sd_device in udev_rules_apply_to_event()
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
810adae9
LP
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
7fafc032
KS
6 */
7
7fafc032 8#include <errno.h>
618234a5
LP
9#include <fcntl.h>
10#include <getopt.h>
11#include <signal.h>
12#include <stdbool.h>
13#include <stddef.h>
7fafc032
KS
14#include <stdio.h>
15#include <stdlib.h>
16#include <string.h>
618234a5 17#include <sys/epoll.h>
3ebdb81e 18#include <sys/file.h>
618234a5
LP
19#include <sys/inotify.h>
20#include <sys/ioctl.h>
21#include <sys/mount.h>
1e03b754 22#include <sys/prctl.h>
1e03b754 23#include <sys/signalfd.h>
618234a5 24#include <sys/socket.h>
dc117daa 25#include <sys/stat.h>
618234a5
LP
26#include <sys/time.h>
27#include <sys/wait.h>
28#include <unistd.h>
7fafc032 29
392ef7a2 30#include "sd-daemon.h"
693d371d 31#include "sd-event.h"
8314de1d 32
b5efdb8a 33#include "alloc-util.h"
194bbe33 34#include "cgroup-util.h"
618234a5 35#include "cpu-set-util.h"
5ba2dc25 36#include "dev-setup.h"
70068602 37#include "device-util.h"
3ffd4af2 38#include "fd-util.h"
a5c32cff 39#include "fileio.h"
f97b34a6 40#include "format-util.h"
f4f15635 41#include "fs-util.h"
a505965d 42#include "hashmap.h"
c004493c 43#include "io-util.h"
70068602 44#include "libudev-device-internal.h"
40a57716 45#include "list.h"
618234a5 46#include "netlink-util.h"
6bedfcbb 47#include "parse-util.h"
4e731273 48#include "proc-cmdline.h"
618234a5
LP
49#include "process-util.h"
50#include "selinux-util.h"
51#include "signal-util.h"
8f328d36 52#include "socket-util.h"
07630cea 53#include "string-util.h"
618234a5 54#include "terminal-util.h"
07a26e42 55#include "udev-builtin.h"
7d68eb1b 56#include "udev-ctrl.h"
618234a5 57#include "udev-util.h"
70068602 58#include "udev-watch.h"
618234a5 59#include "udev.h"
ee104e11 60#include "user-util.h"
7fafc032 61
bba7a484
TG
/* Runtime options, filled in from the command line / kernel command line
 * during startup (parser not visible in this chunk). */
static bool arg_debug = false;
/* NOTE(review): declared int but only assigned bool values — presumably
 * historical; confirm before changing the type. */
static int arg_daemonize = false;
/* resolve-names mode; 1 is the default ("early" — TODO confirm against the
 * option parser, which is outside this chunk). */
static int arg_resolve_names = 1;
/* upper bound on concurrently running worker processes; 0 until initialized */
static unsigned arg_children_max;
/* delay for spawned programs — units not visible here; verify against udev_event_new() */
static int arg_exec_delay;
/* a worker is killed if a single event takes longer than this (see on_event_timeout) */
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
/* a warning is logged after one third of the kill timeout (see on_event_timeout_warning) */
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
c0c6806b
TG
69
/* Global daemon state: the event loop, the set of worker processes, the
 * queue of pending device events, and the netlink/control/inotify plumbing. */
typedef struct Manager {
        sd_event *event;                  /* main sd-event loop */
        Hashmap *workers;                 /* PID_TO_PTR(pid) -> struct worker */
        LIST_HEAD(struct event, events);  /* queued and running device events */
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;         /* compiled rules, reloaded on demand */
        Hashmap *properties;              /* global properties (freed with hashmap_free_free_free) */

        struct udev_monitor *monitor;     /* uevent netlink monitor */
        struct udev_ctrl *ctrl;           /* "udevadm control" socket */
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];              /* worker -> main result channel (READ_END/WRITE_END) */

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;
        sd_event_source *kill_workers_event;  /* oneshot timer reaping idle workers */

        usec_t last_usec;                 /* last rules/builtin timestamp check (event_queue_start) */

        bool stop_exec_queue:1;           /* dispatching paused via control message */
        bool exit:1;                      /* shutdown requested */
} Manager;
1e03b754 96
/* Lifecycle of a queued device event; EVENT_UNDEF doubles as the
 * "match anything" wildcard in event_queue_cleanup(). */
enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};
102
/* One queued uevent, linked into Manager.events. The devpath/devnum/ifindex
 * fields are cached from the device so dependency checks in is_devpath_busy()
 * do not have to query the udev_device repeatedly. */
struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev_device *dev;          /* the device being processed (handed to the worker) */
        struct udev_device *dev_kernel;   /* shallow clone of the original kernel event */
        struct worker *worker;            /* set while EVENT_RUNNING */
        enum event_state state;

        unsigned long long int delaying_seqnum;  /* seqnum of an earlier event blocking this one */
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;          /* previous devpath on rename, or NULL */
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning; /* fires at arg_event_timeout_warn_usec */
        sd_event_source *timeout;         /* fires at arg_event_timeout_usec, kills the worker */
};
121
ecb17862 122static void event_queue_cleanup(Manager *manager, enum event_state type);
ff2c503d 123
/* Lifecycle of a worker process as tracked by the main daemon. */
enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,   /* currently processing an event */
        WORKER_IDLE,      /* reported back, waiting for the next event */
        WORKER_KILLED,    /* signal sent, waiting for SIGCHLD */
};
130
/* One forked worker process, keyed by pid in Manager.workers. */
struct worker {
        Manager *manager;
        pid_t pid;
        struct udev_monitor *monitor;  /* unicast monitor used to feed devices to this worker */
        enum worker_state state;
        struct event *event;           /* event currently assigned, or NULL when idle */
};
138
/* passed from worker to main process; intentionally empty — the message is a
 * pure "event finished" signal, the sender is identified via SCM_CREDENTIALS */
struct worker_message {
};
142
/* Release one queued event: unlink it from the manager's queue, drop the
 * device references and timers, and detach it from its worker. When this was
 * the last event, the /run/udev/queue flag file is removed (only by the
 * process that created the queue). Safe to call with NULL. */
static void event_free(struct event *event) {
        int r;

        if (!event)
                return;
        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        /* the worker may outlive the event; make sure it does not point here */
        if (event->worker)
                event->worker->event = NULL;

        if (LIST_IS_EMPTY(event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid_cached()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}
7a770250 171
c6aa11f2
TG
/* Release one worker: remove it from the manager's pid map, drop its monitor
 * and any event still assigned to it. Safe to call with NULL. */
static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        /* frees the event too — it cannot run without its worker */
        event_free(worker->event);

        free(worker);
}
184
/* Free all workers and the workers hashmap itself. Note that worker_free()
 * removes each entry from the hashmap while we iterate — HASHMAP_FOREACH
 * tolerates removal of the current entry. */
static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}
196
/* Register a freshly forked worker with the manager.
 *
 * Takes a reference on worker_monitor (after disconnecting it in the parent —
 * only the address is needed for unicast sends) and stores the worker in
 * manager->workers keyed by pid. On success ownership of the struct moves to
 * the hashmap and *ret points at it; returns 0, or negative errno.
 *
 * NOTE(review): on the error paths after udev_monitor_ref() only the struct
 * itself is freed (_cleanup_free_), the monitor reference looks leaked —
 * confirm whether callers treat this as fatal anyway. */
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}
228
4fa4d885
TG
229static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
230 struct event *event = userdata;
231
232 assert(event);
233 assert(event->worker);
234
235 kill_and_sigcont(event->worker->pid, SIGKILL);
236 event->worker->state = WORKER_KILLED;
237
238 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
239
240 return 1;
241}
242
243static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
244 struct event *event = userdata;
245
246 assert(event);
247
248 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
249
250 return 1;
251}
252
39c19cf1 253static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
254 sd_event *e;
255 uint64_t usec;
693d371d 256
c6aa11f2 257 assert(worker);
693d371d 258 assert(worker->manager);
c6aa11f2
TG
259 assert(event);
260 assert(!event->worker);
261 assert(!worker->event);
262
39c19cf1 263 worker->state = WORKER_RUNNING;
39c19cf1
TG
264 worker->event = event;
265 event->state = EVENT_RUNNING;
c6aa11f2 266 event->worker = worker;
693d371d
TG
267
268 e = worker->manager->event;
269
3285baa8 270 assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 271
3285baa8 272 (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
693d371d
TG
273 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
274
3285baa8 275 (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
693d371d 276 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
277}
278
e237d8cb
TG
/* Tear down a Manager and everything it owns. Also used in the forked worker
 * (worker_spawn) to drop the inherited parent state. Safe to call with NULL.
 * Order matters: event sources before the loop, workers/queue before the
 * monitor and control sockets they reference. */
static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit();

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);
        sd_event_source_unref(manager->kill_workers_event);

        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        hashmap_free_free_free(manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}
306
307DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
308
9a73bd7c
TG
309static int worker_send_message(int fd) {
310 struct worker_message message = {};
311
312 return loop_write(fd, &message, sizeof(message), false);
313}
314
fee854ee
RK
/* Decide whether event handling should flock() the device node. Only plain
 * block devices are locked; device-mapper, MD and DRBD nodes coordinate
 * access themselves and are exempt. */
static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);
        if (startswith(sysname, "dm-"))
                return false;
        if (startswith(sysname, "md"))
                return false;
        if (startswith(sysname, "drbd"))
                return false;

        return true;
}
326
/* Fork a new worker to process 'event'.
 *
 * Parent: registers the child via worker_new() and attaches the event.
 * Child: drops all inherited manager state, then loops forever — process the
 * current device (rules, node/symlinks, RUN programs), report completion over
 * the worker_watch socketpair, and wait on epoll for either the next device
 * (unicast from the main daemon) or SIGTERM. The child never returns; it
 * _exit()s. */
static void worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                /* drop everything inherited from the main daemon; the worker
                 * only keeps its own monitor and the write end of the watch */
                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);

                manager->event = sd_event_unref(manager->event);

                /* block all signals; they are consumed via the signalfd below */
                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "fail to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                r = set_oom_score_adjust(0);
                if (r < 0)
                        log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

                for (;;) {
                        _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev, arg_exec_delay, rtnl);
                        if (!udev_event) {
                                r = -ENOMEM;
                                goto out;
                        }

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            shall_lock_device(dev)) {
                                struct udev_device *d = dev;

                                /* partitions are serialized via their parent disk */
                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        /* adopt the rtnl connection the event may have opened,
                         * so it is reused for subsequent events */
                        if (!rtnl)
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(dev->device);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                /* fork failed: requeue so another attempt can pick the event up */
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}
537
/* Dispatch one queued event: hand it to an idle worker if one exists,
 * otherwise fork a new worker — unless the children limit is reached, in
 * which case the event simply stays queued. */
static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        /* NOTE(review): logs errno while 'count' carries the failure
                         * code — verify errno is still meaningful here */
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}
572
/* Wrap a received device in a struct event and append it to the queue.
 * Takes ownership of 'dev' on success (the caller unrefs it on failure).
 * Creates the /run/udev/queue flag file when the queue becomes non-empty.
 * Returns 0 or negative errno. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->manager = manager;
        event->dev = dev;
        /* keep a pristine copy of the kernel event around */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        /* cache identity fields used by is_devpath_busy() */
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}
617
c0c6806b 618static void manager_kill_workers(Manager *manager) {
a505965d
TG
619 struct worker *worker;
620 Iterator i;
1e03b754 621
c0c6806b
TG
622 assert(manager);
623
624 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
625 if (worker->state == WORKER_KILLED)
626 continue;
1e03b754 627
912541b0 628 worker->state = WORKER_KILLED;
cb542e84 629 (void) kill(worker->pid, SIGTERM);
912541b0 630 }
1e03b754
KS
631}
632
/* lookup event for identical, parent, child device */
/* Returns true when an earlier queued/running event must finish before
 * 'event' may run: same device (by devnum/ifindex/devpath), a parent or a
 * child devpath, or our previous name. delaying_seqnum caches the blocking
 * event's seqnum so repeated scans can restart cheaply. */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}
702
693d371d
TG
703static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
704 Manager *manager = userdata;
705
706 assert(manager);
707
708 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
709
710 sd_event_exit(manager->event, -ETIMEDOUT);
711
712 return 1;
713}
714
/* Begin orderly shutdown: stop accepting new events, drop queued ones, ask
 * workers to terminate, and arm a 30s fallback timer that aborts the loop if
 * they do not finish (on_exit_timeout). */
static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        /* failure to arm the fallback timer is silently tolerated */
        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}
748
/* reload requested, HUP signal received, rules changed, builtin changed */
/* Drops the compiled rules and builtin state; they are lazily re-created by
 * event_queue_start(). Running workers are killed so no event finishes under
 * stale rules. */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit();

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}
766
eca195ec
YW
767static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
768 Manager *manager = userdata;
769
770 assert(manager);
771
772 log_debug("Cleanup idle workers");
773 manager_kill_workers(manager);
774
775 return 1;
776}
777
/* Arm (or re-arm) the oneshot timer that reaps idle workers 3 seconds from
 * now. Reuses the existing event source when possible; if any operation on
 * the old source fails, it is dropped and a fresh one is created. Returns 0,
 * or negative errno if the source could not be created. */
static int manager_enable_kill_workers_event(Manager *manager) {
        int enabled, r;

        assert(manager);

        if (!manager->kill_workers_event)
                goto create_new;

        r = sd_event_source_get_enabled(manager->kill_workers_event, &enabled);
        if (r < 0) {
                log_debug_errno(r, "Failed to query whether event source for killing idle workers is enabled or not, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        /* already armed as oneshot — nothing to do */
        if (enabled == SD_EVENT_ONESHOT)
                return 0;

        r = sd_event_source_set_time(manager->kill_workers_event, now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC);
        if (r < 0) {
                log_debug_errno(r, "Failed to set time to event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_ONESHOT);
        if (r < 0) {
                log_debug_errno(r, "Failed to enable event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        return 0;

create_new:
        r = sd_event_add_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
                              now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC, on_kill_workers_event, manager);
        if (r < 0)
                return log_warning_errno(r, "Failed to create timer event for killing idle workers: %m");

        return 0;
}
820
821static int manager_disable_kill_workers_event(Manager *manager) {
822 int r;
823
824 if (!manager->kill_workers_event)
825 return 0;
826
827 r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
828 if (r < 0)
829 return log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
830
831 return 0;
832}
833
/* Walk the queue and dispatch every EVENT_QUEUED entry whose device is not
 * blocked by an earlier event (is_devpath_busy). Also lazily (re)loads rules
 * and builtins, and rechecks their on-disk timestamps at most every 3s,
 * triggering a reload when they changed. */
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        /* work is pending again — do not reap idle workers for now */
        (void) manager_disable_kill_workers_event(manager);

        udev_builtin_init();

        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}
876
ecb17862 877static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 878 struct event *event, *tmp;
ff2c503d 879
40a57716 880 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
881 if (match_type != EVENT_UNDEF && match_type != event->state)
882 continue;
ff2c503d 883
c6aa11f2 884 event_free(event);
912541b0 885 }
ff2c503d
KS
886}
887
/* Drain completion messages from workers on the worker_watch read end. Each
 * message is an empty worker_message authenticated via SCM_CREDENTIALS; the
 * sender pid identifies the worker, which is then flipped back to idle and
 * its event freed. Finally tries to dispatch more queued events. */
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                /* extract the sender credentials attached by the kernel */
                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                /* a killed worker stays killed until reaped via SIGCHLD */
                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}
959
960static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 961 Manager *manager = userdata;
e82e8fa5
TG
962 struct udev_device *dev;
963 int r;
964
c0c6806b 965 assert(manager);
e82e8fa5 966
c0c6806b 967 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
968 if (dev) {
969 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 970 r = event_queue_insert(manager, dev);
e82e8fa5
TG
971 if (r < 0)
972 udev_device_unref(dev);
8302fe5a
TG
973 else
974 /* we have fresh events, try to schedule them */
975 event_queue_start(manager);
e82e8fa5
TG
976 }
977
978 return 1;
88f4b648
KS
979}
980
3b47c739 981/* receive the udevd message from userspace */
e82e8fa5 982static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 983 Manager *manager = userdata;
8e766630
LP
984 _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
985 _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
912541b0 986 const char *str;
9b5150b6 987 int i, r;
912541b0 988
c0c6806b 989 assert(manager);
e4f66b77 990
c0c6806b 991 ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
e4f66b77 992 if (!ctrl_conn)
e82e8fa5 993 return 1;
912541b0
KS
994
995 ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
e4f66b77 996 if (!ctrl_msg)
e82e8fa5 997 return 1;
912541b0
KS
998
999 i = udev_ctrl_get_set_log_level(ctrl_msg);
1000 if (i >= 0) {
ed14edc0 1001 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
baa30fbc 1002 log_set_max_level(i);
c0c6806b 1003 manager_kill_workers(manager);
912541b0
KS
1004 }
1005
1006 if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
9f6445e3 1007 log_debug("udevd message (STOP_EXEC_QUEUE) received");
c0c6806b 1008 manager->stop_exec_queue = true;
912541b0
KS
1009 }
1010
1011 if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
9f6445e3 1012 log_debug("udevd message (START_EXEC_QUEUE) received");
c0c6806b 1013 manager->stop_exec_queue = false;
8302fe5a 1014 event_queue_start(manager);
912541b0
KS
1015 }
1016
1017 if (udev_ctrl_get_reload(ctrl_msg) > 0) {
9f6445e3 1018 log_debug("udevd message (RELOAD) received");
62d43dac 1019 manager_reload(manager);
912541b0
KS
1020 }
1021
1022 str = udev_ctrl_get_set_env(ctrl_msg);
9b5150b6
YW
1023 if (str) {
1024 _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
1025 char *eq;
1026
1027 eq = strchr(str, '=');
1028 if (!eq) {
1029 log_error("Invalid key format '%s'", str);
1030 return 1;
1031 }
1032
1033 key = strndup(str, eq - str);
1034 if (!key) {
1035 log_oom();
1036 return 1;
1037 }
1038
1039 old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);
1040
1041 r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
1042 if (r < 0) {
1043 log_oom();
1044 return 1;
912541b0 1045 }
9b5150b6
YW
1046
1047 eq++;
1048 if (!isempty(eq)) {
1049 log_debug("udevd message (ENV) received, unset '%s'", key);
1050
1051 r = hashmap_put(manager->properties, key, NULL);
1052 if (r < 0) {
1053 log_oom();
1054 return 1;
1055 }
1056 } else {
1057 val = strdup(eq);
1058 if (!val) {
1059 log_oom();
1060 return 1;
1061 }
1062
1063 log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
1064
1065 r = hashmap_put(manager->properties, key, val);
1066 if (r < 0) {
1067 log_oom();
1068 return 1;
1069 }
1070 }
1071
1072 key = val = NULL;
c0c6806b 1073 manager_kill_workers(manager);
912541b0
KS
1074 }
1075
1076 i = udev_ctrl_get_set_children_max(ctrl_msg);
1077 if (i >= 0) {
9f6445e3 1078 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
bba7a484 1079 arg_children_max = i;
1ef72b55
MS
1080
1081 (void) sd_notifyf(false,
1082 "READY=1\n"
1083 "STATUS=Processing with %u children at max", arg_children_max);
912541b0
KS
1084 }
1085
cb49a4f2 1086 if (udev_ctrl_get_ping(ctrl_msg) > 0)
9f6445e3 1087 log_debug("udevd message (SYNC) received");
912541b0
KS
1088
1089 if (udev_ctrl_get_exit(ctrl_msg) > 0) {
9f6445e3 1090 log_debug("udevd message (EXIT) received");
62d43dac 1091 manager_exit(manager);
c0c6806b
TG
1092 /* keep reference to block the client until we exit
1093 TODO: deal with several blocking exit requests */
1094 manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
912541b0 1095 }
e4f66b77 1096
e82e8fa5 1097 return 1;
88f4b648 1098}
4a231017 1099
70068602
YW
/* Synthesize a "change" uevent for a device whose node was just closed for
 * writing, so rules re-run against its (possibly modified) content.
 * For whole-disk block devices (except device-mapper "dm-*"), first try
 * BLKRRPART so the kernel itself emits the events; only write synthetic
 * "change" uevents when that is not sufficient.
 * Returns 0 on success, negative errno on failure. */
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devname, *syspath, *devtype;
        char filename[PATH_MAX];
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &devname);
        if (r < 0)
                return r;

        r = sd_device_get_syspath(dev, &syspath);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        /* Whole disks get special treatment; "dm-*" devices are excluded.
         * NOTE(review): presumably because device-mapper handles its own
         * events — confirm against dm integration. */
        if (streq_ptr("block", subsystem) &&
            streq_ptr("disk", devtype) &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        /* take the BSD lock to avoid racing other users of the node */
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                /* check whether the disk currently has any partition children */
                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("Device '%s' is closed, synthesising 'change'", devname);
                strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

                FOREACH_DEVICE(e, d) {
                        const char *t, *n, *s;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        if (sd_device_get_devname(d, &n) < 0 ||
                            sd_device_get_syspath(d, &s) < 0)
                                continue;

                        log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
                        strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
                }

                return 0;
        }

        /* non-disk device: just poke its own uevent file */
        log_debug("Device %s is closed, synthesising 'change'", devname);
        strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

        return 0;
}
1219
e82e8fa5 1220static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 1221 Manager *manager = userdata;
0254e944 1222 union inotify_event_buffer buffer;
f7c1ad4f
LP
1223 struct inotify_event *e;
1224 ssize_t l;
912541b0 1225
c0c6806b 1226 assert(manager);
e82e8fa5 1227
eca195ec
YW
1228 (void) manager_disable_kill_workers_event(manager);
1229
e82e8fa5 1230 l = read(fd, &buffer, sizeof(buffer));
f7c1ad4f 1231 if (l < 0) {
3742095b 1232 if (IN_SET(errno, EAGAIN, EINTR))
e82e8fa5 1233 return 1;
912541b0 1234
f7c1ad4f 1235 return log_error_errno(errno, "Failed to read inotify fd: %m");
912541b0
KS
1236 }
1237
f7c1ad4f 1238 FOREACH_INOTIFY_EVENT(e, buffer, l) {
70068602
YW
1239 _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
1240 const char *devnode;
1241
7fe3324c 1242 if (udev_watch_lookup(e->wd, &dev) <= 0)
70068602 1243 continue;
912541b0 1244
70068602 1245 if (sd_device_get_devname(dev, &devnode) < 0)
edd32000 1246 continue;
912541b0 1247
7fe3324c 1248 log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
da143134 1249 if (e->mask & IN_CLOSE_WRITE)
edd32000 1250 synthesize_change(dev);
da143134 1251 else if (e->mask & IN_IGNORED)
2024ed61 1252 udev_watch_end(dev);
912541b0
KS
1253 }
1254
e82e8fa5 1255 return 1;
bd284db1
SJR
1256}
1257
0561329d 1258static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1259 Manager *manager = userdata;
1260
1261 assert(manager);
1262
62d43dac 1263 manager_exit(manager);
912541b0 1264
e82e8fa5
TG
1265 return 1;
1266}
912541b0 1267
0561329d 1268static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1269 Manager *manager = userdata;
1270
1271 assert(manager);
1272
62d43dac 1273 manager_reload(manager);
912541b0 1274
e82e8fa5
TG
1275 return 1;
1276}
912541b0 1277
e82e8fa5 1278static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1279 Manager *manager = userdata;
1280
1281 assert(manager);
1282
e82e8fa5
TG
1283 for (;;) {
1284 pid_t pid;
1285 int status;
1286 struct worker *worker;
d1317d02 1287
e82e8fa5
TG
1288 pid = waitpid(-1, &status, WNOHANG);
1289 if (pid <= 0)
f29328d6 1290 break;
e82e8fa5 1291
4a0b58c4 1292 worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
e82e8fa5
TG
1293 if (!worker) {
1294 log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
f29328d6 1295 continue;
912541b0 1296 }
e82e8fa5
TG
1297
1298 if (WIFEXITED(status)) {
1299 if (WEXITSTATUS(status) == 0)
1300 log_debug("worker ["PID_FMT"] exited", pid);
1301 else
1302 log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
1303 } else if (WIFSIGNALED(status)) {
76341acc 1304 log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
e82e8fa5
TG
1305 } else if (WIFSTOPPED(status)) {
1306 log_info("worker ["PID_FMT"] stopped", pid);
f29328d6 1307 continue;
e82e8fa5
TG
1308 } else if (WIFCONTINUED(status)) {
1309 log_info("worker ["PID_FMT"] continued", pid);
f29328d6 1310 continue;
e82e8fa5
TG
1311 } else
1312 log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);
1313
05e6d9c6
YW
1314 if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
1315 log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
1316 /* delete state from disk */
1317 udev_device_delete_db(worker->event->dev);
1318 udev_device_tag_index(worker->event->dev, NULL, false);
1319 /* forward kernel event without amending it */
1320 udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
e82e8fa5
TG
1321 }
1322
1323 worker_free(worker);
912541b0 1324 }
e82e8fa5 1325
8302fe5a
TG
1326 /* we can start new workers, try to schedule events */
1327 event_queue_start(manager);
1328
eca195ec
YW
1329 /* Disable unnecessary cleanup event */
1330 if (hashmap_isempty(manager->workers) && manager->kill_workers_event)
1331 (void) sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
1332
e82e8fa5 1333 return 1;
f27125f9 1334}
1335
693d371d
TG
1336static int on_post(sd_event_source *s, void *userdata) {
1337 Manager *manager = userdata;
693d371d
TG
1338
1339 assert(manager);
1340
b6107f01
YW
1341 if (!LIST_IS_EMPTY(manager->events))
1342 return 1;
1343
1344 /* There are no pending events. Let's cleanup idle process. */
1345
1346 if (!hashmap_isempty(manager->workers)) {
1347 /* There are idle workers */
eca195ec 1348 (void) manager_enable_kill_workers_event(manager);
b6107f01 1349 return 1;
693d371d
TG
1350 }
1351
b6107f01
YW
1352 /* There are no idle workers. */
1353
1354 if (manager->exit)
1355 return sd_event_exit(manager->event, 0);
1356
1357 if (manager->cgroup)
1358 /* cleanup possible left-over processes in our cgroup */
1359 (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
1360
693d371d
TG
1361 return 1;
1362}
1363
fcff1e72
TG
1364static int listen_fds(int *rctrl, int *rnetlink) {
1365 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1366 int fd, n, r;
912541b0 1367
fcff1e72
TG
1368 assert(rctrl);
1369 assert(rnetlink);
1370
912541b0 1371 n = sd_listen_fds(true);
fcff1e72
TG
1372 if (n < 0)
1373 return n;
912541b0
KS
1374
1375 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1376 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1377 if (ctrl_fd >= 0)
1378 return -EINVAL;
1379 ctrl_fd = fd;
912541b0
KS
1380 continue;
1381 }
1382
1383 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1384 if (netlink_fd >= 0)
1385 return -EINVAL;
1386 netlink_fd = fd;
912541b0
KS
1387 continue;
1388 }
1389
fcff1e72 1390 return -EINVAL;
912541b0
KS
1391 }
1392
f59118ec 1393 if (ctrl_fd < 0) {
8e766630 1394 _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;
f59118ec 1395
2024ed61 1396 ctrl = udev_ctrl_new();
f59118ec
TG
1397 if (!ctrl)
1398 return log_error_errno(EINVAL, "error initializing udev control socket");
1399
1400 r = udev_ctrl_enable_receiving(ctrl);
1401 if (r < 0)
1402 return log_error_errno(EINVAL, "error binding udev control socket");
1403
1404 fd = udev_ctrl_get_fd(ctrl);
1405 if (fd < 0)
1406 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1407
f59118ec
TG
1408 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1409 if (ctrl_fd < 0)
1410 return log_error_errno(errno, "could not dup ctrl fd: %m");
1411 }
1412
1413 if (netlink_fd < 0) {
8e766630 1414 _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;
f59118ec 1415
2024ed61 1416 monitor = udev_monitor_new_from_netlink(NULL, "kernel");
f59118ec
TG
1417 if (!monitor)
1418 return log_error_errno(EINVAL, "error initializing netlink socket");
1419
1420 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1421
1422 r = udev_monitor_enable_receiving(monitor);
1423 if (r < 0)
1424 return log_error_errno(EINVAL, "error binding netlink socket");
1425
1426 fd = udev_monitor_get_fd(monitor);
1427 if (fd < 0)
1428 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1429
1430 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1431 if (netlink_fd < 0)
f59118ec
TG
1432 return log_error_errno(errno, "could not dup netlink fd: %m");
1433 }
fcff1e72
TG
1434
1435 *rctrl = ctrl_fd;
1436 *rnetlink = netlink_fd;
912541b0 1437
912541b0 1438 return 0;
7459bcdc
KS
1439}
1440
e6f86cac 1441/*
3f85ef0f 1442 * read the kernel command line, in case we need to get into debug mode
1d84ad94
LP
1443 * udev.log_priority=<level> syslog priority
1444 * udev.children_max=<number of workers> events are fully serialized if set to 1
1445 * udev.exec_delay=<number of seconds> delay execution of every executed program
1446 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1447 */
96287a49 1448static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
92e72467 1449 int r = 0;
e6f86cac 1450
614a823c 1451 assert(key);
e6f86cac 1452
614a823c
TG
1453 if (!value)
1454 return 0;
e6f86cac 1455
1d84ad94
LP
1456 if (proc_cmdline_key_streq(key, "udev.log_priority")) {
1457
1458 if (proc_cmdline_value_missing(key, value))
1459 return 0;
1460
92e72467
ZJS
1461 r = util_log_priority(value);
1462 if (r >= 0)
1463 log_set_max_level(r);
1d84ad94
LP
1464
1465 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1466
1467 if (proc_cmdline_value_missing(key, value))
1468 return 0;
1469
92e72467
ZJS
1470 r = safe_atou64(value, &arg_event_timeout_usec);
1471 if (r >= 0) {
1472 arg_event_timeout_usec *= USEC_PER_SEC;
1473 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1474 }
1d84ad94
LP
1475
1476 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1477
1478 if (proc_cmdline_value_missing(key, value))
1479 return 0;
1480
020328e1 1481 r = safe_atou(value, &arg_children_max);
1d84ad94
LP
1482
1483 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1484
1485 if (proc_cmdline_value_missing(key, value))
1486 return 0;
1487
614a823c 1488 r = safe_atoi(value, &arg_exec_delay);
1d84ad94
LP
1489
1490 } else if (startswith(key, "udev."))
92e72467 1491 log_warning("Unknown udev kernel command line option \"%s\"", key);
614a823c 1492
92e72467
ZJS
1493 if (r < 0)
1494 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1d84ad94 1495
614a823c 1496 return 0;
e6f86cac
KS
1497}
1498
37ec0fdd
LP
/* Print command line usage, with the man page reference rendered as a
 * clickable terminal hyperlink where supported. Returns 0, or -ENOMEM
 * (via log_oom()) if the link could not be allocated. */
static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-udevd.service", "8", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               "\nSee the %s for details.\n"
               , program_invocation_short_name
               , link
        );

        return 0;
}
1525
bba7a484 1526static int parse_argv(int argc, char *argv[]) {
912541b0 1527 static const struct option options[] = {
bba7a484
TG
1528 { "daemon", no_argument, NULL, 'd' },
1529 { "debug", no_argument, NULL, 'D' },
1530 { "children-max", required_argument, NULL, 'c' },
1531 { "exec-delay", required_argument, NULL, 'e' },
1532 { "event-timeout", required_argument, NULL, 't' },
1533 { "resolve-names", required_argument, NULL, 'N' },
1534 { "help", no_argument, NULL, 'h' },
1535 { "version", no_argument, NULL, 'V' },
912541b0
KS
1536 {}
1537 };
689a97f5 1538
bba7a484 1539 int c;
689a97f5 1540
bba7a484
TG
1541 assert(argc >= 0);
1542 assert(argv);
912541b0 1543
e14b6f21 1544 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1545 int r;
912541b0 1546
bba7a484 1547 switch (c) {
912541b0 1548
912541b0 1549 case 'd':
bba7a484 1550 arg_daemonize = true;
912541b0
KS
1551 break;
1552 case 'c':
020328e1 1553 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1554 if (r < 0)
1555 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1556 break;
1557 case 'e':
6f5cf8a8
TG
1558 r = safe_atoi(optarg, &arg_exec_delay);
1559 if (r < 0)
1560 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1561 break;
9719859c 1562 case 't':
f1e8664e
TG
1563 r = safe_atou64(optarg, &arg_event_timeout_usec);
1564 if (r < 0)
65fea570 1565 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1566 else {
1567 arg_event_timeout_usec *= USEC_PER_SEC;
1568 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1569 }
9719859c 1570 break;
912541b0 1571 case 'D':
bba7a484 1572 arg_debug = true;
912541b0
KS
1573 break;
1574 case 'N':
090be865 1575 if (streq(optarg, "early")) {
bba7a484 1576 arg_resolve_names = 1;
090be865 1577 } else if (streq(optarg, "late")) {
bba7a484 1578 arg_resolve_names = 0;
090be865 1579 } else if (streq(optarg, "never")) {
bba7a484 1580 arg_resolve_names = -1;
912541b0 1581 } else {
9f6445e3 1582 log_error("resolve-names must be early, late or never");
bba7a484 1583 return 0;
912541b0
KS
1584 }
1585 break;
1586 case 'h':
37ec0fdd 1587 return help();
912541b0 1588 case 'V':
948aaa7c 1589 printf("%s\n", PACKAGE_VERSION);
bba7a484
TG
1590 return 0;
1591 case '?':
1592 return -EINVAL;
912541b0 1593 default:
bba7a484
TG
1594 assert_not_reached("Unhandled option");
1595
912541b0
KS
1596 }
1597 }
1598
bba7a484
TG
1599 return 1;
1600}
1601
b7f74dd4 1602static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1603 _cleanup_(manager_freep) Manager *manager = NULL;
6d5e65f6 1604 int r, fd_worker;
c0c6806b
TG
1605
1606 assert(ret);
11b1dd8c
TG
1607 assert(fd_ctrl >= 0);
1608 assert(fd_uevent >= 0);
c0c6806b
TG
1609
1610 manager = new0(Manager, 1);
1611 if (!manager)
1612 return log_oom();
1613
e237d8cb
TG
1614 manager->fd_inotify = -1;
1615 manager->worker_watch[WRITE_END] = -1;
1616 manager->worker_watch[READ_END] = -1;
1617
2024ed61 1618 udev_builtin_init();
b2d21d93 1619
2024ed61 1620 manager->rules = udev_rules_new(arg_resolve_names);
ecb17862
TG
1621 if (!manager->rules)
1622 return log_error_errno(ENOMEM, "error reading rules");
1623
40a57716 1624 LIST_HEAD_INIT(manager->events);
ecb17862 1625
c26d1879
TG
1626 manager->cgroup = cgroup;
1627
2024ed61 1628 manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
f59118ec
TG
1629 if (!manager->ctrl)
1630 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1631
2024ed61 1632 manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
f59118ec
TG
1633 if (!manager->monitor)
1634 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1635
1636 /* unnamed socket from workers to the main daemon */
1637 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1638 if (r < 0)
1639 return log_error_errno(errno, "error creating socketpair: %m");
1640
693d371d 1641 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1642
2ff48e98 1643 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
e237d8cb 1644 if (r < 0)
2ff48e98 1645 return log_error_errno(r, "could not enable SO_PASSCRED: %m");
e237d8cb 1646
b7759e04
YW
1647 r = udev_watch_init();
1648 if (r < 0)
1649 return log_error_errno(r, "Failed to create inotify descriptor: %m");
1650 manager->fd_inotify = r;
e237d8cb 1651
2024ed61 1652 udev_watch_restore();
e237d8cb
TG
1653
1654 /* block and listen to all signals on signalfd */
72c0a2c2 1655 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1656
49f997f3
TG
1657 r = sd_event_default(&manager->event);
1658 if (r < 0)
709f6e46 1659 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1660
693d371d
TG
1661 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1662 if (r < 0)
1663 return log_error_errno(r, "error creating sigint event source: %m");
1664
1665 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1666 if (r < 0)
1667 return log_error_errno(r, "error creating sigterm event source: %m");
1668
1669 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1670 if (r < 0)
1671 return log_error_errno(r, "error creating sighup event source: %m");
1672
1673 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1674 if (r < 0)
1675 return log_error_errno(r, "error creating sigchld event source: %m");
1676
1677 r = sd_event_set_watchdog(manager->event, true);
1678 if (r < 0)
1679 return log_error_errno(r, "error creating watchdog event source: %m");
1680
11b1dd8c 1681 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1682 if (r < 0)
1683 return log_error_errno(r, "error creating ctrl event source: %m");
1684
1685 /* This needs to be after the inotify and uevent handling, to make sure
1686 * that the ping is send back after fully processing the pending uevents
1687 * (including the synthetic ones we may create due to inotify events).
1688 */
1689 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1690 if (r < 0)
1691 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1692
1693 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1694 if (r < 0)
1695 return log_error_errno(r, "error creating inotify event source: %m");
1696
11b1dd8c 1697 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1698 if (r < 0)
1699 return log_error_errno(r, "error creating uevent event source: %m");
1700
1701 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1702 if (r < 0)
1703 return log_error_errno(r, "error creating worker event source: %m");
1704
1705 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1706 if (r < 0)
1707 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1708
1cc6c93a 1709 *ret = TAKE_PTR(manager);
11b1dd8c 1710
86c3bece 1711 return 0;
c0c6806b
TG
1712}
1713
077fc5e2 1714static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1715 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1716 int r;
1717
1718 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1719 if (r < 0) {
1720 r = log_error_errno(r, "failed to allocate manager object: %m");
1721 goto exit;
1722 }
1723
1724 r = udev_rules_apply_static_dev_perms(manager->rules);
1725 if (r < 0)
1726 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1727
1ef72b55
MS
1728 (void) sd_notifyf(false,
1729 "READY=1\n"
1730 "STATUS=Processing with %u children at max", arg_children_max);
077fc5e2
DH
1731
1732 r = sd_event_loop(manager->event);
1733 if (r < 0) {
1734 log_error_errno(r, "event loop failed: %m");
1735 goto exit;
1736 }
1737
1738 sd_event_get_exit_code(manager->event, &r);
1739
1740exit:
1741 sd_notify(false,
1742 "STOPPING=1\n"
1743 "STATUS=Shutting down...");
1744 if (manager)
1745 udev_ctrl_cleanup(manager->ctrl);
1746 return r;
1747}
1748
/* Daemon entry point: set up logging, parse configuration (argv and kernel
 * command line), prepare the runtime environment (/run/udev, /dev, SELinux,
 * cgroup), acquire or create the listening sockets, optionally daemonize,
 * and hand control to run(). */
int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        /* r <= 0 covers both errors and the --help/--version early exits */
        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        log_set_max_level_realm(LOG_REALM_SYSTEMD, log_get_max_level());

        r = must_be_root();
        if (r < 0)
                goto exit;

        /* no --children-max given: derive a default from CPU count and RAM */
        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 8;

                /* cap at one worker per 128 MiB of physical memory, floor of 10 */
                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        /* child: continues as the daemon */
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        /* parent: exits immediately */
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                /* protect the daemon from the OOM killer */
                r = set_oom_score_adjust(-1000);
                if (r < 0)
                        log_debug_errno(r, "Failed to adjust OOM score, ignoring: %m");
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}