]> git.ipfire.org Git - thirdparty/systemd.git/blame - src/udev/udevd.c
libudev: conserve previous behavior
[thirdparty/systemd.git] / src / udev / udevd.c
CommitLineData
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
810adae9
LP
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
7fafc032
KS
6 */
7
7fafc032 8#include <errno.h>
618234a5
LP
9#include <fcntl.h>
10#include <getopt.h>
11#include <signal.h>
12#include <stdbool.h>
13#include <stddef.h>
7fafc032
KS
14#include <stdio.h>
15#include <stdlib.h>
16#include <string.h>
618234a5 17#include <sys/epoll.h>
3ebdb81e 18#include <sys/file.h>
618234a5
LP
19#include <sys/inotify.h>
20#include <sys/ioctl.h>
21#include <sys/mount.h>
1e03b754 22#include <sys/prctl.h>
1e03b754 23#include <sys/signalfd.h>
618234a5 24#include <sys/socket.h>
dc117daa 25#include <sys/stat.h>
618234a5
LP
26#include <sys/time.h>
27#include <sys/wait.h>
28#include <unistd.h>
7fafc032 29
392ef7a2 30#include "sd-daemon.h"
693d371d 31#include "sd-event.h"
8314de1d 32
b5efdb8a 33#include "alloc-util.h"
194bbe33 34#include "cgroup-util.h"
618234a5 35#include "cpu-set-util.h"
5ba2dc25 36#include "dev-setup.h"
70068602 37#include "device-util.h"
3ffd4af2 38#include "fd-util.h"
a5c32cff 39#include "fileio.h"
f97b34a6 40#include "format-util.h"
f4f15635 41#include "fs-util.h"
a505965d 42#include "hashmap.h"
c004493c 43#include "io-util.h"
70068602 44#include "libudev-device-internal.h"
40a57716 45#include "list.h"
618234a5 46#include "netlink-util.h"
6bedfcbb 47#include "parse-util.h"
4e731273 48#include "proc-cmdline.h"
618234a5
LP
49#include "process-util.h"
50#include "selinux-util.h"
51#include "signal-util.h"
8f328d36 52#include "socket-util.h"
07630cea 53#include "string-util.h"
618234a5 54#include "terminal-util.h"
07a26e42 55#include "udev-builtin.h"
7d68eb1b 56#include "udev-ctrl.h"
618234a5 57#include "udev-util.h"
70068602 58#include "udev-watch.h"
618234a5 59#include "udev.h"
ee104e11 60#include "user-util.h"
7fafc032 61
bba7a484
TG
/* Configuration knobs, set during startup from the command line / kernel
 * command line and treated as read-only afterwards. */
static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;                 /* max number of concurrent worker processes */
static int arg_exec_delay;                        /* passed to udev_event_new() — delay for RUN execution; see udev-event */
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;            /* hard per-event timeout (worker is killed) */
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;   /* warn after a third of the timeout */

/* Global state of the main udevd process. A forked worker inherits a copy and
 * immediately strips it down in worker_spawn(). */
typedef struct Manager {
        sd_event *event;                          /* main event loop */
        Hashmap *workers;                         /* PID (via PID_TO_PTR) -> struct worker */
        LIST_HEAD(struct event, events);          /* queue of pending/running uevents, in seqnum order */
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;                 /* compiled rules; dropped on reload, re-read lazily */
        Hashmap *properties;                      /* global properties handed to rule execution */

        struct udev_monitor *monitor;             /* netlink monitor receiving kernel uevents */
        struct udev_ctrl *ctrl;                   /* control socket (udevadm control) */
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;                           /* watch fd for block device "watch" handling */
        int worker_watch[2];                      /* socketpair: workers report completion on WRITE_END */

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;
        sd_event_source *kill_workers_event;      /* one-shot timer that reaps idle workers */

        usec_t last_usec;                         /* last time rules/builtins were checked for changes */

        bool stop_exec_queue:1;                   /* set via control socket: pause event dispatch */
        bool exit:1;                              /* set when shutting down */
} Manager;

enum event_state {
        EVENT_UNDEF,                              /* wildcard used by event_queue_cleanup() */
        EVENT_QUEUED,
        EVENT_RUNNING,
};

/* One queued kernel uevent. Identity fields (devpath, devnum, ifindex, …) are
 * cached here so dependency checks do not need the udev_device. */
struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev_device *dev;
        struct udev_device *dev_kernel;           /* clone of the original event without udev-added properties */
        struct worker *worker;                    /* worker currently processing this event, if any */
        enum event_state state;

        unsigned long long int delaying_seqnum;   /* seqnum of an earlier event this one waits for */
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;                  /* previous devpath on rename/move events */
        dev_t devnum;
        int ifindex;
        bool is_block;

        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,                           /* busy with an event */
        WORKER_IDLE,                              /* waiting for the next event */
        WORKER_KILLED,                            /* signal sent, waiting for it to exit */
};

struct worker {
        Manager *manager;
        pid_t pid;
        struct udev_monitor *monitor;             /* unicast monitor used to pass devices to this worker */
        enum worker_state state;
        struct event *event;                      /* event currently assigned to this worker */
};

/* passed from worker to main process; empty — the arrival of the message
 * (with SCM_CREDENTIALS identifying the sender) is the whole payload */
struct worker_message {
};
142
c6aa11f2 143static void event_free(struct event *event) {
cb49a4f2
TG
144 int r;
145
c6aa11f2
TG
146 if (!event)
147 return;
40a57716 148 assert(event->manager);
c6aa11f2 149
40a57716 150 LIST_REMOVE(event, event->manager->events, event);
912541b0 151 udev_device_unref(event->dev);
6969c349 152 udev_device_unref(event->dev_kernel);
c6aa11f2 153
693d371d
TG
154 sd_event_source_unref(event->timeout_warning);
155 sd_event_source_unref(event->timeout);
156
c6aa11f2
TG
157 if (event->worker)
158 event->worker->event = NULL;
159
40a57716 160 if (LIST_IS_EMPTY(event->manager->events)) {
cb49a4f2 161 /* only clean up the queue from the process that created it */
df0ff127 162 if (event->manager->pid == getpid_cached()) {
cb49a4f2
TG
163 r = unlink("/run/udev/queue");
164 if (r < 0)
165 log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
166 }
167 }
168
912541b0 169 free(event);
aa8734ff 170}
7a770250 171
c6aa11f2
TG
172static void worker_free(struct worker *worker) {
173 if (!worker)
174 return;
bc113de9 175
c0c6806b
TG
176 assert(worker->manager);
177
4a0b58c4 178 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
912541b0 179 udev_monitor_unref(worker->monitor);
c6aa11f2
TG
180 event_free(worker->event);
181
c6aa11f2 182 free(worker);
ff2c503d
KS
183}
184
c0c6806b 185static void manager_workers_free(Manager *manager) {
a505965d
TG
186 struct worker *worker;
187 Iterator i;
ff2c503d 188
c0c6806b
TG
189 assert(manager);
190
191 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 192 worker_free(worker);
a505965d 193
c0c6806b 194 manager->workers = hashmap_free(manager->workers);
fc465079
KS
195}
/* Register a freshly forked worker process with the manager.
 *
 * 'worker_monitor' is the unicast monitor the parent keeps for sending
 * devices to the worker; it is disconnected here (the fd stays open in the
 * child) but its address is retained. On success, *ret points to the new
 * worker, which is also stored in manager->workers keyed by PID.
 * Returns 0 on success, a negative errno on allocation failure. */
static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}
228
4fa4d885
TG
229static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
230 struct event *event = userdata;
231
232 assert(event);
233 assert(event->worker);
234
235 kill_and_sigcont(event->worker->pid, SIGKILL);
236 event->worker->state = WORKER_KILLED;
237
238 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
239
240 return 1;
241}
242
243static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
244 struct event *event = userdata;
245
246 assert(event);
247
248 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
249
250 return 1;
251}
252
39c19cf1 253static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
254 sd_event *e;
255 uint64_t usec;
693d371d 256
c6aa11f2 257 assert(worker);
693d371d 258 assert(worker->manager);
c6aa11f2
TG
259 assert(event);
260 assert(!event->worker);
261 assert(!worker->event);
262
39c19cf1 263 worker->state = WORKER_RUNNING;
39c19cf1
TG
264 worker->event = event;
265 event->state = EVENT_RUNNING;
c6aa11f2 266 event->worker = worker;
693d371d
TG
267
268 e = worker->manager->event;
269
3285baa8 270 assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 271
3285baa8 272 (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
693d371d
TG
273 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
274
3285baa8 275 (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
693d371d 276 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
277}
278
e237d8cb
TG
279static void manager_free(Manager *manager) {
280 if (!manager)
281 return;
282
2024ed61 283 udev_builtin_exit();
b2d21d93 284
693d371d
TG
285 sd_event_source_unref(manager->ctrl_event);
286 sd_event_source_unref(manager->uevent_event);
287 sd_event_source_unref(manager->inotify_event);
eca195ec 288 sd_event_source_unref(manager->kill_workers_event);
693d371d 289
693d371d 290 sd_event_unref(manager->event);
e237d8cb
TG
291 manager_workers_free(manager);
292 event_queue_cleanup(manager, EVENT_UNDEF);
293
294 udev_monitor_unref(manager->monitor);
295 udev_ctrl_unref(manager->ctrl);
296 udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
297
9b5150b6 298 hashmap_free_free_free(manager->properties);
e237d8cb 299 udev_rules_unref(manager->rules);
e237d8cb 300
e237d8cb
TG
301 safe_close(manager->fd_inotify);
302 safe_close_pair(manager->worker_watch);
303
304 free(manager);
305}
306
307DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
308
9a73bd7c
TG
309static int worker_send_message(int fd) {
310 struct worker_message message = {};
311
312 return loop_write(fd, &message, sizeof(message), false);
313}
314
fee854ee
RK
315static bool shall_lock_device(struct udev_device *dev) {
316 const char *sysname;
317
318 if (!streq_ptr("block", udev_device_get_subsystem(dev)))
319 return false;
320
321 sysname = udev_device_get_sysname(dev);
322 return !startswith(sysname, "dm-") &&
323 !startswith(sysname, "md") &&
324 !startswith(sysname, "drbd");
325}
326
c0c6806b 327static void worker_spawn(Manager *manager, struct event *event) {
8e766630 328 _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
912541b0 329 pid_t pid;
b6aab8ef 330 int r = 0;
912541b0
KS
331
332 /* listen for new events */
2024ed61 333 worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
912541b0
KS
334 if (worker_monitor == NULL)
335 return;
336 /* allow the main daemon netlink address to send devices to the worker */
c0c6806b 337 udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
b6aab8ef
TG
338 r = udev_monitor_enable_receiving(worker_monitor);
339 if (r < 0)
340 log_error_errno(r, "worker: could not enable receiving of device: %m");
912541b0 341
912541b0
KS
342 pid = fork();
343 switch (pid) {
344 case 0: {
cf28ad46 345 _cleanup_(udev_device_unrefp) struct udev_device *dev = NULL;
4afd3348 346 _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
912541b0 347 int fd_monitor;
e237d8cb 348 _cleanup_close_ int fd_signal = -1, fd_ep = -1;
2dd9f98d
TG
349 struct epoll_event ep_signal = { .events = EPOLLIN };
350 struct epoll_event ep_monitor = { .events = EPOLLIN };
912541b0 351 sigset_t mask;
912541b0 352
43095991 353 /* take initial device from queue */
1cc6c93a 354 dev = TAKE_PTR(event->dev);
912541b0 355
39fd2ca1
TG
356 unsetenv("NOTIFY_SOCKET");
357
c0c6806b 358 manager_workers_free(manager);
ecb17862 359 event_queue_cleanup(manager, EVENT_UNDEF);
6d1b1e0b 360
e237d8cb 361 manager->monitor = udev_monitor_unref(manager->monitor);
6d1b1e0b 362 manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
e237d8cb 363 manager->ctrl = udev_ctrl_unref(manager->ctrl);
e237d8cb 364 manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
912541b0 365
693d371d
TG
366 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
367 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
368 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
eca195ec 369 manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
693d371d
TG
370
371 manager->event = sd_event_unref(manager->event);
372
912541b0
KS
373 sigfillset(&mask);
374 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
375 if (fd_signal < 0) {
6af5e6a4 376 r = log_error_errno(errno, "error creating signalfd %m");
912541b0
KS
377 goto out;
378 }
2dd9f98d
TG
379 ep_signal.data.fd = fd_signal;
380
381 fd_monitor = udev_monitor_get_fd(worker_monitor);
382 ep_monitor.data.fd = fd_monitor;
912541b0
KS
383
384 fd_ep = epoll_create1(EPOLL_CLOEXEC);
385 if (fd_ep < 0) {
6af5e6a4 386 r = log_error_errno(errno, "error creating epoll fd: %m");
912541b0
KS
387 goto out;
388 }
389
912541b0
KS
390 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
391 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
6af5e6a4 392 r = log_error_errno(errno, "fail to add fds to epoll: %m");
912541b0
KS
393 goto out;
394 }
395
045e00cf
ZJS
396 /* Request TERM signal if parent exits.
397 Ignore error, not much we can do in that case. */
398 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
912541b0 399
045e00cf 400 /* Reset OOM score, we only protect the main daemon. */
76cdddfb
YW
401 r = set_oom_score_adjust(0);
402 if (r < 0)
403 log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");
145dae7e 404
912541b0 405 for (;;) {
c1118ceb 406 _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
6af5e6a4 407 int fd_lock = -1;
912541b0 408
3b64e4d4
TG
409 assert(dev);
410
9f6445e3 411 log_debug("seq %llu running", udev_device_get_seqnum(dev));
cf28ad46 412 udev_event = udev_event_new(dev->device, arg_exec_delay, rtnl);
0f86dc90 413 if (!udev_event) {
6af5e6a4 414 r = -ENOMEM;
912541b0
KS
415 goto out;
416 }
417
3ebdb81e 418 /*
2e5b17d0 419 * Take a shared lock on the device node; this establishes
3ebdb81e 420 * a concept of device "ownership" to serialize device
2e5b17d0 421 * access. External processes holding an exclusive lock will
3ebdb81e 422 * cause udev to skip the event handling; in the case udev
2e5b17d0 423 * acquired the lock, the external process can block until
3ebdb81e
KS
424 * udev has finished its event handling.
425 */
2e5b17d0 426 if (!streq_ptr(udev_device_get_action(dev), "remove") &&
fee854ee 427 shall_lock_device(dev)) {
3ebdb81e
KS
428 struct udev_device *d = dev;
429
430 if (streq_ptr("partition", udev_device_get_devtype(d)))
431 d = udev_device_get_parent(d);
432
433 if (d) {
434 fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
435 if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
56f64d95 436 log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
3d06f418 437 fd_lock = safe_close(fd_lock);
3ebdb81e
KS
438 goto skip;
439 }
440 }
441 }
442
912541b0 443 /* apply rules, create node, symlinks */
adeba500
KS
444 udev_event_execute_rules(udev_event,
445 arg_event_timeout_usec, arg_event_timeout_warn_usec,
9b5150b6 446 manager->properties,
8314de1d 447 manager->rules);
adeba500
KS
448
449 udev_event_execute_run(udev_event,
8314de1d 450 arg_event_timeout_usec, arg_event_timeout_warn_usec);
912541b0 451
e0bb2ff9 452 if (!rtnl)
523c620b 453 /* in case rtnl was initialized */
1c4baffc 454 rtnl = sd_netlink_ref(udev_event->rtnl);
4c83d994 455
912541b0 456 /* apply/restore inotify watch */
bf9bead1 457 if (udev_event->inotify_watch) {
70068602 458 udev_watch_begin(dev->device);
912541b0
KS
459 udev_device_update_db(dev);
460 }
461
3d06f418 462 safe_close(fd_lock);
3ebdb81e 463
912541b0
KS
464 /* send processed event back to libudev listeners */
465 udev_monitor_send_device(worker_monitor, NULL, dev);
466
3ebdb81e 467skip:
4914cb2d 468 log_debug("seq %llu processed", udev_device_get_seqnum(dev));
b66f29a1 469
912541b0 470 /* send udevd the result of the event execution */
e237d8cb 471 r = worker_send_message(manager->worker_watch[WRITE_END]);
b66f29a1 472 if (r < 0)
9a73bd7c 473 log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
b66f29a1 474 udev_device_get_seqnum(dev));
912541b0 475
cf28ad46 476 dev = udev_device_unref(dev);
912541b0 477
912541b0
KS
478 /* wait for more device messages from main udevd, or term signal */
479 while (dev == NULL) {
480 struct epoll_event ev[4];
481 int fdcount;
482 int i;
483
8fef0ff2 484 fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
912541b0
KS
485 if (fdcount < 0) {
486 if (errno == EINTR)
487 continue;
6af5e6a4 488 r = log_error_errno(errno, "failed to poll: %m");
912541b0
KS
489 goto out;
490 }
491
492 for (i = 0; i < fdcount; i++) {
493 if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
494 dev = udev_monitor_receive_device(worker_monitor);
495 break;
496 } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
497 struct signalfd_siginfo fdsi;
498 ssize_t size;
499
500 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
501 if (size != sizeof(struct signalfd_siginfo))
502 continue;
503 switch (fdsi.ssi_signo) {
504 case SIGTERM:
505 goto out;
506 }
507 }
508 }
509 }
510 }
82063a88 511out:
912541b0 512 udev_device_unref(dev);
e237d8cb 513 manager_free(manager);
baa30fbc 514 log_close();
8b46c3fc 515 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
912541b0
KS
516 }
517 case -1:
912541b0 518 event->state = EVENT_QUEUED;
56f64d95 519 log_error_errno(errno, "fork of child failed: %m");
912541b0
KS
520 break;
521 default:
e03c7cc2
TG
522 {
523 struct worker *worker;
524
c0c6806b 525 r = worker_new(&worker, manager, worker_monitor, pid);
3a19b32a 526 if (r < 0)
e03c7cc2 527 return;
e03c7cc2 528
39c19cf1
TG
529 worker_attach_event(worker, event);
530
1fa2f38f 531 log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
912541b0
KS
532 break;
533 }
e03c7cc2 534 }
7fafc032
KS
535}
536
c0c6806b 537static void event_run(Manager *manager, struct event *event) {
a505965d
TG
538 struct worker *worker;
539 Iterator i;
912541b0 540
c0c6806b
TG
541 assert(manager);
542 assert(event);
543
544 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
545 ssize_t count;
546
547 if (worker->state != WORKER_IDLE)
548 continue;
549
c0c6806b 550 count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
912541b0 551 if (count < 0) {
1fa2f38f
ZJS
552 log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
553 worker->pid, count);
cb542e84 554 (void) kill(worker->pid, SIGKILL);
912541b0
KS
555 worker->state = WORKER_KILLED;
556 continue;
557 }
39c19cf1 558 worker_attach_event(worker, event);
912541b0
KS
559 return;
560 }
561
c0c6806b 562 if (hashmap_size(manager->workers) >= arg_children_max) {
bba7a484 563 if (arg_children_max > 1)
c0c6806b 564 log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
912541b0
KS
565 return;
566 }
567
568 /* start new worker and pass initial device */
c0c6806b 569 worker_spawn(manager, event);
1e03b754
KS
570}
/* Wrap the freshly received 'dev' in a struct event and append it to the
 * manager's queue. On success the queue takes ownership of 'dev'; the
 * caller must unref it on failure. Creates the /run/udev/queue flag file
 * when the queue transitions from empty to non-empty. Returns 0 or -ENOMEM. */
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->manager = manager;
        event->dev = dev;
        /* keep a copy of the original kernel event, without udev-added properties */
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        /* cache identity fields used by is_devpath_busy() dependency checks */
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}
616
c0c6806b 617static void manager_kill_workers(Manager *manager) {
a505965d
TG
618 struct worker *worker;
619 Iterator i;
1e03b754 620
c0c6806b
TG
621 assert(manager);
622
623 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
624 if (worker->state == WORKER_KILLED)
625 continue;
1e03b754 626
912541b0 627 worker->state = WORKER_KILLED;
cb542e84 628 (void) kill(worker->pid, SIGTERM);
912541b0 629 }
1e03b754
KS
630}
631
e3196993 632/* lookup event for identical, parent, child device */
ecb17862 633static bool is_devpath_busy(Manager *manager, struct event *event) {
40a57716 634 struct event *loop_event;
912541b0
KS
635 size_t common;
636
637 /* check if queue contains events we depend on */
40a57716 638 LIST_FOREACH(event, loop_event, manager->events) {
87ac8d99 639 /* we already found a later event, earlier cannot block us, no need to check again */
912541b0
KS
640 if (loop_event->seqnum < event->delaying_seqnum)
641 continue;
642
643 /* event we checked earlier still exists, no need to check again */
644 if (loop_event->seqnum == event->delaying_seqnum)
645 return true;
646
647 /* found ourself, no later event can block us */
648 if (loop_event->seqnum >= event->seqnum)
649 break;
650
651 /* check major/minor */
652 if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
653 return true;
654
655 /* check network device ifindex */
656 if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
657 return true;
658
659 /* check our old name */
090be865 660 if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
912541b0
KS
661 event->delaying_seqnum = loop_event->seqnum;
662 return true;
663 }
664
665 /* compare devpath */
666 common = MIN(loop_event->devpath_len, event->devpath_len);
667
668 /* one devpath is contained in the other? */
669 if (memcmp(loop_event->devpath, event->devpath, common) != 0)
670 continue;
671
672 /* identical device event found */
673 if (loop_event->devpath_len == event->devpath_len) {
674 /* devices names might have changed/swapped in the meantime */
675 if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
676 continue;
677 if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
678 continue;
679 event->delaying_seqnum = loop_event->seqnum;
680 return true;
681 }
682
683 /* parent device event found */
684 if (event->devpath[common] == '/') {
685 event->delaying_seqnum = loop_event->seqnum;
686 return true;
687 }
688
689 /* child device event found */
690 if (loop_event->devpath[common] == '/') {
691 event->delaying_seqnum = loop_event->seqnum;
692 return true;
693 }
694
695 /* no matching device */
696 continue;
697 }
698
699 return false;
7fafc032
KS
700}
701
693d371d
TG
702static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
703 Manager *manager = userdata;
704
705 assert(manager);
706
707 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
708
709 sd_event_exit(manager->event, -ETIMEDOUT);
710
711 return 1;
712}
713
62d43dac 714static void manager_exit(Manager *manager) {
693d371d
TG
715 uint64_t usec;
716 int r;
62d43dac
TG
717
718 assert(manager);
719
720 manager->exit = true;
721
b79aacbf
TG
722 sd_notify(false,
723 "STOPPING=1\n"
724 "STATUS=Starting shutdown...");
725
62d43dac 726 /* close sources of new events and discard buffered events */
693d371d 727 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
ab7854df 728 manager->ctrl = udev_ctrl_unref(manager->ctrl);
62d43dac 729
693d371d 730 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
ab7854df 731 manager->fd_inotify = safe_close(manager->fd_inotify);
62d43dac 732
693d371d 733 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
ab7854df 734 manager->monitor = udev_monitor_unref(manager->monitor);
62d43dac
TG
735
736 /* discard queued events and kill workers */
737 event_queue_cleanup(manager, EVENT_QUEUED);
738 manager_kill_workers(manager);
693d371d 739
3285baa8 740 assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 741
3285baa8 742 r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
693d371d
TG
743 usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
744 if (r < 0)
745 return;
62d43dac
TG
746}
747
748/* reload requested, HUP signal received, rules changed, builtin changed */
749static void manager_reload(Manager *manager) {
750
751 assert(manager);
752
b79aacbf
TG
753 sd_notify(false,
754 "RELOADING=1\n"
755 "STATUS=Flushing configuration...");
756
62d43dac
TG
757 manager_kill_workers(manager);
758 manager->rules = udev_rules_unref(manager->rules);
2024ed61 759 udev_builtin_exit();
b79aacbf 760
1ef72b55
MS
761 sd_notifyf(false,
762 "READY=1\n"
763 "STATUS=Processing with %u children at max", arg_children_max);
62d43dac
TG
764}
765
eca195ec
YW
766static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
767 Manager *manager = userdata;
768
769 assert(manager);
770
771 log_debug("Cleanup idle workers");
772 manager_kill_workers(manager);
773
774 return 1;
775}
/* Arm (or re-arm) the one-shot timer that kills idle workers 3 seconds from
 * now. Reuses the existing event source where possible; any failure on the
 * existing source drops it and falls through to creating a fresh one.
 * Returns 0 on success, negative errno if the timer cannot be created. */
static int manager_enable_kill_workers_event(Manager *manager) {
        int enabled, r;

        assert(manager);

        if (!manager->kill_workers_event)
                goto create_new;

        r = sd_event_source_get_enabled(manager->kill_workers_event, &enabled);
        if (r < 0) {
                log_debug_errno(r, "Failed to query whether event source for killing idle workers is enabled or not, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        /* already armed as one-shot: nothing to do */
        if (enabled == SD_EVENT_ONESHOT)
                return 0;

        r = sd_event_source_set_time(manager->kill_workers_event, now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC);
        if (r < 0) {
                log_debug_errno(r, "Failed to set time to event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_ONESHOT);
        if (r < 0) {
                log_debug_errno(r, "Failed to enable event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        return 0;

create_new:
        r = sd_event_add_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
                              now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC, on_kill_workers_event, manager);
        if (r < 0)
                return log_warning_errno(r, "Failed to create timer event for killing idle workers: %m");

        return 0;
}
819
820static int manager_disable_kill_workers_event(Manager *manager) {
821 int r;
822
823 if (!manager->kill_workers_event)
824 return 0;
825
826 r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
827 if (r < 0)
828 return log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
829
830 return 0;
831}
/* Walk the queue and dispatch every EVENT_QUEUED entry whose dependencies
 * (per is_devpath_busy()) have completed. Also rate-limited (3s) check for
 * changed rules/builtins triggering a reload, disables the idle-worker kill
 * timer while events are pending, and lazily (re)loads the rules. No-op when
 * the queue is empty, exiting, or the exec queue is stopped. */
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        (void) manager_disable_kill_workers_event(manager);

        udev_builtin_init();

        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event,event,manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}
875
ecb17862 876static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 877 struct event *event, *tmp;
ff2c503d 878
40a57716 879 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
880 if (match_type != EVENT_UNDEF && match_type != event->state)
881 continue;
ff2c503d 882
c6aa11f2 883 event_free(event);
912541b0 884 }
ff2c503d
KS
885}
886
e82e8fa5 887static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b
TG
888 Manager *manager = userdata;
889
890 assert(manager);
891
912541b0
KS
892 for (;;) {
893 struct worker_message msg;
979558f3
TG
894 struct iovec iovec = {
895 .iov_base = &msg,
896 .iov_len = sizeof(msg),
897 };
898 union {
899 struct cmsghdr cmsghdr;
900 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
901 } control = {};
902 struct msghdr msghdr = {
903 .msg_iov = &iovec,
904 .msg_iovlen = 1,
905 .msg_control = &control,
906 .msg_controllen = sizeof(control),
907 };
908 struct cmsghdr *cmsg;
912541b0 909 ssize_t size;
979558f3 910 struct ucred *ucred = NULL;
a505965d 911 struct worker *worker;
912541b0 912
e82e8fa5 913 size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
979558f3 914 if (size < 0) {
738a7907
TG
915 if (errno == EINTR)
916 continue;
917 else if (errno == EAGAIN)
918 /* nothing more to read */
919 break;
979558f3 920
e82e8fa5 921 return log_error_errno(errno, "failed to receive message: %m");
979558f3
TG
922 } else if (size != sizeof(struct worker_message)) {
923 log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
e82e8fa5 924 continue;
979558f3
TG
925 }
926
2a1288ff 927 CMSG_FOREACH(cmsg, &msghdr) {
979558f3
TG
928 if (cmsg->cmsg_level == SOL_SOCKET &&
929 cmsg->cmsg_type == SCM_CREDENTIALS &&
930 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
931 ucred = (struct ucred*) CMSG_DATA(cmsg);
932 }
933
934 if (!ucred || ucred->pid <= 0) {
935 log_warning_errno(EIO, "ignoring worker message without valid PID");
936 continue;
937 }
912541b0
KS
938
939 /* lookup worker who sent the signal */
4a0b58c4 940 worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
a505965d
TG
941 if (!worker) {
942 log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
943 continue;
912541b0 944 }
c0bbfd72 945
a505965d
TG
946 if (worker->state != WORKER_KILLED)
947 worker->state = WORKER_IDLE;
948
949 /* worker returned */
950 event_free(worker->event);
912541b0 951 }
e82e8fa5 952
8302fe5a
TG
953 /* we have free workers, try to schedule events */
954 event_queue_start(manager);
955
e82e8fa5
TG
956 return 1;
957}
958
959static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 960 Manager *manager = userdata;
e82e8fa5
TG
961 struct udev_device *dev;
962 int r;
963
c0c6806b 964 assert(manager);
e82e8fa5 965
c0c6806b 966 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
967 if (dev) {
968 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 969 r = event_queue_insert(manager, dev);
e82e8fa5
TG
970 if (r < 0)
971 udev_device_unref(dev);
8302fe5a
TG
972 else
973 /* we have fresh events, try to schedule them */
974 event_queue_start(manager);
e82e8fa5
TG
975 }
976
977 return 1;
88f4b648
KS
978}
979
3b47c739 980/* receive the udevd message from userspace */
e82e8fa5 981static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 982 Manager *manager = userdata;
8e766630
LP
983 _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
984 _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
912541b0 985 const char *str;
9b5150b6 986 int i, r;
912541b0 987
c0c6806b 988 assert(manager);
e4f66b77 989
c0c6806b 990 ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
e4f66b77 991 if (!ctrl_conn)
e82e8fa5 992 return 1;
912541b0
KS
993
994 ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
e4f66b77 995 if (!ctrl_msg)
e82e8fa5 996 return 1;
912541b0
KS
997
998 i = udev_ctrl_get_set_log_level(ctrl_msg);
999 if (i >= 0) {
ed14edc0 1000 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
baa30fbc 1001 log_set_max_level(i);
c0c6806b 1002 manager_kill_workers(manager);
912541b0
KS
1003 }
1004
1005 if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
9f6445e3 1006 log_debug("udevd message (STOP_EXEC_QUEUE) received");
c0c6806b 1007 manager->stop_exec_queue = true;
912541b0
KS
1008 }
1009
1010 if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
9f6445e3 1011 log_debug("udevd message (START_EXEC_QUEUE) received");
c0c6806b 1012 manager->stop_exec_queue = false;
8302fe5a 1013 event_queue_start(manager);
912541b0
KS
1014 }
1015
1016 if (udev_ctrl_get_reload(ctrl_msg) > 0) {
9f6445e3 1017 log_debug("udevd message (RELOAD) received");
62d43dac 1018 manager_reload(manager);
912541b0
KS
1019 }
1020
1021 str = udev_ctrl_get_set_env(ctrl_msg);
9b5150b6
YW
1022 if (str) {
1023 _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
1024 char *eq;
1025
1026 eq = strchr(str, '=');
1027 if (!eq) {
1028 log_error("Invalid key format '%s'", str);
1029 return 1;
1030 }
1031
1032 key = strndup(str, eq - str);
1033 if (!key) {
1034 log_oom();
1035 return 1;
1036 }
1037
1038 old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);
1039
1040 r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
1041 if (r < 0) {
1042 log_oom();
1043 return 1;
912541b0 1044 }
9b5150b6
YW
1045
1046 eq++;
1047 if (!isempty(eq)) {
1048 log_debug("udevd message (ENV) received, unset '%s'", key);
1049
1050 r = hashmap_put(manager->properties, key, NULL);
1051 if (r < 0) {
1052 log_oom();
1053 return 1;
1054 }
1055 } else {
1056 val = strdup(eq);
1057 if (!val) {
1058 log_oom();
1059 return 1;
1060 }
1061
1062 log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
1063
1064 r = hashmap_put(manager->properties, key, val);
1065 if (r < 0) {
1066 log_oom();
1067 return 1;
1068 }
1069 }
1070
1071 key = val = NULL;
c0c6806b 1072 manager_kill_workers(manager);
912541b0
KS
1073 }
1074
1075 i = udev_ctrl_get_set_children_max(ctrl_msg);
1076 if (i >= 0) {
9f6445e3 1077 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
bba7a484 1078 arg_children_max = i;
1ef72b55
MS
1079
1080 (void) sd_notifyf(false,
1081 "READY=1\n"
1082 "STATUS=Processing with %u children at max", arg_children_max);
912541b0
KS
1083 }
1084
cb49a4f2 1085 if (udev_ctrl_get_ping(ctrl_msg) > 0)
9f6445e3 1086 log_debug("udevd message (SYNC) received");
912541b0
KS
1087
1088 if (udev_ctrl_get_exit(ctrl_msg) > 0) {
9f6445e3 1089 log_debug("udevd message (EXIT) received");
62d43dac 1090 manager_exit(manager);
c0c6806b
TG
1091 /* keep reference to block the client until we exit
1092 TODO: deal with several blocking exit requests */
1093 manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
912541b0 1094 }
e4f66b77 1095
e82e8fa5 1096 return 1;
88f4b648 1097}
4a231017 1098
70068602
YW
1099static int synthesize_change(sd_device *dev) {
1100 const char *subsystem, *sysname, *devname, *syspath, *devtype;
1101 char filename[PATH_MAX];
f3a740a5 1102 int r;
edd32000 1103
70068602
YW
1104 r = sd_device_get_subsystem(dev, &subsystem);
1105 if (r < 0)
1106 return r;
1107
1108 r = sd_device_get_sysname(dev, &sysname);
1109 if (r < 0)
1110 return r;
1111
1112 r = sd_device_get_devname(dev, &devname);
1113 if (r < 0)
1114 return r;
1115
1116 r = sd_device_get_syspath(dev, &syspath);
1117 if (r < 0)
1118 return r;
1119
1120 r = sd_device_get_devtype(dev, &devtype);
1121 if (r < 0)
1122 return r;
1123
1124 if (streq_ptr("block", subsystem) &&
1125 streq_ptr("disk", devtype) &&
1126 !startswith(sysname, "dm-")) {
1127 _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
1128 bool part_table_read = false, has_partitions = false;
1129 sd_device *d;
ede34445 1130 int fd;
f3a740a5 1131
ede34445 1132 /*
e9fc29f4
KS
1133 * Try to re-read the partition table. This only succeeds if
1134 * none of the devices is busy. The kernel returns 0 if no
1135 * partition table is found, and we will not get an event for
1136 * the disk.
ede34445 1137 */
70068602 1138 fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
ede34445 1139 if (fd >= 0) {
02ba8fb3
KS
1140 r = flock(fd, LOCK_EX|LOCK_NB);
1141 if (r >= 0)
1142 r = ioctl(fd, BLKRRPART, 0);
1143
ede34445
KS
1144 close(fd);
1145 if (r >= 0)
e9fc29f4 1146 part_table_read = true;
ede34445
KS
1147 }
1148
e9fc29f4 1149 /* search for partitions */
70068602 1150 r = sd_device_enumerator_new(&e);
f3a740a5
KS
1151 if (r < 0)
1152 return r;
1153
70068602 1154 r = sd_device_enumerator_allow_uninitialized(e);
f3a740a5
KS
1155 if (r < 0)
1156 return r;
1157
70068602 1158 r = sd_device_enumerator_add_match_parent(e, dev);
47a3fa0f
TA
1159 if (r < 0)
1160 return r;
e9fc29f4 1161
70068602
YW
1162 r = sd_device_enumerator_add_match_subsystem(e, "block", true);
1163 if (r < 0)
1164 return r;
e9fc29f4 1165
70068602
YW
1166 FOREACH_DEVICE(e, d) {
1167 const char *t;
e9fc29f4 1168
70068602
YW
1169 if (sd_device_get_devtype(d, &t) < 0 ||
1170 !streq("partition", t))
e9fc29f4
KS
1171 continue;
1172
1173 has_partitions = true;
1174 break;
1175 }
1176
1177 /*
1178 * We have partitions and re-read the table, the kernel already sent
1179 * out a "change" event for the disk, and "remove/add" for all
1180 * partitions.
1181 */
1182 if (part_table_read && has_partitions)
1183 return 0;
1184
1185 /*
1186 * We have partitions but re-reading the partition table did not
1187 * work, synthesize "change" for the disk and all partitions.
1188 */
70068602
YW
1189 log_debug("Device '%s' is closed, synthesising 'change'", devname);
1190 strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
57512c89 1191 write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
e9fc29f4 1192
70068602
YW
1193 FOREACH_DEVICE(e, d) {
1194 const char *t, *n, *s;
f3a740a5 1195
70068602
YW
1196 if (sd_device_get_devtype(d, &t) < 0 ||
1197 !streq("partition", t))
f3a740a5
KS
1198 continue;
1199
70068602
YW
1200 if (sd_device_get_devname(d, &n) < 0 ||
1201 sd_device_get_syspath(d, &s) < 0)
f3a740a5
KS
1202 continue;
1203
70068602
YW
1204 log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
1205 strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
57512c89 1206 write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
f3a740a5 1207 }
ede34445
KS
1208
1209 return 0;
f3a740a5
KS
1210 }
1211
70068602
YW
1212 log_debug("Device %s is closed, synthesising 'change'", devname);
1213 strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
57512c89 1214 write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
ede34445 1215
f3a740a5 1216 return 0;
edd32000
KS
1217}
1218
e82e8fa5 1219static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 1220 Manager *manager = userdata;
0254e944 1221 union inotify_event_buffer buffer;
f7c1ad4f
LP
1222 struct inotify_event *e;
1223 ssize_t l;
912541b0 1224
c0c6806b 1225 assert(manager);
e82e8fa5 1226
eca195ec
YW
1227 (void) manager_disable_kill_workers_event(manager);
1228
e82e8fa5 1229 l = read(fd, &buffer, sizeof(buffer));
f7c1ad4f 1230 if (l < 0) {
3742095b 1231 if (IN_SET(errno, EAGAIN, EINTR))
e82e8fa5 1232 return 1;
912541b0 1233
f7c1ad4f 1234 return log_error_errno(errno, "Failed to read inotify fd: %m");
912541b0
KS
1235 }
1236
f7c1ad4f 1237 FOREACH_INOTIFY_EVENT(e, buffer, l) {
70068602
YW
1238 _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
1239 const char *devnode;
1240
7fe3324c 1241 if (udev_watch_lookup(e->wd, &dev) <= 0)
70068602 1242 continue;
912541b0 1243
70068602 1244 if (sd_device_get_devname(dev, &devnode) < 0)
edd32000 1245 continue;
912541b0 1246
7fe3324c 1247 log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
da143134 1248 if (e->mask & IN_CLOSE_WRITE)
edd32000 1249 synthesize_change(dev);
da143134 1250 else if (e->mask & IN_IGNORED)
2024ed61 1251 udev_watch_end(dev);
912541b0
KS
1252 }
1253
e82e8fa5 1254 return 1;
bd284db1
SJR
1255}
1256
0561329d 1257static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1258 Manager *manager = userdata;
1259
1260 assert(manager);
1261
62d43dac 1262 manager_exit(manager);
912541b0 1263
e82e8fa5
TG
1264 return 1;
1265}
912541b0 1266
0561329d 1267static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1268 Manager *manager = userdata;
1269
1270 assert(manager);
1271
62d43dac 1272 manager_reload(manager);
912541b0 1273
e82e8fa5
TG
1274 return 1;
1275}
912541b0 1276
e82e8fa5 1277static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1278 Manager *manager = userdata;
1279
1280 assert(manager);
1281
e82e8fa5
TG
1282 for (;;) {
1283 pid_t pid;
1284 int status;
1285 struct worker *worker;
d1317d02 1286
e82e8fa5
TG
1287 pid = waitpid(-1, &status, WNOHANG);
1288 if (pid <= 0)
f29328d6 1289 break;
e82e8fa5 1290
4a0b58c4 1291 worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
e82e8fa5
TG
1292 if (!worker) {
1293 log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
f29328d6 1294 continue;
912541b0 1295 }
e82e8fa5
TG
1296
1297 if (WIFEXITED(status)) {
1298 if (WEXITSTATUS(status) == 0)
1299 log_debug("worker ["PID_FMT"] exited", pid);
1300 else
1301 log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
1302 } else if (WIFSIGNALED(status)) {
76341acc 1303 log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
e82e8fa5
TG
1304 } else if (WIFSTOPPED(status)) {
1305 log_info("worker ["PID_FMT"] stopped", pid);
f29328d6 1306 continue;
e82e8fa5
TG
1307 } else if (WIFCONTINUED(status)) {
1308 log_info("worker ["PID_FMT"] continued", pid);
f29328d6 1309 continue;
e82e8fa5
TG
1310 } else
1311 log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);
1312
05e6d9c6
YW
1313 if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
1314 log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
1315 /* delete state from disk */
1316 udev_device_delete_db(worker->event->dev);
1317 udev_device_tag_index(worker->event->dev, NULL, false);
1318 /* forward kernel event without amending it */
1319 udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
e82e8fa5
TG
1320 }
1321
1322 worker_free(worker);
912541b0 1323 }
e82e8fa5 1324
8302fe5a
TG
1325 /* we can start new workers, try to schedule events */
1326 event_queue_start(manager);
1327
eca195ec
YW
1328 /* Disable unnecessary cleanup event */
1329 if (hashmap_isempty(manager->workers) && manager->kill_workers_event)
1330 (void) sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
1331
e82e8fa5 1332 return 1;
f27125f9 1333}
1334
693d371d
TG
1335static int on_post(sd_event_source *s, void *userdata) {
1336 Manager *manager = userdata;
693d371d
TG
1337
1338 assert(manager);
1339
b6107f01
YW
1340 if (!LIST_IS_EMPTY(manager->events))
1341 return 1;
1342
1343 /* There are no pending events. Let's cleanup idle process. */
1344
1345 if (!hashmap_isempty(manager->workers)) {
1346 /* There are idle workers */
eca195ec 1347 (void) manager_enable_kill_workers_event(manager);
b6107f01 1348 return 1;
693d371d
TG
1349 }
1350
b6107f01
YW
1351 /* There are no idle workers. */
1352
1353 if (manager->exit)
1354 return sd_event_exit(manager->event, 0);
1355
1356 if (manager->cgroup)
1357 /* cleanup possible left-over processes in our cgroup */
1358 (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
1359
693d371d
TG
1360 return 1;
1361}
1362
fcff1e72
TG
1363static int listen_fds(int *rctrl, int *rnetlink) {
1364 int ctrl_fd = -1, netlink_fd = -1;
f59118ec 1365 int fd, n, r;
912541b0 1366
fcff1e72
TG
1367 assert(rctrl);
1368 assert(rnetlink);
1369
912541b0 1370 n = sd_listen_fds(true);
fcff1e72
TG
1371 if (n < 0)
1372 return n;
912541b0
KS
1373
1374 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
1375 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
fcff1e72
TG
1376 if (ctrl_fd >= 0)
1377 return -EINVAL;
1378 ctrl_fd = fd;
912541b0
KS
1379 continue;
1380 }
1381
1382 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
fcff1e72
TG
1383 if (netlink_fd >= 0)
1384 return -EINVAL;
1385 netlink_fd = fd;
912541b0
KS
1386 continue;
1387 }
1388
fcff1e72 1389 return -EINVAL;
912541b0
KS
1390 }
1391
f59118ec 1392 if (ctrl_fd < 0) {
8e766630 1393 _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;
f59118ec 1394
2024ed61 1395 ctrl = udev_ctrl_new();
f59118ec
TG
1396 if (!ctrl)
1397 return log_error_errno(EINVAL, "error initializing udev control socket");
1398
1399 r = udev_ctrl_enable_receiving(ctrl);
1400 if (r < 0)
1401 return log_error_errno(EINVAL, "error binding udev control socket");
1402
1403 fd = udev_ctrl_get_fd(ctrl);
1404 if (fd < 0)
1405 return log_error_errno(EIO, "could not get ctrl fd");
fcff1e72 1406
f59118ec
TG
1407 ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1408 if (ctrl_fd < 0)
1409 return log_error_errno(errno, "could not dup ctrl fd: %m");
1410 }
1411
1412 if (netlink_fd < 0) {
8e766630 1413 _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;
f59118ec 1414
2024ed61 1415 monitor = udev_monitor_new_from_netlink(NULL, "kernel");
f59118ec
TG
1416 if (!monitor)
1417 return log_error_errno(EINVAL, "error initializing netlink socket");
1418
1419 (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);
1420
1421 r = udev_monitor_enable_receiving(monitor);
1422 if (r < 0)
1423 return log_error_errno(EINVAL, "error binding netlink socket");
1424
1425 fd = udev_monitor_get_fd(monitor);
1426 if (fd < 0)
1427 return log_error_errno(netlink_fd, "could not get uevent fd: %m");
1428
1429 netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
a92cf784 1430 if (netlink_fd < 0)
f59118ec
TG
1431 return log_error_errno(errno, "could not dup netlink fd: %m");
1432 }
fcff1e72
TG
1433
1434 *rctrl = ctrl_fd;
1435 *rnetlink = netlink_fd;
912541b0 1436
912541b0 1437 return 0;
7459bcdc
KS
1438}
1439
e6f86cac 1440/*
3f85ef0f 1441 * read the kernel command line, in case we need to get into debug mode
1d84ad94
LP
1442 * udev.log_priority=<level> syslog priority
1443 * udev.children_max=<number of workers> events are fully serialized if set to 1
1444 * udev.exec_delay=<number of seconds> delay execution of every executed program
1445 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1446 */
96287a49 1447static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
92e72467 1448 int r = 0;
e6f86cac 1449
614a823c 1450 assert(key);
e6f86cac 1451
614a823c
TG
1452 if (!value)
1453 return 0;
e6f86cac 1454
1d84ad94
LP
1455 if (proc_cmdline_key_streq(key, "udev.log_priority")) {
1456
1457 if (proc_cmdline_value_missing(key, value))
1458 return 0;
1459
92e72467
ZJS
1460 r = util_log_priority(value);
1461 if (r >= 0)
1462 log_set_max_level(r);
1d84ad94
LP
1463
1464 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1465
1466 if (proc_cmdline_value_missing(key, value))
1467 return 0;
1468
92e72467
ZJS
1469 r = safe_atou64(value, &arg_event_timeout_usec);
1470 if (r >= 0) {
1471 arg_event_timeout_usec *= USEC_PER_SEC;
1472 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1473 }
1d84ad94
LP
1474
1475 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1476
1477 if (proc_cmdline_value_missing(key, value))
1478 return 0;
1479
020328e1 1480 r = safe_atou(value, &arg_children_max);
1d84ad94
LP
1481
1482 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1483
1484 if (proc_cmdline_value_missing(key, value))
1485 return 0;
1486
614a823c 1487 r = safe_atoi(value, &arg_exec_delay);
1d84ad94
LP
1488
1489 } else if (startswith(key, "udev."))
92e72467 1490 log_warning("Unknown udev kernel command line option \"%s\"", key);
614a823c 1491
92e72467
ZJS
1492 if (r < 0)
1493 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1d84ad94 1494
614a823c 1495 return 0;
e6f86cac
KS
1496}
1497
37ec0fdd
LP
1498static int help(void) {
1499 _cleanup_free_ char *link = NULL;
1500 int r;
1501
1502 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1503 if (r < 0)
1504 return log_oom();
1505
ed216e1f
TG
1506 printf("%s [OPTIONS...]\n\n"
1507 "Manages devices.\n\n"
5ac0162c 1508 " -h --help Print this message\n"
2d19c17e
MF
1509 " -V --version Print version of the program\n"
1510 " -d --daemon Detach and run in the background\n"
1511 " -D --debug Enable debug output\n"
1512 " -c --children-max=INT Set maximum number of workers\n"
1513 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1514 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1515 " -N --resolve-names=early|late|never\n"
5ac0162c 1516 " When to resolve users and groups\n"
37ec0fdd
LP
1517 "\nSee the %s for details.\n"
1518 , program_invocation_short_name
1519 , link
1520 );
1521
1522 return 0;
ed216e1f
TG
1523}
1524
bba7a484 1525static int parse_argv(int argc, char *argv[]) {
912541b0 1526 static const struct option options[] = {
bba7a484
TG
1527 { "daemon", no_argument, NULL, 'd' },
1528 { "debug", no_argument, NULL, 'D' },
1529 { "children-max", required_argument, NULL, 'c' },
1530 { "exec-delay", required_argument, NULL, 'e' },
1531 { "event-timeout", required_argument, NULL, 't' },
1532 { "resolve-names", required_argument, NULL, 'N' },
1533 { "help", no_argument, NULL, 'h' },
1534 { "version", no_argument, NULL, 'V' },
912541b0
KS
1535 {}
1536 };
689a97f5 1537
bba7a484 1538 int c;
689a97f5 1539
bba7a484
TG
1540 assert(argc >= 0);
1541 assert(argv);
912541b0 1542
e14b6f21 1543 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1544 int r;
912541b0 1545
bba7a484 1546 switch (c) {
912541b0 1547
912541b0 1548 case 'd':
bba7a484 1549 arg_daemonize = true;
912541b0
KS
1550 break;
1551 case 'c':
020328e1 1552 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8
TG
1553 if (r < 0)
1554 log_warning("Invalid --children-max ignored: %s", optarg);
912541b0
KS
1555 break;
1556 case 'e':
6f5cf8a8
TG
1557 r = safe_atoi(optarg, &arg_exec_delay);
1558 if (r < 0)
1559 log_warning("Invalid --exec-delay ignored: %s", optarg);
912541b0 1560 break;
9719859c 1561 case 't':
f1e8664e
TG
1562 r = safe_atou64(optarg, &arg_event_timeout_usec);
1563 if (r < 0)
65fea570 1564 log_warning("Invalid --event-timeout ignored: %s", optarg);
6f5cf8a8
TG
1565 else {
1566 arg_event_timeout_usec *= USEC_PER_SEC;
1567 arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
1568 }
9719859c 1569 break;
912541b0 1570 case 'D':
bba7a484 1571 arg_debug = true;
912541b0
KS
1572 break;
1573 case 'N':
090be865 1574 if (streq(optarg, "early")) {
bba7a484 1575 arg_resolve_names = 1;
090be865 1576 } else if (streq(optarg, "late")) {
bba7a484 1577 arg_resolve_names = 0;
090be865 1578 } else if (streq(optarg, "never")) {
bba7a484 1579 arg_resolve_names = -1;
912541b0 1580 } else {
9f6445e3 1581 log_error("resolve-names must be early, late or never");
bba7a484 1582 return 0;
912541b0
KS
1583 }
1584 break;
1585 case 'h':
37ec0fdd 1586 return help();
912541b0 1587 case 'V':
948aaa7c 1588 printf("%s\n", PACKAGE_VERSION);
bba7a484
TG
1589 return 0;
1590 case '?':
1591 return -EINVAL;
912541b0 1592 default:
bba7a484
TG
1593 assert_not_reached("Unhandled option");
1594
912541b0
KS
1595 }
1596 }
1597
bba7a484
TG
1598 return 1;
1599}
1600
b7f74dd4 1601static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1602 _cleanup_(manager_freep) Manager *manager = NULL;
6d5e65f6 1603 int r, fd_worker;
c0c6806b
TG
1604
1605 assert(ret);
11b1dd8c
TG
1606 assert(fd_ctrl >= 0);
1607 assert(fd_uevent >= 0);
c0c6806b
TG
1608
1609 manager = new0(Manager, 1);
1610 if (!manager)
1611 return log_oom();
1612
e237d8cb
TG
1613 manager->fd_inotify = -1;
1614 manager->worker_watch[WRITE_END] = -1;
1615 manager->worker_watch[READ_END] = -1;
1616
2024ed61 1617 udev_builtin_init();
b2d21d93 1618
2024ed61 1619 manager->rules = udev_rules_new(arg_resolve_names);
ecb17862
TG
1620 if (!manager->rules)
1621 return log_error_errno(ENOMEM, "error reading rules");
1622
40a57716 1623 LIST_HEAD_INIT(manager->events);
ecb17862 1624
c26d1879
TG
1625 manager->cgroup = cgroup;
1626
2024ed61 1627 manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
f59118ec
TG
1628 if (!manager->ctrl)
1629 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1630
2024ed61 1631 manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
f59118ec
TG
1632 if (!manager->monitor)
1633 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb
TG
1634
1635 /* unnamed socket from workers to the main daemon */
1636 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1637 if (r < 0)
1638 return log_error_errno(errno, "error creating socketpair: %m");
1639
693d371d 1640 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1641
2ff48e98 1642 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
e237d8cb 1643 if (r < 0)
2ff48e98 1644 return log_error_errno(r, "could not enable SO_PASSCRED: %m");
e237d8cb 1645
b7759e04
YW
1646 r = udev_watch_init();
1647 if (r < 0)
1648 return log_error_errno(r, "Failed to create inotify descriptor: %m");
1649 manager->fd_inotify = r;
e237d8cb 1650
2024ed61 1651 udev_watch_restore();
e237d8cb
TG
1652
1653 /* block and listen to all signals on signalfd */
72c0a2c2 1654 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1655
49f997f3
TG
1656 r = sd_event_default(&manager->event);
1657 if (r < 0)
709f6e46 1658 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1659
693d371d
TG
1660 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1661 if (r < 0)
1662 return log_error_errno(r, "error creating sigint event source: %m");
1663
1664 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1665 if (r < 0)
1666 return log_error_errno(r, "error creating sigterm event source: %m");
1667
1668 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1669 if (r < 0)
1670 return log_error_errno(r, "error creating sighup event source: %m");
1671
1672 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1673 if (r < 0)
1674 return log_error_errno(r, "error creating sigchld event source: %m");
1675
1676 r = sd_event_set_watchdog(manager->event, true);
1677 if (r < 0)
1678 return log_error_errno(r, "error creating watchdog event source: %m");
1679
11b1dd8c 1680 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1681 if (r < 0)
1682 return log_error_errno(r, "error creating ctrl event source: %m");
1683
1684 /* This needs to be after the inotify and uevent handling, to make sure
1685 * that the ping is send back after fully processing the pending uevents
1686 * (including the synthetic ones we may create due to inotify events).
1687 */
1688 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1689 if (r < 0)
1690 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1691
1692 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1693 if (r < 0)
1694 return log_error_errno(r, "error creating inotify event source: %m");
1695
11b1dd8c 1696 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1697 if (r < 0)
1698 return log_error_errno(r, "error creating uevent event source: %m");
1699
1700 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1701 if (r < 0)
1702 return log_error_errno(r, "error creating worker event source: %m");
1703
1704 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1705 if (r < 0)
1706 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1707
1cc6c93a 1708 *ret = TAKE_PTR(manager);
11b1dd8c 1709
86c3bece 1710 return 0;
c0c6806b
TG
1711}
1712
077fc5e2 1713static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1714 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1715 int r;
1716
1717 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1718 if (r < 0) {
1719 r = log_error_errno(r, "failed to allocate manager object: %m");
1720 goto exit;
1721 }
1722
1723 r = udev_rules_apply_static_dev_perms(manager->rules);
1724 if (r < 0)
1725 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1726
1ef72b55
MS
1727 (void) sd_notifyf(false,
1728 "READY=1\n"
1729 "STATUS=Processing with %u children at max", arg_children_max);
077fc5e2
DH
1730
1731 r = sd_event_loop(manager->event);
1732 if (r < 0) {
1733 log_error_errno(r, "event loop failed: %m");
1734 goto exit;
1735 }
1736
1737 sd_event_get_exit_code(manager->event, &r);
1738
1739exit:
1740 sd_notify(false,
1741 "STOPPING=1\n"
1742 "STATUS=Shutting down...");
1743 if (manager)
1744 udev_ctrl_cleanup(manager->ctrl);
1745 return r;
1746}
1747
1748int main(int argc, char *argv[]) {
c26d1879 1749 _cleanup_free_ char *cgroup = NULL;
efa1606e 1750 int fd_ctrl = -1, fd_uevent = -1;
e5d7bce1 1751 int r;
bba7a484 1752
bba7a484 1753 log_set_target(LOG_TARGET_AUTO);
b237a168 1754 udev_parse_config();
bba7a484
TG
1755 log_parse_environment();
1756 log_open();
1757
bba7a484
TG
1758 r = parse_argv(argc, argv);
1759 if (r <= 0)
1760 goto exit;
1761
1d84ad94 1762 r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
614a823c
TG
1763 if (r < 0)
1764 log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");
912541b0 1765
78d3e041
KS
1766 if (arg_debug) {
1767 log_set_target(LOG_TARGET_CONSOLE);
bba7a484 1768 log_set_max_level(LOG_DEBUG);
78d3e041 1769 }
bba7a484 1770
6174a243
YW
1771 log_set_max_level_realm(LOG_REALM_SYSTEMD, log_get_max_level());
1772
fba868fa
LP
1773 r = must_be_root();
1774 if (r < 0)
912541b0 1775 goto exit;
912541b0 1776
712cebf1
TG
1777 if (arg_children_max == 0) {
1778 cpu_set_t cpu_set;
e438c57a 1779 unsigned long mem_limit;
ebc164ef 1780
712cebf1 1781 arg_children_max = 8;
d457ff83 1782
ece174c5 1783 if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
faae64fa 1784 arg_children_max += CPU_COUNT(&cpu_set) * 8;
912541b0 1785
e438c57a
MW
1786 mem_limit = physical_memory() / (128LU*1024*1024);
1787 arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));
1788
712cebf1 1789 log_debug("set children_max to %u", arg_children_max);
d457ff83 1790 }
912541b0 1791
712cebf1
TG
1792 /* set umask before creating any file/directory */
1793 r = chdir("/");
1794 if (r < 0) {
1795 r = log_error_errno(errno, "could not change dir to /: %m");
1796 goto exit;
1797 }
194bbe33 1798
712cebf1 1799 umask(022);
912541b0 1800
c3dacc8b 1801 r = mac_selinux_init();
712cebf1
TG
1802 if (r < 0) {
1803 log_error_errno(r, "could not initialize labelling: %m");
1804 goto exit;
912541b0
KS
1805 }
1806
dae8b82e
ZJS
1807 r = mkdir_errno_wrapper("/run/udev", 0755);
1808 if (r < 0 && r != -EEXIST) {
1809 log_error_errno(r, "could not create /run/udev: %m");
712cebf1
TG
1810 goto exit;
1811 }
1812
03cfe0d5 1813 dev_setup(NULL, UID_INVALID, GID_INVALID);
912541b0 1814
c26d1879
TG
1815 if (getppid() == 1) {
1816 /* get our own cgroup, we regularly kill everything udev has left behind
1817 we only do this on systemd systems, and only if we are directly spawned
1818 by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
1819 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
11b9fb15 1820 if (r < 0) {
a2d61f07 1821 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
11b9fb15
TG
1822 log_debug_errno(r, "did not find dedicated cgroup: %m");
1823 else
1824 log_warning_errno(r, "failed to get cgroup: %m");
1825 }
c26d1879
TG
1826 }
1827
b7f74dd4
TG
1828 r = listen_fds(&fd_ctrl, &fd_uevent);
1829 if (r < 0) {
1830 r = log_error_errno(r, "could not listen on fds: %m");
1831 goto exit;
1832 }
1833
bba7a484 1834 if (arg_daemonize) {
912541b0 1835 pid_t pid;
912541b0 1836
948aaa7c 1837 log_info("starting version " PACKAGE_VERSION);
3cbb2057 1838
40e749b5 1839 /* connect /dev/null to stdin, stdout, stderr */
c76cf844
AK
1840 if (log_get_max_level() < LOG_DEBUG) {
1841 r = make_null_stdio();
1842 if (r < 0)
1843 log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
1844 }
1845
912541b0
KS
1846 pid = fork();
1847 switch (pid) {
1848 case 0:
1849 break;
1850 case -1:
6af5e6a4 1851 r = log_error_errno(errno, "fork of daemon failed: %m");
912541b0
KS
1852 goto exit;
1853 default:
f53d1fcd
TG
1854 mac_selinux_finish();
1855 log_close();
1856 _exit(EXIT_SUCCESS);
912541b0
KS
1857 }
1858
1859 setsid();
1860
76cdddfb
YW
1861 r = set_oom_score_adjust(-1000);
1862 if (r < 0)
1863 log_debug_errno(r, "Failed to adjust OOM score, ignoring: %m");
7500cd5e 1864 }
912541b0 1865
077fc5e2 1866 r = run(fd_ctrl, fd_uevent, cgroup);
693d371d 1867
53921bfa 1868exit:
cc56fafe 1869 mac_selinux_finish();
baa30fbc 1870 log_close();
6af5e6a4 1871 return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
7fafc032 1872}