/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright © 2009 Canonical Ltd.
 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "device-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "libudev-device-internal.h"
#include "list.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "proc-cmdline.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "syslog-util.h"
#include "terminal-util.h"
#include "udev-builtin.h"
#include "udev-ctrl.h"
#include "udev-util.h"
#include "udev-watch.h"
#include "udev.h"
#include "user-util.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
static unsigned arg_children_max = 0;
static usec_t arg_exec_delay_usec = 0;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;

typedef struct Manager {
        sd_event *event;
        Hashmap *workers;
        LIST_HEAD(struct event, events);
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        Hashmap *properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;
        sd_event_source *kill_workers_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};

static void event_free(struct event *event) {
        int r;

        if (!event)
                return;
        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        if (LIST_IS_EMPTY(event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid_cached()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

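/* mark the event as running on the given worker and arm the per-event warning and kill timeouts */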
static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit();

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);
        sd_event_source_unref(manager->kill_workers_event);

        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        hashmap_free_free_free(manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);
        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}

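/* fork off a worker process: the child handles the given event and then waits on its own
 * monitor/signalfd loop for further devices sent to it by the main daemon */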
static void worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                _cleanup_(udev_device_unrefp) struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "fail to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                r = set_oom_score_adjust(0);
                if (r < 0)
                        log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");

                for (;;) {
                        _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev->device, arg_exec_delay_usec, rtnl);
                        if (!udev_event) {
                                r = -ENOMEM;
                                goto out;
                        }

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            shall_lock_device(dev)) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (!rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(dev->device);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        dev = udev_device_unref(dev);

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

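/* wrap a received device in a queue entry and append it to the manager's event list */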
static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                (void) kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex > 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 || event->ifindex > 0)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit();

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}

static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_debug("Cleanup idle workers");
        manager_kill_workers(manager);

        return 1;
}

static int manager_enable_kill_workers_event(Manager *manager) {
        int enabled, r;

        assert(manager);

        if (!manager->kill_workers_event)
                goto create_new;

        r = sd_event_source_get_enabled(manager->kill_workers_event, &enabled);
        if (r < 0) {
                log_debug_errno(r, "Failed to query whether event source for killing idle workers is enabled or not, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        if (enabled == SD_EVENT_ONESHOT)
                return 0;

        r = sd_event_source_set_time(manager->kill_workers_event, now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC);
        if (r < 0) {
                log_debug_errno(r, "Failed to set time to event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_ONESHOT);
        if (r < 0) {
                log_debug_errno(r, "Failed to enable event source for killing idle workers, trying to create new event source: %m");
                manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
                goto create_new;
        }

        return 0;

create_new:
        r = sd_event_add_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
                              now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC, on_kill_workers_event, manager);
        if (r < 0)
                return log_warning_errno(r, "Failed to create timer event for killing idle workers: %m");

        return 0;
}

static int manager_disable_kill_workers_event(Manager *manager) {
        int r;

        if (!manager->kill_workers_event)
                return 0;

        r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
        if (r < 0)
                return log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");

        return 0;
}

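/* walk the queue and hand every queued event that is not blocked by a related device to a
 * worker, reloading rules and builtins first if their on-disk state changed */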
static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        (void) manager_disable_kill_workers_event(manager);

        udev_builtin_init();

        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_name_timing);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct event *event, *tmp;

        LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

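/* a worker reported back on the shared socket pair; the SCM_CREDENTIALS control data
 * identifies which worker sent the message */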
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

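/* the main netlink monitor received a device event from the kernel */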
static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i, r;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str) {
                _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
                char *eq;

                eq = strchr(str, '=');
                if (!eq) {
                        log_error("Invalid key format '%s'", str);
                        return 1;
                }

                key = strndup(str, eq - str);
                if (!key) {
                        log_oom();
                        return 1;
                }

                old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);

                r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
                if (r < 0) {
                        log_oom();
                        return 1;
                }

                eq++;
                if (isempty(eq)) {
                        log_debug("udevd message (ENV) received, unset '%s'", key);

                        r = hashmap_put(manager->properties, key, NULL);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                } else {
                        val = strdup(eq);
                        if (!val) {
                                log_oom();
                                return 1;
                        }

                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);

                        r = hashmap_put(manager->properties, key, val);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                }

                key = val = NULL;
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

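/* a watched device node was closed after being written to; synthesize "change" uevents so
 * the rules are re-run, treating whole disks and their partitions specially */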
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devname, *syspath, *devtype;
        char filename[PATH_MAX];
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &devname);
        if (r < 0)
                return r;

        r = sd_device_get_syspath(dev, &syspath);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        if (streq_ptr("block", subsystem) &&
            streq_ptr("disk", devtype) &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("Device '%s' is closed, synthesising 'change'", devname);
                strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

                FOREACH_DEVICE(e, d) {
                        const char *t, *n, *s;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        if (sd_device_get_devname(d, &n) < 0 ||
                            sd_device_get_syspath(d, &s) < 0)
                                continue;

                        log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
                        strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
                }

                return 0;
        }

        log_debug("Device %s is closed, synthesising 'change'", devname);
        strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);

        return 0;
}

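/* inotify events for watched device nodes */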
static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        (void) manager_disable_kill_workers_event(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                if (udev_watch_lookup(e->wd, &dev) <= 0)
                        continue;

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE)
                        synthesize_change(dev);
                else if (e->mask & IN_IGNORED)
                        udev_watch_end(dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

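/* reap exited workers and clean up the events they were handling */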
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
                        log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                        /* delete state from disk */
                        udev_device_delete_db(worker->event->dev);
                        udev_device_tag_index(worker->event->dev, NULL, false);
                        /* forward kernel event without amending it */
                        udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        /* Disable unnecessary cleanup event */
        if (hashmap_isempty(manager->workers) && manager->kill_workers_event)
                (void) sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);

        return 1;
}

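/* post source, dispatched at the end of event loop iterations: once the event queue is
 * empty, schedule the idle-worker cleanup, or exit when no workers are left */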
static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        if (!LIST_IS_EMPTY(manager->events))
                return 1;

        /* There are no pending events. Let's cleanup idle process. */

        if (!hashmap_isempty(manager->workers)) {
                /* There are idle workers */
                (void) manager_enable_kill_workers_event(manager);
                return 1;
        }

        /* There are no idle workers. */

        if (manager->exit)
                return sd_event_exit(manager->event, 0);

        if (manager->cgroup)
                /* cleanup possible left-over processes in our cgroup */
                (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);

        return 1;
}

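/* take the control and netlink sockets passed in via socket activation, or create and
 * bind them ourselves if they were not passed */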
static int listen_fds(int *rctrl, int *rnetlink) {
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1) > 0) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;

                ctrl = udev_ctrl_new();
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;

                monitor = udev_monitor_new_from_netlink(NULL, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(netlink_fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (netlink_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_priority=<level>                 syslog priority
 *   udev.children_max=<number of workers>     events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>       delay execution of every executed program
 *   udev.event_timeout=<number of seconds>    seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r = 0;

        assert(key);

        if (!value)
                return 0;

        if (proc_cmdline_key_streq(key, "udev.log_priority")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = log_level_from_string(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_event_timeout_usec);
                if (r >= 0)
                        arg_event_timeout_warn_usec = DIV_ROUND_UP(arg_event_timeout_usec, 3);

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = parse_sec(value, &arg_exec_delay_usec);

        } else if (startswith(key, "udev."))
                log_warning("Unknown udev kernel command line option \"%s\"", key);

        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}

static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-udevd.service", "8", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               "\nSee the %s for details.\n"
               , program_invocation_short_name
               , link
        );

        return 0;
}

static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",             no_argument,            NULL, 'd' },
                { "debug",              no_argument,            NULL, 'D' },
                { "children-max",       required_argument,      NULL, 'c' },
                { "exec-delay",         required_argument,      NULL, 'e' },
                { "event-timeout",      required_argument,      NULL, 't' },
                { "resolve-names",      required_argument,      NULL, 'N' },
                { "help",               no_argument,            NULL, 'h' },
                { "version",            no_argument,            NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
                        break;
                case 'e':
                        r = parse_sec(optarg, &arg_exec_delay_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
                        break;
                case 't':
                        r = parse_sec(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning_errno(r, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg);

                        arg_event_timeout_warn_usec = DIV_ROUND_UP(arg_event_timeout_usec, 3);
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N': {
                        ResolveNameTiming t;

                        t = resolve_name_timing_from_string(optarg);
                        if (t < 0)
                                log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
                        else
                                arg_resolve_name_timing = t;
                        break;
                }
                case 'h':
                        return help();
                case 'V':
                        printf("%s\n", PACKAGE_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

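/* allocate the Manager and hook up all sockets, signals and timers on the event loop */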
static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        udev_builtin_init();

        manager->rules = udev_rules_new(arg_resolve_name_timing);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        LIST_HEAD_INIT(manager->events);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
        if (r < 0)
                return log_error_errno(r, "could not enable SO_PASSCRED: %m");

        r = udev_watch_init();
        if (r < 0)
                return log_error_errno(r, "Failed to create inotify descriptor: %m");
        manager->fd_inotify = r;

        udev_watch_restore();

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = TAKE_PTR(manager);

        return 0;
}

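/* apply static device node permissions, announce readiness and enter the main event loop */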
static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notifyf(false,
                          "READY=1\n"
                          "STATUS=Processing with %u children at max", arg_children_max);

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        log_set_max_level_realm(LOG_REALM_SYSTEMD, log_get_max_level());

        r = must_be_root();
        if (r < 0)
                goto exit;

        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 8;

                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                r = set_oom_score_adjust(-1000);
                if (r < 0)
                        log_debug_errno(r, "Failed to adjust OOM score, ignoring: %m");
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}