src/udev/udevd.c (blame at commit "udevd: use event_reset_time() to update kill_workers_event")
e7145211 1/* SPDX-License-Identifier: GPL-2.0+ */
7fafc032 2/*
810adae9
LP
3 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
4 * Copyright © 2009 Canonical Ltd.
5 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
7fafc032
KS
6 */
7
7fafc032 8#include <errno.h>
618234a5
LP
9#include <fcntl.h>
10#include <getopt.h>
11#include <signal.h>
12#include <stdbool.h>
13#include <stddef.h>
7fafc032
KS
14#include <stdio.h>
15#include <stdlib.h>
16#include <string.h>
618234a5 17#include <sys/epoll.h>
3ebdb81e 18#include <sys/file.h>
618234a5
LP
19#include <sys/inotify.h>
20#include <sys/ioctl.h>
21#include <sys/mount.h>
1e03b754 22#include <sys/prctl.h>
1e03b754 23#include <sys/signalfd.h>
618234a5 24#include <sys/socket.h>
dc117daa 25#include <sys/stat.h>
618234a5
LP
26#include <sys/time.h>
27#include <sys/wait.h>
28#include <unistd.h>
7fafc032 29
392ef7a2 30#include "sd-daemon.h"
693d371d 31#include "sd-event.h"
8314de1d 32
b5efdb8a 33#include "alloc-util.h"
194bbe33 34#include "cgroup-util.h"
618234a5 35#include "cpu-set-util.h"
5ba2dc25 36#include "dev-setup.h"
70068602 37#include "device-util.h"
6d63048a 38#include "event-util.h"
3ffd4af2 39#include "fd-util.h"
a5c32cff 40#include "fileio.h"
f97b34a6 41#include "format-util.h"
f4f15635 42#include "fs-util.h"
a505965d 43#include "hashmap.h"
c004493c 44#include "io-util.h"
70068602 45#include "libudev-device-internal.h"
40a57716 46#include "list.h"
618234a5 47#include "netlink-util.h"
6bedfcbb 48#include "parse-util.h"
4e731273 49#include "proc-cmdline.h"
618234a5
LP
50#include "process-util.h"
51#include "selinux-util.h"
52#include "signal-util.h"
8f328d36 53#include "socket-util.h"
07630cea 54#include "string-util.h"
46f0fbd8 55#include "syslog-util.h"
618234a5 56#include "terminal-util.h"
07a26e42 57#include "udev-builtin.h"
7d68eb1b 58#include "udev-ctrl.h"
618234a5 59#include "udev-util.h"
70068602 60#include "udev-watch.h"
618234a5 61#include "udev.h"
ee104e11 62#include "user-util.h"
7fafc032 63
bba7a484
TG
64static bool arg_debug = false;
65static int arg_daemonize = false;
c4d44cba 66static ResolveNameTiming arg_resolve_name_timing = RESOLVE_NAME_EARLY;
216e8bbe 67static unsigned arg_children_max = 0;
6b92f429 68static usec_t arg_exec_delay_usec = 0;
bba7a484 69static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
c0c6806b
TG
70
71typedef struct Manager {
693d371d 72 sd_event *event;
c0c6806b 73 Hashmap *workers;
40a57716 74 LIST_HEAD(struct event, events);
c26d1879 75 const char *cgroup;
cb49a4f2 76 pid_t pid; /* the process that originally allocated the manager object */
c0c6806b 77
ecb17862 78 struct udev_rules *rules;
9b5150b6 79 Hashmap *properties;
c0c6806b
TG
80
81 struct udev_monitor *monitor;
82 struct udev_ctrl *ctrl;
83 struct udev_ctrl_connection *ctrl_conn_blocking;
e237d8cb 84 int fd_inotify;
e237d8cb
TG
85 int worker_watch[2];
86
693d371d
TG
87 sd_event_source *ctrl_event;
88 sd_event_source *uevent_event;
89 sd_event_source *inotify_event;
eca195ec 90 sd_event_source *kill_workers_event;
693d371d 91
7c4c7e89
TG
92 usec_t last_usec;
93
c0c6806b 94 bool stop_exec_queue:1;
c0c6806b
TG
95 bool exit:1;
96} Manager;
1e03b754 97
1e03b754 98enum event_state {
912541b0
KS
99 EVENT_UNDEF,
100 EVENT_QUEUED,
101 EVENT_RUNNING,
1e03b754
KS
102};
103
104struct event {
40a57716 105 LIST_FIELDS(struct event, event);
cb49a4f2 106 Manager *manager;
912541b0 107 struct udev_device *dev;
6969c349 108 struct udev_device *dev_kernel;
c6aa11f2 109 struct worker *worker;
912541b0 110 enum event_state state;
912541b0
KS
111 unsigned long long int delaying_seqnum;
112 unsigned long long int seqnum;
113 const char *devpath;
114 size_t devpath_len;
115 const char *devpath_old;
116 dev_t devnum;
912541b0 117 int ifindex;
ea6039a3 118 bool is_block;
693d371d
TG
119 sd_event_source *timeout_warning;
120 sd_event_source *timeout;
1e03b754
KS
121};
122
ecb17862 123static void event_queue_cleanup(Manager *manager, enum event_state type);
ff2c503d 124
1e03b754 125enum worker_state {
912541b0
KS
126 WORKER_UNDEF,
127 WORKER_RUNNING,
128 WORKER_IDLE,
129 WORKER_KILLED,
1e03b754
KS
130};
131
132struct worker {
c0c6806b 133 Manager *manager;
912541b0
KS
134 pid_t pid;
135 struct udev_monitor *monitor;
136 enum worker_state state;
137 struct event *event;
1e03b754
KS
138};
139
140/* passed from worker to main process */
141struct worker_message {
1e03b754
KS
142};
143
c6aa11f2 144static void event_free(struct event *event) {
cb49a4f2
TG
145 int r;
146
c6aa11f2
TG
147 if (!event)
148 return;
40a57716 149 assert(event->manager);
c6aa11f2 150
40a57716 151 LIST_REMOVE(event, event->manager->events, event);
912541b0 152 udev_device_unref(event->dev);
6969c349 153 udev_device_unref(event->dev_kernel);
c6aa11f2 154
693d371d
TG
155 sd_event_source_unref(event->timeout_warning);
156 sd_event_source_unref(event->timeout);
157
c6aa11f2
TG
158 if (event->worker)
159 event->worker->event = NULL;
160
40a57716 161 if (LIST_IS_EMPTY(event->manager->events)) {
cb49a4f2 162 /* only clean up the queue from the process that created it */
df0ff127 163 if (event->manager->pid == getpid_cached()) {
cb49a4f2
TG
164 r = unlink("/run/udev/queue");
165 if (r < 0)
166 log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
167 }
168 }
169
912541b0 170 free(event);
aa8734ff 171}
7a770250 172
c6aa11f2
TG
173static void worker_free(struct worker *worker) {
174 if (!worker)
175 return;
bc113de9 176
c0c6806b
TG
177 assert(worker->manager);
178
4a0b58c4 179 hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
912541b0 180 udev_monitor_unref(worker->monitor);
c6aa11f2
TG
181 event_free(worker->event);
182
c6aa11f2 183 free(worker);
ff2c503d
KS
184}
185
c0c6806b 186static void manager_workers_free(Manager *manager) {
a505965d
TG
187 struct worker *worker;
188 Iterator i;
ff2c503d 189
c0c6806b
TG
190 assert(manager);
191
192 HASHMAP_FOREACH(worker, manager->workers, i)
c6aa11f2 193 worker_free(worker);
a505965d 194
c0c6806b 195 manager->workers = hashmap_free(manager->workers);
fc465079
KS
196}
197
c0c6806b 198static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
a505965d
TG
199 _cleanup_free_ struct worker *worker = NULL;
200 int r;
3a19b32a
TG
201
202 assert(ret);
c0c6806b 203 assert(manager);
3a19b32a
TG
204 assert(worker_monitor);
205 assert(pid > 1);
206
207 worker = new0(struct worker, 1);
208 if (!worker)
209 return -ENOMEM;
210
c0c6806b 211 worker->manager = manager;
3a19b32a
TG
212 /* close monitor, but keep address around */
213 udev_monitor_disconnect(worker_monitor);
214 worker->monitor = udev_monitor_ref(worker_monitor);
215 worker->pid = pid;
a505965d 216
c0c6806b 217 r = hashmap_ensure_allocated(&manager->workers, NULL);
a505965d
TG
218 if (r < 0)
219 return r;
220
4a0b58c4 221 r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
a505965d
TG
222 if (r < 0)
223 return r;
224
ae2a15bc 225 *ret = TAKE_PTR(worker);
3a19b32a
TG
226
227 return 0;
228}
229
4fa4d885
TG
230static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
231 struct event *event = userdata;
232
233 assert(event);
234 assert(event->worker);
235
236 kill_and_sigcont(event->worker->pid, SIGKILL);
237 event->worker->state = WORKER_KILLED;
238
239 log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);
240
241 return 1;
242}
243
244static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
245 struct event *event = userdata;
246
247 assert(event);
248
249 log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);
250
251 return 1;
252}
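/* Both timers are armed per event in worker_attach_event() below: the warning handler
 * fires first, after udev_warn_timeout(arg_event_timeout_usec), and only logs, while the
 * kill handler fires once the full arg_event_timeout_usec (180 s by default, see the
 * options at the top of this file) has elapsed and SIGKILLs the worker, so a stuck event
 * cannot block the queue forever. */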
253
39c19cf1 254static void worker_attach_event(struct worker *worker, struct event *event) {
693d371d
TG
255 sd_event *e;
256 uint64_t usec;
693d371d 257
c6aa11f2 258 assert(worker);
693d371d 259 assert(worker->manager);
c6aa11f2
TG
260 assert(event);
261 assert(!event->worker);
262 assert(!worker->event);
263
39c19cf1 264 worker->state = WORKER_RUNNING;
39c19cf1
TG
265 worker->event = event;
266 event->state = EVENT_RUNNING;
c6aa11f2 267 event->worker = worker;
693d371d
TG
268
269 e = worker->manager->event;
270
3285baa8 271 assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 272
3285baa8 273 (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
66f737b4 274 usec + udev_warn_timeout(arg_event_timeout_usec), USEC_PER_SEC, on_event_timeout_warning, event);
693d371d 275
3285baa8 276 (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
693d371d 277 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
39c19cf1
TG
278}
279
e237d8cb
TG
280static void manager_free(Manager *manager) {
281 if (!manager)
282 return;
283
2024ed61 284 udev_builtin_exit();
b2d21d93 285
693d371d
TG
286 sd_event_source_unref(manager->ctrl_event);
287 sd_event_source_unref(manager->uevent_event);
288 sd_event_source_unref(manager->inotify_event);
eca195ec 289 sd_event_source_unref(manager->kill_workers_event);
693d371d 290
693d371d 291 sd_event_unref(manager->event);
e237d8cb
TG
292 manager_workers_free(manager);
293 event_queue_cleanup(manager, EVENT_UNDEF);
294
295 udev_monitor_unref(manager->monitor);
296 udev_ctrl_unref(manager->ctrl);
297 udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
298
9b5150b6 299 hashmap_free_free_free(manager->properties);
e237d8cb 300 udev_rules_unref(manager->rules);
e237d8cb 301
e237d8cb
TG
302 safe_close(manager->fd_inotify);
303 safe_close_pair(manager->worker_watch);
304
305 free(manager);
306}
307
308DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
309
9a73bd7c
TG
310static int worker_send_message(int fd) {
311 struct worker_message message = {};
312
313 return loop_write(fd, &message, sizeof(message), false);
314}
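/* The "message" is an empty struct sent over the SOCK_DGRAM socketpair that manager_new()
 * creates as manager->worker_watch; it only signals "this worker is idle again". The
 * receiving side, on_worker(), does not look at the payload and instead identifies the
 * sender through the SCM_CREDENTIALS ancillary data enabled by SO_PASSCRED on the socket. */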
315
fee854ee
RK
316static bool shall_lock_device(struct udev_device *dev) {
317 const char *sysname;
318
319 if (!streq_ptr("block", udev_device_get_subsystem(dev)))
320 return false;
321
322 sysname = udev_device_get_sysname(dev);
323 return !startswith(sysname, "dm-") &&
324 !startswith(sysname, "md") &&
325 !startswith(sysname, "drbd");
326}
327
c0c6806b 328static void worker_spawn(Manager *manager, struct event *event) {
8e766630 329 _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
912541b0 330 pid_t pid;
b6aab8ef 331 int r = 0;
912541b0
KS
332
333 /* listen for new events */
2024ed61 334 worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
912541b0
KS
335 if (worker_monitor == NULL)
336 return;
337 /* allow the main daemon netlink address to send devices to the worker */
c0c6806b 338 udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
b6aab8ef
TG
339 r = udev_monitor_enable_receiving(worker_monitor);
340 if (r < 0)
341 log_error_errno(r, "worker: could not enable receiving of device: %m");
912541b0 342
912541b0
KS
343 pid = fork();
344 switch (pid) {
345 case 0: {
cf28ad46 346 _cleanup_(udev_device_unrefp) struct udev_device *dev = NULL;
4afd3348 347 _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
912541b0 348 int fd_monitor;
e237d8cb 349 _cleanup_close_ int fd_signal = -1, fd_ep = -1;
2dd9f98d
TG
350 struct epoll_event ep_signal = { .events = EPOLLIN };
351 struct epoll_event ep_monitor = { .events = EPOLLIN };
912541b0 352 sigset_t mask;
912541b0 353
43095991 354 /* take initial device from queue */
1cc6c93a 355 dev = TAKE_PTR(event->dev);
912541b0 356
39fd2ca1
TG
357 unsetenv("NOTIFY_SOCKET");
358
c0c6806b 359 manager_workers_free(manager);
ecb17862 360 event_queue_cleanup(manager, EVENT_UNDEF);
6d1b1e0b 361
e237d8cb 362 manager->monitor = udev_monitor_unref(manager->monitor);
6d1b1e0b 363 manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
e237d8cb 364 manager->ctrl = udev_ctrl_unref(manager->ctrl);
e237d8cb 365 manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
912541b0 366
693d371d
TG
367 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
368 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
369 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
eca195ec 370 manager->kill_workers_event = sd_event_source_unref(manager->kill_workers_event);
693d371d
TG
371
372 manager->event = sd_event_unref(manager->event);
373
912541b0
KS
374 sigfillset(&mask);
375 fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
376 if (fd_signal < 0) {
6af5e6a4 377 r = log_error_errno(errno, "error creating signalfd: %m");
912541b0
KS
378 goto out;
379 }
2dd9f98d
TG
380 ep_signal.data.fd = fd_signal;
381
382 fd_monitor = udev_monitor_get_fd(worker_monitor);
383 ep_monitor.data.fd = fd_monitor;
912541b0
KS
384
385 fd_ep = epoll_create1(EPOLL_CLOEXEC);
386 if (fd_ep < 0) {
6af5e6a4 387 r = log_error_errno(errno, "error creating epoll fd: %m");
912541b0
KS
388 goto out;
389 }
390
912541b0
KS
391 if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
392 epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
6af5e6a4 393 r = log_error_errno(errno, "failed to add fds to epoll: %m");
912541b0
KS
394 goto out;
395 }
396
045e00cf
ZJS
397 /* Request TERM signal if parent exits.
398 Ignore error, not much we can do in that case. */
399 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
912541b0 400
045e00cf 401 /* Reset OOM score, we only protect the main daemon. */
76cdddfb
YW
402 r = set_oom_score_adjust(0);
403 if (r < 0)
404 log_debug_errno(r, "Failed to reset OOM score, ignoring: %m");
145dae7e 405
912541b0 406 for (;;) {
c1118ceb 407 _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
6af5e6a4 408 int fd_lock = -1;
912541b0 409
3b64e4d4
TG
410 assert(dev);
411
9f6445e3 412 log_debug("seq %llu running", udev_device_get_seqnum(dev));
6b92f429 413 udev_event = udev_event_new(dev->device, arg_exec_delay_usec, rtnl);
0f86dc90 414 if (!udev_event) {
6af5e6a4 415 r = -ENOMEM;
912541b0
KS
416 goto out;
417 }
418
3ebdb81e 419 /*
2e5b17d0 420 * Take a shared lock on the device node; this establishes
3ebdb81e 421 * a concept of device "ownership" to serialize device
2e5b17d0 422 * access. External processes holding an exclusive lock will
3ebdb81e 423 * cause udev to skip the event handling; in the case udev
2e5b17d0 424 * acquired the lock, the external process can block until
3ebdb81e
KS
425 * udev has finished its event handling.
426 */
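/* For illustration only (not part of udevd): an external partitioning or formatting tool
 * can suppress udev's event handling for a device by holding the BSD lock itself, e.g.:
 *
 *     int fd = open("/dev/sdX", O_RDWR|O_CLOEXEC);  // hypothetical device node
 *     flock(fd, LOCK_EX);    // the LOCK_SH|LOCK_NB attempt below now fails, event skipped
 *     ...modify the device...
 *     close(fd);             // releases the lock; for a node udev watches, closing a
 *                            // writable fd raises IN_CLOSE_WRITE, and on_inotify()
 *                            // synthesizes a "change" event to catch up
 */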
2e5b17d0 427 if (!streq_ptr(udev_device_get_action(dev), "remove") &&
fee854ee 428 shall_lock_device(dev)) {
3ebdb81e
KS
429 struct udev_device *d = dev;
430
431 if (streq_ptr("partition", udev_device_get_devtype(d)))
432 d = udev_device_get_parent(d);
433
434 if (d) {
435 fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
436 if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
56f64d95 437 log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
3d06f418 438 fd_lock = safe_close(fd_lock);
3ebdb81e
KS
439 goto skip;
440 }
441 }
442 }
443
912541b0 444 /* apply rules, create node, symlinks */
66f737b4
ZJS
445 udev_event_execute_rules(udev_event, arg_event_timeout_usec, manager->properties, manager->rules);
446 udev_event_execute_run(udev_event, arg_event_timeout_usec);
912541b0 447
e0bb2ff9 448 if (!rtnl)
523c620b 449 /* in case rtnl was initialized */
1c4baffc 450 rtnl = sd_netlink_ref(udev_event->rtnl);
4c83d994 451
912541b0 452 /* apply/restore inotify watch */
bf9bead1 453 if (udev_event->inotify_watch) {
7fe0d0d5 454 (void) udev_watch_begin(dev->device);
912541b0
KS
455 udev_device_update_db(dev);
456 }
457
3d06f418 458 safe_close(fd_lock);
3ebdb81e 459
912541b0
KS
460 /* send processed event back to libudev listeners */
461 udev_monitor_send_device(worker_monitor, NULL, dev);
462
3ebdb81e 463skip:
4914cb2d 464 log_debug("seq %llu processed", udev_device_get_seqnum(dev));
b66f29a1 465
912541b0 466 /* send udevd the result of the event execution */
e237d8cb 467 r = worker_send_message(manager->worker_watch[WRITE_END]);
b66f29a1 468 if (r < 0)
9a73bd7c 469 log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
b66f29a1 470 udev_device_get_seqnum(dev));
912541b0 471
cf28ad46 472 dev = udev_device_unref(dev);
912541b0 473
912541b0
KS
474 /* wait for more device messages from main udevd, or term signal */
475 while (dev == NULL) {
476 struct epoll_event ev[4];
477 int fdcount;
478 int i;
479
8fef0ff2 480 fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
912541b0
KS
481 if (fdcount < 0) {
482 if (errno == EINTR)
483 continue;
6af5e6a4 484 r = log_error_errno(errno, "failed to poll: %m");
912541b0
KS
485 goto out;
486 }
487
488 for (i = 0; i < fdcount; i++) {
489 if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
490 dev = udev_monitor_receive_device(worker_monitor);
491 break;
492 } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
493 struct signalfd_siginfo fdsi;
494 ssize_t size;
495
496 size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
497 if (size != sizeof(struct signalfd_siginfo))
498 continue;
499 switch (fdsi.ssi_signo) {
500 case SIGTERM:
501 goto out;
502 }
503 }
504 }
505 }
506 }
82063a88 507out:
912541b0 508 udev_device_unref(dev);
e237d8cb 509 manager_free(manager);
baa30fbc 510 log_close();
8b46c3fc 511 _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
912541b0
KS
512 }
513 case -1:
912541b0 514 event->state = EVENT_QUEUED;
56f64d95 515 log_error_errno(errno, "fork of child failed: %m");
912541b0
KS
516 break;
517 default:
e03c7cc2
TG
518 {
519 struct worker *worker;
520
c0c6806b 521 r = worker_new(&worker, manager, worker_monitor, pid);
3a19b32a 522 if (r < 0)
e03c7cc2 523 return;
e03c7cc2 524
39c19cf1
TG
525 worker_attach_event(worker, event);
526
1fa2f38f 527 log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
912541b0
KS
528 break;
529 }
e03c7cc2 530 }
7fafc032
KS
531}
532
c0c6806b 533static void event_run(Manager *manager, struct event *event) {
a505965d
TG
534 struct worker *worker;
535 Iterator i;
912541b0 536
c0c6806b
TG
537 assert(manager);
538 assert(event);
539
540 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
541 ssize_t count;
542
543 if (worker->state != WORKER_IDLE)
544 continue;
545
c0c6806b 546 count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
912541b0 547 if (count < 0) {
1fa2f38f
ZJS
548 log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
549 worker->pid, count);
cb542e84 550 (void) kill(worker->pid, SIGKILL);
912541b0
KS
551 worker->state = WORKER_KILLED;
552 continue;
553 }
39c19cf1 554 worker_attach_event(worker, event);
912541b0
KS
555 return;
556 }
557
c0c6806b 558 if (hashmap_size(manager->workers) >= arg_children_max) {
bba7a484 559 if (arg_children_max > 1)
c0c6806b 560 log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
912541b0
KS
561 return;
562 }
563
564 /* start new worker and pass initial device */
c0c6806b 565 worker_spawn(manager, event);
1e03b754
KS
566}
567
ecb17862 568static int event_queue_insert(Manager *manager, struct udev_device *dev) {
912541b0 569 struct event *event;
cb49a4f2 570 int r;
912541b0 571
ecb17862
TG
572 assert(manager);
573 assert(dev);
574
040e6896
TG
575 /* only one process can add events to the queue */
576 if (manager->pid == 0)
df0ff127 577 manager->pid = getpid_cached();
040e6896 578
df0ff127 579 assert(manager->pid == getpid_cached());
cb49a4f2 580
955d98c9 581 event = new0(struct event, 1);
cb49a4f2
TG
582 if (!event)
583 return -ENOMEM;
912541b0 584
cb49a4f2 585 event->manager = manager;
912541b0 586 event->dev = dev;
6969c349
TG
587 event->dev_kernel = udev_device_shallow_clone(dev);
588 udev_device_copy_properties(event->dev_kernel, dev);
912541b0
KS
589 event->seqnum = udev_device_get_seqnum(dev);
590 event->devpath = udev_device_get_devpath(dev);
591 event->devpath_len = strlen(event->devpath);
592 event->devpath_old = udev_device_get_devpath_old(dev);
593 event->devnum = udev_device_get_devnum(dev);
ea6039a3 594 event->is_block = streq("block", udev_device_get_subsystem(dev));
912541b0
KS
595 event->ifindex = udev_device_get_ifindex(dev);
596
9f6445e3 597 log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
912541b0
KS
598 udev_device_get_action(dev), udev_device_get_subsystem(dev));
599
600 event->state = EVENT_QUEUED;
cb49a4f2 601
40a57716 602 if (LIST_IS_EMPTY(manager->events)) {
cb49a4f2
TG
603 r = touch("/run/udev/queue");
604 if (r < 0)
605 log_warning_errno(r, "could not touch /run/udev/queue: %m");
606 }
607
40a57716 608 LIST_APPEND(event, manager->events, event);
cb49a4f2 609
912541b0 610 return 0;
fc465079
KS
611}
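/* /run/udev/queue serves as a flag file: it is created here when the first event is queued
 * and removed again in event_free() once the queue runs empty, so tools that want to wait
 * for udev to settle (e.g. "udevadm settle") only need to check whether the file exists. */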
612
c0c6806b 613static void manager_kill_workers(Manager *manager) {
a505965d
TG
614 struct worker *worker;
615 Iterator i;
1e03b754 616
c0c6806b
TG
617 assert(manager);
618
619 HASHMAP_FOREACH(worker, manager->workers, i) {
912541b0
KS
620 if (worker->state == WORKER_KILLED)
621 continue;
1e03b754 622
912541b0 623 worker->state = WORKER_KILLED;
cb542e84 624 (void) kill(worker->pid, SIGTERM);
912541b0 625 }
1e03b754
KS
626}
627
e3196993 628/* look up an event for an identical, parent, or child device */
ecb17862 629static bool is_devpath_busy(Manager *manager, struct event *event) {
40a57716 630 struct event *loop_event;
912541b0
KS
631 size_t common;
632
633 /* check if queue contains events we depend on */
40a57716 634 LIST_FOREACH(event, loop_event, manager->events) {
87ac8d99 635 /* we already found a later event, earlier cannot block us, no need to check again */
912541b0
KS
636 if (loop_event->seqnum < event->delaying_seqnum)
637 continue;
638
639 /* event we checked earlier still exists, no need to check again */
640 if (loop_event->seqnum == event->delaying_seqnum)
641 return true;
642
643 /* found ourself, no later event can block us */
644 if (loop_event->seqnum >= event->seqnum)
645 break;
646
647 /* check major/minor */
648 if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
649 return true;
650
651 /* check network device ifindex */
edc81c1c 652 if (event->ifindex > 0 && event->ifindex == loop_event->ifindex)
912541b0
KS
653 return true;
654
655 /* check our old name */
edc81c1c 656 if (event->devpath_old && streq(loop_event->devpath, event->devpath_old)) {
912541b0
KS
657 event->delaying_seqnum = loop_event->seqnum;
658 return true;
659 }
660
661 /* compare devpath */
662 common = MIN(loop_event->devpath_len, event->devpath_len);
663
664 /* one devpath is contained in the other? */
665 if (memcmp(loop_event->devpath, event->devpath, common) != 0)
666 continue;
667
668 /* identical device event found */
669 if (loop_event->devpath_len == event->devpath_len) {
670 /* devices names might have changed/swapped in the meantime */
edc81c1c 671 if (major(event->devnum) != 0 || event->ifindex > 0)
912541b0
KS
672 continue;
673 event->delaying_seqnum = loop_event->seqnum;
674 return true;
675 }
676
677 /* parent device event found */
678 if (event->devpath[common] == '/') {
679 event->delaying_seqnum = loop_event->seqnum;
680 return true;
681 }
682
683 /* child device event found */
684 if (loop_event->devpath[common] == '/') {
685 event->delaying_seqnum = loop_event->seqnum;
686 return true;
687 }
912541b0
KS
688 }
689
690 return false;
7fafc032
KS
691}
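/* The devpath prefix comparison above is what serializes events for related devices: given
 * a queued event for .../block/sda and a new one for .../block/sda/sda1, the common prefix
 * ends at a '/', so the partition event is delayed behind the whole-disk event (and the
 * mirror case delays a parent behind an already queued child). Unrelated devpaths share no
 * such boundary and may be handled in parallel by different workers. */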
692
693d371d
TG
693static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
694 Manager *manager = userdata;
695
696 assert(manager);
697
698 log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
699
700 sd_event_exit(manager->event, -ETIMEDOUT);
701
702 return 1;
703}
704
62d43dac 705static void manager_exit(Manager *manager) {
693d371d
TG
706 uint64_t usec;
707 int r;
62d43dac
TG
708
709 assert(manager);
710
711 manager->exit = true;
712
b79aacbf
TG
713 sd_notify(false,
714 "STOPPING=1\n"
715 "STATUS=Starting shutdown...");
716
62d43dac 717 /* close sources of new events and discard buffered events */
693d371d 718 manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
ab7854df 719 manager->ctrl = udev_ctrl_unref(manager->ctrl);
62d43dac 720
693d371d 721 manager->inotify_event = sd_event_source_unref(manager->inotify_event);
ab7854df 722 manager->fd_inotify = safe_close(manager->fd_inotify);
62d43dac 723
693d371d 724 manager->uevent_event = sd_event_source_unref(manager->uevent_event);
ab7854df 725 manager->monitor = udev_monitor_unref(manager->monitor);
62d43dac
TG
726
727 /* discard queued events and kill workers */
728 event_queue_cleanup(manager, EVENT_QUEUED);
729 manager_kill_workers(manager);
693d371d 730
3285baa8 731 assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
693d371d 732
3285baa8 733 r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
693d371d
TG
734 usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
735 if (r < 0)
736 return;
62d43dac
TG
737}
738
739/* reload requested, HUP signal received, rules changed, builtin changed */
740static void manager_reload(Manager *manager) {
741
742 assert(manager);
743
b79aacbf
TG
744 sd_notify(false,
745 "RELOADING=1\n"
746 "STATUS=Flushing configuration...");
747
62d43dac
TG
748 manager_kill_workers(manager);
749 manager->rules = udev_rules_unref(manager->rules);
2024ed61 750 udev_builtin_exit();
b79aacbf 751
1ef72b55
MS
752 sd_notifyf(false,
753 "READY=1\n"
754 "STATUS=Processing with %u children at max", arg_children_max);
62d43dac
TG
755}
756
eca195ec
YW
757static int on_kill_workers_event(sd_event_source *s, uint64_t usec, void *userdata) {
758 Manager *manager = userdata;
759
760 assert(manager);
761
762 log_debug("Cleaning up idle workers");
763 manager_kill_workers(manager);
764
765 return 1;
766}
767
eca195ec
YW
768static int manager_disable_kill_workers_event(Manager *manager) {
769 int r;
770
771 if (!manager->kill_workers_event)
772 return 0;
773
774 r = sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
775 if (r < 0)
776 return log_warning_errno(r, "Failed to disable event source for cleaning up idle workers, ignoring: %m");
777
778 return 0;
779}
780
c0c6806b 781static void event_queue_start(Manager *manager) {
40a57716 782 struct event *event;
693d371d 783 usec_t usec;
8ab44e3f 784
c0c6806b
TG
785 assert(manager);
786
40a57716 787 if (LIST_IS_EMPTY(manager->events) ||
7c4c7e89
TG
788 manager->exit || manager->stop_exec_queue)
789 return;
790
3285baa8 791 assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
38a03f06
LP
792 /* check for changed config, every 3 seconds at most */
793 if (manager->last_usec == 0 ||
794 (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
795 if (udev_rules_check_timestamp(manager->rules) ||
2024ed61 796 udev_builtin_validate())
38a03f06 797 manager_reload(manager);
693d371d 798
38a03f06 799 manager->last_usec = usec;
7c4c7e89
TG
800 }
801
eca195ec
YW
802 (void) manager_disable_kill_workers_event(manager);
803
2024ed61 804 udev_builtin_init();
7c4c7e89
TG
805
806 if (!manager->rules) {
c4d44cba 807 manager->rules = udev_rules_new(arg_resolve_name_timing);
7c4c7e89
TG
808 if (!manager->rules)
809 return;
810 }
811
40a57716 812 LIST_FOREACH(event, event, manager->events) {
912541b0
KS
813 if (event->state != EVENT_QUEUED)
814 continue;
0bc74ea7 815
912541b0 816 /* do not start event if parent or child event is still running */
ecb17862 817 if (is_devpath_busy(manager, event))
912541b0 818 continue;
fc465079 819
c0c6806b 820 event_run(manager, event);
912541b0 821 }
1e03b754
KS
822}
823
ecb17862 824static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
40a57716 825 struct event *event, *tmp;
ff2c503d 826
40a57716 827 LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
912541b0
KS
828 if (match_type != EVENT_UNDEF && match_type != event->state)
829 continue;
ff2c503d 830
c6aa11f2 831 event_free(event);
912541b0 832 }
ff2c503d
KS
833}
834
e82e8fa5 835static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b
TG
836 Manager *manager = userdata;
837
838 assert(manager);
839
912541b0
KS
840 for (;;) {
841 struct worker_message msg;
979558f3
TG
842 struct iovec iovec = {
843 .iov_base = &msg,
844 .iov_len = sizeof(msg),
845 };
846 union {
847 struct cmsghdr cmsghdr;
848 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
849 } control = {};
850 struct msghdr msghdr = {
851 .msg_iov = &iovec,
852 .msg_iovlen = 1,
853 .msg_control = &control,
854 .msg_controllen = sizeof(control),
855 };
856 struct cmsghdr *cmsg;
912541b0 857 ssize_t size;
979558f3 858 struct ucred *ucred = NULL;
a505965d 859 struct worker *worker;
912541b0 860
e82e8fa5 861 size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
979558f3 862 if (size < 0) {
738a7907
TG
863 if (errno == EINTR)
864 continue;
865 else if (errno == EAGAIN)
866 /* nothing more to read */
867 break;
979558f3 868
e82e8fa5 869 return log_error_errno(errno, "failed to receive message: %m");
979558f3
TG
870 } else if (size != sizeof(struct worker_message)) {
871 log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
e82e8fa5 872 continue;
979558f3
TG
873 }
874
2a1288ff 875 CMSG_FOREACH(cmsg, &msghdr) {
979558f3
TG
876 if (cmsg->cmsg_level == SOL_SOCKET &&
877 cmsg->cmsg_type == SCM_CREDENTIALS &&
878 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
879 ucred = (struct ucred*) CMSG_DATA(cmsg);
880 }
881
882 if (!ucred || ucred->pid <= 0) {
883 log_warning_errno(EIO, "ignoring worker message without valid PID");
884 continue;
885 }
912541b0
KS
886
887 /* lookup worker who sent the signal */
4a0b58c4 888 worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
a505965d
TG
889 if (!worker) {
890 log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
891 continue;
912541b0 892 }
c0bbfd72 893
a505965d
TG
894 if (worker->state != WORKER_KILLED)
895 worker->state = WORKER_IDLE;
896
897 /* worker returned */
898 event_free(worker->event);
912541b0 899 }
e82e8fa5 900
8302fe5a
TG
901 /* we have free workers, try to schedule events */
902 event_queue_start(manager);
903
e82e8fa5
TG
904 return 1;
905}
906
907static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 908 Manager *manager = userdata;
e82e8fa5
TG
909 struct udev_device *dev;
910 int r;
911
c0c6806b 912 assert(manager);
e82e8fa5 913
c0c6806b 914 dev = udev_monitor_receive_device(manager->monitor);
e82e8fa5
TG
915 if (dev) {
916 udev_device_ensure_usec_initialized(dev, NULL);
ecb17862 917 r = event_queue_insert(manager, dev);
e82e8fa5
TG
918 if (r < 0)
919 udev_device_unref(dev);
8302fe5a
TG
920 else
921 /* we have fresh events, try to schedule them */
922 event_queue_start(manager);
e82e8fa5
TG
923 }
924
925 return 1;
88f4b648
KS
926}
927
3b47c739 928/* receive the udevd message from userspace */
e82e8fa5 929static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 930 Manager *manager = userdata;
8e766630
LP
931 _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
932 _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
912541b0 933 const char *str;
9b5150b6 934 int i, r;
912541b0 935
c0c6806b 936 assert(manager);
e4f66b77 937
c0c6806b 938 ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
e4f66b77 939 if (!ctrl_conn)
e82e8fa5 940 return 1;
912541b0
KS
941
942 ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
e4f66b77 943 if (!ctrl_msg)
e82e8fa5 944 return 1;
912541b0
KS
945
946 i = udev_ctrl_get_set_log_level(ctrl_msg);
947 if (i >= 0) {
ed14edc0 948 log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
baa30fbc 949 log_set_max_level(i);
c0c6806b 950 manager_kill_workers(manager);
912541b0
KS
951 }
952
953 if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
9f6445e3 954 log_debug("udevd message (STOP_EXEC_QUEUE) received");
c0c6806b 955 manager->stop_exec_queue = true;
912541b0
KS
956 }
957
958 if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
9f6445e3 959 log_debug("udevd message (START_EXEC_QUEUE) received");
c0c6806b 960 manager->stop_exec_queue = false;
8302fe5a 961 event_queue_start(manager);
912541b0
KS
962 }
963
964 if (udev_ctrl_get_reload(ctrl_msg) > 0) {
9f6445e3 965 log_debug("udevd message (RELOAD) received");
62d43dac 966 manager_reload(manager);
912541b0
KS
967 }
968
969 str = udev_ctrl_get_set_env(ctrl_msg);
9b5150b6
YW
970 if (str) {
971 _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
972 char *eq;
973
974 eq = strchr(str, '=');
975 if (!eq) {
976 log_error("Invalid key format '%s'", str);
977 return 1;
978 }
979
980 key = strndup(str, eq - str);
981 if (!key) {
982 log_oom();
983 return 1;
984 }
985
986 old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);
987
988 r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
989 if (r < 0) {
990 log_oom();
991 return 1;
912541b0 992 }
9b5150b6
YW
993
994 eq++;
995 if (isempty(eq)) {
996 log_debug("udevd message (ENV) received, unset '%s'", key);
997
998 r = hashmap_put(manager->properties, key, NULL);
999 if (r < 0) {
1000 log_oom();
1001 return 1;
1002 }
1003 } else {
1004 val = strdup(eq);
1005 if (!val) {
1006 log_oom();
1007 return 1;
1008 }
1009
1010 log_debug("udevd message (ENV) received, set '%s=%s'", key, val);
1011
1012 r = hashmap_put(manager->properties, key, val);
1013 if (r < 0) {
1014 log_oom();
1015 return 1;
1016 }
1017 }
1018
1019 key = val = NULL;
c0c6806b 1020 manager_kill_workers(manager);
912541b0
KS
1021 }
1022
1023 i = udev_ctrl_get_set_children_max(ctrl_msg);
1024 if (i >= 0) {
9f6445e3 1025 log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
bba7a484 1026 arg_children_max = i;
1ef72b55
MS
1027
1028 (void) sd_notifyf(false,
1029 "READY=1\n"
1030 "STATUS=Processing with %u children at max", arg_children_max);
912541b0
KS
1031 }
1032
cb49a4f2 1033 if (udev_ctrl_get_ping(ctrl_msg) > 0)
9f6445e3 1034 log_debug("udevd message (SYNC) received");
912541b0
KS
1035
1036 if (udev_ctrl_get_exit(ctrl_msg) > 0) {
9f6445e3 1037 log_debug("udevd message (EXIT) received");
62d43dac 1038 manager_exit(manager);
c0c6806b
TG
1039 /* keep reference to block the client until we exit
1040 TODO: deal with several blocking exit requests */
1041 manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
912541b0 1042 }
e4f66b77 1043
e82e8fa5 1044 return 1;
88f4b648 1045}
4a231017 1046
70068602
YW
1047static int synthesize_change(sd_device *dev) {
1048 const char *subsystem, *sysname, *devname, *syspath, *devtype;
1049 char filename[PATH_MAX];
f3a740a5 1050 int r;
edd32000 1051
70068602
YW
1052 r = sd_device_get_subsystem(dev, &subsystem);
1053 if (r < 0)
1054 return r;
1055
1056 r = sd_device_get_sysname(dev, &sysname);
1057 if (r < 0)
1058 return r;
1059
1060 r = sd_device_get_devname(dev, &devname);
1061 if (r < 0)
1062 return r;
1063
1064 r = sd_device_get_syspath(dev, &syspath);
1065 if (r < 0)
1066 return r;
1067
1068 r = sd_device_get_devtype(dev, &devtype);
1069 if (r < 0)
1070 return r;
1071
1072 if (streq_ptr("block", subsystem) &&
1073 streq_ptr("disk", devtype) &&
1074 !startswith(sysname, "dm-")) {
1075 _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
1076 bool part_table_read = false, has_partitions = false;
1077 sd_device *d;
ede34445 1078 int fd;
f3a740a5 1079
ede34445 1080 /*
e9fc29f4
KS
1081 * Try to re-read the partition table. This only succeeds if
1082 * none of the devices is busy. The kernel returns 0 if no
1083 * partition table is found, and we will not get an event for
1084 * the disk.
ede34445 1085 */
70068602 1086 fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
ede34445 1087 if (fd >= 0) {
02ba8fb3
KS
1088 r = flock(fd, LOCK_EX|LOCK_NB);
1089 if (r >= 0)
1090 r = ioctl(fd, BLKRRPART, 0);
1091
ede34445
KS
1092 close(fd);
1093 if (r >= 0)
e9fc29f4 1094 part_table_read = true;
ede34445
KS
1095 }
1096
e9fc29f4 1097 /* search for partitions */
70068602 1098 r = sd_device_enumerator_new(&e);
f3a740a5
KS
1099 if (r < 0)
1100 return r;
1101
70068602 1102 r = sd_device_enumerator_allow_uninitialized(e);
f3a740a5
KS
1103 if (r < 0)
1104 return r;
1105
70068602 1106 r = sd_device_enumerator_add_match_parent(e, dev);
47a3fa0f
TA
1107 if (r < 0)
1108 return r;
e9fc29f4 1109
70068602
YW
1110 r = sd_device_enumerator_add_match_subsystem(e, "block", true);
1111 if (r < 0)
1112 return r;
e9fc29f4 1113
70068602
YW
1114 FOREACH_DEVICE(e, d) {
1115 const char *t;
e9fc29f4 1116
70068602
YW
1117 if (sd_device_get_devtype(d, &t) < 0 ||
1118 !streq("partition", t))
e9fc29f4
KS
1119 continue;
1120
1121 has_partitions = true;
1122 break;
1123 }
1124
1125 /*
1126 * We have partitions and re-read the table, the kernel already sent
1127 * out a "change" event for the disk, and "remove/add" for all
1128 * partitions.
1129 */
1130 if (part_table_read && has_partitions)
1131 return 0;
1132
1133 /*
1134 * We have partitions but re-reading the partition table did not
1135 * work, synthesize "change" for the disk and all partitions.
1136 */
70068602
YW
1137 log_debug("Device '%s' is closed, synthesising 'change'", devname);
1138 strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
57512c89 1139 write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
e9fc29f4 1140
70068602
YW
1141 FOREACH_DEVICE(e, d) {
1142 const char *t, *n, *s;
f3a740a5 1143
70068602
YW
1144 if (sd_device_get_devtype(d, &t) < 0 ||
1145 !streq("partition", t))
f3a740a5
KS
1146 continue;
1147
70068602
YW
1148 if (sd_device_get_devname(d, &n) < 0 ||
1149 sd_device_get_syspath(d, &s) < 0)
f3a740a5
KS
1150 continue;
1151
70068602
YW
1152 log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
1153 strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
57512c89 1154 write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
f3a740a5 1155 }
ede34445
KS
1156
1157 return 0;
f3a740a5
KS
1158 }
1159
70068602
YW
1160 log_debug("Device %s is closed, synthesising 'change'", devname);
1161 strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
57512c89 1162 write_string_file(filename, "change", WRITE_STRING_FILE_DISABLE_BUFFER);
ede34445 1163
f3a740a5 1164 return 0;
edd32000
KS
1165}
1166
e82e8fa5 1167static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
c0c6806b 1168 Manager *manager = userdata;
0254e944 1169 union inotify_event_buffer buffer;
f7c1ad4f
LP
1170 struct inotify_event *e;
1171 ssize_t l;
912541b0 1172
c0c6806b 1173 assert(manager);
e82e8fa5 1174
eca195ec
YW
1175 (void) manager_disable_kill_workers_event(manager);
1176
e82e8fa5 1177 l = read(fd, &buffer, sizeof(buffer));
f7c1ad4f 1178 if (l < 0) {
3742095b 1179 if (IN_SET(errno, EAGAIN, EINTR))
e82e8fa5 1180 return 1;
912541b0 1181
f7c1ad4f 1182 return log_error_errno(errno, "Failed to read inotify fd: %m");
912541b0
KS
1183 }
1184
f7c1ad4f 1185 FOREACH_INOTIFY_EVENT(e, buffer, l) {
70068602
YW
1186 _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
1187 const char *devnode;
1188
7fe3324c 1189 if (udev_watch_lookup(e->wd, &dev) <= 0)
70068602 1190 continue;
912541b0 1191
70068602 1192 if (sd_device_get_devname(dev, &devnode) < 0)
edd32000 1193 continue;
912541b0 1194
7fe3324c 1195 log_device_debug(dev, "Inotify event: %x for %s", e->mask, devnode);
da143134 1196 if (e->mask & IN_CLOSE_WRITE)
edd32000 1197 synthesize_change(dev);
da143134 1198 else if (e->mask & IN_IGNORED)
2024ed61 1199 udev_watch_end(dev);
912541b0
KS
1200 }
1201
e82e8fa5 1202 return 1;
bd284db1
SJR
1203}
1204
0561329d 1205static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1206 Manager *manager = userdata;
1207
1208 assert(manager);
1209
62d43dac 1210 manager_exit(manager);
912541b0 1211
e82e8fa5
TG
1212 return 1;
1213}
912541b0 1214
0561329d 1215static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1216 Manager *manager = userdata;
1217
1218 assert(manager);
1219
62d43dac 1220 manager_reload(manager);
912541b0 1221
e82e8fa5
TG
1222 return 1;
1223}
912541b0 1224
e82e8fa5 1225static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
c0c6806b
TG
1226 Manager *manager = userdata;
1227
1228 assert(manager);
1229
e82e8fa5
TG
1230 for (;;) {
1231 pid_t pid;
1232 int status;
1233 struct worker *worker;
d1317d02 1234
e82e8fa5
TG
1235 pid = waitpid(-1, &status, WNOHANG);
1236 if (pid <= 0)
f29328d6 1237 break;
e82e8fa5 1238
4a0b58c4 1239 worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
e82e8fa5
TG
1240 if (!worker) {
1241 log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
f29328d6 1242 continue;
912541b0 1243 }
e82e8fa5
TG
1244
1245 if (WIFEXITED(status)) {
1246 if (WEXITSTATUS(status) == 0)
1247 log_debug("worker ["PID_FMT"] exited", pid);
1248 else
1249 log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
1250 } else if (WIFSIGNALED(status)) {
76341acc 1251 log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
e82e8fa5
TG
1252 } else if (WIFSTOPPED(status)) {
1253 log_info("worker ["PID_FMT"] stopped", pid);
f29328d6 1254 continue;
e82e8fa5
TG
1255 } else if (WIFCONTINUED(status)) {
1256 log_info("worker ["PID_FMT"] continued", pid);
f29328d6 1257 continue;
e82e8fa5
TG
1258 } else
1259 log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);
1260
05e6d9c6
YW
1261 if ((!WIFEXITED(status) || WEXITSTATUS(status) != 0) && worker->event) {
1262 log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
1263 /* delete state from disk */
1264 udev_device_delete_db(worker->event->dev);
1265 udev_device_tag_index(worker->event->dev, NULL, false);
1266 /* forward kernel event without amending it */
1267 udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
e82e8fa5
TG
1268 }
1269
1270 worker_free(worker);
912541b0 1271 }
e82e8fa5 1272
8302fe5a
TG
1273 /* we can start new workers, try to schedule events */
1274 event_queue_start(manager);
1275
eca195ec
YW
1276 /* Disable unnecessary cleanup event */
1277 if (hashmap_isempty(manager->workers) && manager->kill_workers_event)
1278 (void) sd_event_source_set_enabled(manager->kill_workers_event, SD_EVENT_OFF);
1279
e82e8fa5 1280 return 1;
f27125f9 1281}
1282
693d371d
TG
1283static int on_post(sd_event_source *s, void *userdata) {
1284 Manager *manager = userdata;
693d371d
TG
1285
1286 assert(manager);
1287
b6107f01
YW
1288 if (!LIST_IS_EMPTY(manager->events))
1289 return 1;
1290
1291 /* There are no pending events. Let's clean up idle processes. */
1292
1293 if (!hashmap_isempty(manager->workers)) {
1294 /* There are idle workers */
6d63048a
YW
1295 (void) event_reset_time(manager->event, &manager->kill_workers_event, CLOCK_MONOTONIC,
1296 now(CLOCK_MONOTONIC) + 3 * USEC_PER_SEC, USEC_PER_SEC,
1297 on_kill_workers_event, manager, 0, "kill-workers-event", false);
b6107f01 1298 return 1;
693d371d
TG
1299 }
1300
b6107f01
YW
1301 /* There are no idle workers. */
1302
1303 if (manager->exit)
1304 return sd_event_exit(manager->event, 0);
1305
1306 if (manager->cgroup)
1307 /* cleanup possible left-over processes in our cgroup */
1308 (void) cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
1309
693d371d
TG
1310 return 1;
1311}
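/* The 3 s timer armed above via event_reset_time() (from event-util.h) is what eventually
 * calls on_kill_workers_event(): whenever the queue is empty but idle workers remain, the
 * timeout is (re)scheduled, and it is disabled again through
 * manager_disable_kill_workers_event() as soon as new uevents or inotify events arrive, so
 * workers are only reaped after a genuinely quiet period. */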
1312
c4b69e99 1313static int listen_fds(int *ret_ctrl, int *ret_netlink) {
fcff1e72 1314 int ctrl_fd = -1, netlink_fd = -1;
c4b69e99 1315 int fd, n;
912541b0 1316
c4b69e99
YW
1317 assert(ret_ctrl);
1318 assert(ret_netlink);
fcff1e72 1319
912541b0 1320 n = sd_listen_fds(true);
fcff1e72
TG
1321 if (n < 0)
1322 return n;
912541b0
KS
1323
1324 for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
c52cff07 1325 if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1) > 0) {
fcff1e72
TG
1326 if (ctrl_fd >= 0)
1327 return -EINVAL;
1328 ctrl_fd = fd;
912541b0
KS
1329 continue;
1330 }
1331
c52cff07 1332 if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
fcff1e72
TG
1333 if (netlink_fd >= 0)
1334 return -EINVAL;
1335 netlink_fd = fd;
912541b0
KS
1336 continue;
1337 }
1338
fcff1e72 1339 return -EINVAL;
912541b0
KS
1340 }
1341
c4b69e99
YW
1342 *ret_ctrl = ctrl_fd;
1343 *ret_netlink = netlink_fd;
912541b0 1344
912541b0 1345 return 0;
7459bcdc
KS
1346}
1347
e6f86cac 1348/*
3f85ef0f 1349 * read the kernel command line, in case we need to get into debug mode
1d84ad94
LP
1350 * udev.log_priority=<level> syslog priority
1351 * udev.children_max=<number of workers> events are fully serialized if set to 1
1352 * udev.exec_delay=<number of seconds> delay execution of every executed program
1353 * udev.event_timeout=<number of seconds> seconds to wait before terminating an event
e6f86cac 1354 */
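/* For example (illustrative values only), booting with
 *     udev.log_priority=debug udev.children_max=2 udev.event_timeout=120
 * on the kernel command line makes udevd log verbosely, run at most two workers, and give
 * every event 120 seconds before it is killed. */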
96287a49 1355static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
92e72467 1356 int r = 0;
e6f86cac 1357
614a823c 1358 assert(key);
e6f86cac 1359
614a823c
TG
1360 if (!value)
1361 return 0;
e6f86cac 1362
1d84ad94
LP
1363 if (proc_cmdline_key_streq(key, "udev.log_priority")) {
1364
1365 if (proc_cmdline_value_missing(key, value))
1366 return 0;
1367
46f0fbd8 1368 r = log_level_from_string(value);
92e72467
ZJS
1369 if (r >= 0)
1370 log_set_max_level(r);
1d84ad94
LP
1371
1372 } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {
1373
1374 if (proc_cmdline_value_missing(key, value))
1375 return 0;
1376
9d9264ba 1377 r = parse_sec(value, &arg_event_timeout_usec);
1d84ad94
LP
1378
1379 } else if (proc_cmdline_key_streq(key, "udev.children_max")) {
1380
1381 if (proc_cmdline_value_missing(key, value))
1382 return 0;
1383
020328e1 1384 r = safe_atou(value, &arg_children_max);
1d84ad94
LP
1385
1386 } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {
1387
1388 if (proc_cmdline_value_missing(key, value))
1389 return 0;
1390
6b92f429 1391 r = parse_sec(value, &arg_exec_delay_usec);
1d84ad94
LP
1392
1393 } else if (startswith(key, "udev."))
92e72467 1394 log_warning("Unknown udev kernel command line option \"%s\"", key);
614a823c 1395
92e72467
ZJS
1396 if (r < 0)
1397 log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);
1d84ad94 1398
614a823c 1399 return 0;
e6f86cac
KS
1400}
1401
37ec0fdd
LP
1402static int help(void) {
1403 _cleanup_free_ char *link = NULL;
1404 int r;
1405
1406 r = terminal_urlify_man("systemd-udevd.service", "8", &link);
1407 if (r < 0)
1408 return log_oom();
1409
ed216e1f
TG
1410 printf("%s [OPTIONS...]\n\n"
1411 "Manages devices.\n\n"
5ac0162c 1412 " -h --help Print this message\n"
2d19c17e
MF
1413 " -V --version Print version of the program\n"
1414 " -d --daemon Detach and run in the background\n"
1415 " -D --debug Enable debug output\n"
1416 " -c --children-max=INT Set maximum number of workers\n"
1417 " -e --exec-delay=SECONDS Seconds to wait before executing RUN=\n"
1418 " -t --event-timeout=SECONDS Seconds to wait before terminating an event\n"
1419 " -N --resolve-names=early|late|never\n"
5ac0162c 1420 " When to resolve users and groups\n"
37ec0fdd
LP
1421 "\nSee the %s for details.\n"
1422 , program_invocation_short_name
1423 , link
1424 );
1425
1426 return 0;
ed216e1f
TG
1427}
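/* Illustrative invocation (not taken from the sources): "systemd-udevd --debug --children-max=4"
 * keeps the daemon in the foreground with debug logging and at most four workers, which is a
 * convenient way to watch rule processing; adding --daemon detaches it into the background. */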
1428
bba7a484 1429static int parse_argv(int argc, char *argv[]) {
912541b0 1430 static const struct option options[] = {
bba7a484
TG
1431 { "daemon", no_argument, NULL, 'd' },
1432 { "debug", no_argument, NULL, 'D' },
1433 { "children-max", required_argument, NULL, 'c' },
1434 { "exec-delay", required_argument, NULL, 'e' },
1435 { "event-timeout", required_argument, NULL, 't' },
1436 { "resolve-names", required_argument, NULL, 'N' },
1437 { "help", no_argument, NULL, 'h' },
1438 { "version", no_argument, NULL, 'V' },
912541b0
KS
1439 {}
1440 };
689a97f5 1441
bba7a484 1442 int c;
689a97f5 1443
bba7a484
TG
1444 assert(argc >= 0);
1445 assert(argv);
912541b0 1446
e14b6f21 1447 while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
f1e8664e 1448 int r;
912541b0 1449
bba7a484 1450 switch (c) {
912541b0 1451
912541b0 1452 case 'd':
bba7a484 1453 arg_daemonize = true;
912541b0
KS
1454 break;
1455 case 'c':
020328e1 1456 r = safe_atou(optarg, &arg_children_max);
6f5cf8a8 1457 if (r < 0)
389f9bf2 1458 log_warning_errno(r, "Failed to parse --children-max= value '%s', ignoring: %m", optarg);
912541b0
KS
1459 break;
1460 case 'e':
6b92f429 1461 r = parse_sec(optarg, &arg_exec_delay_usec);
6f5cf8a8 1462 if (r < 0)
6b92f429 1463 log_warning_errno(r, "Failed to parse --exec-delay= value '%s', ignoring: %m", optarg);
912541b0 1464 break;
9719859c 1465 case 't':
9d9264ba 1466 r = parse_sec(optarg, &arg_event_timeout_usec);
f1e8664e 1467 if (r < 0)
9d9264ba 1468 log_warning_errno(r, "Failed to parse --event-timeout= value '%s', ignoring: %m", optarg);
9719859c 1469 break;
912541b0 1470 case 'D':
bba7a484 1471 arg_debug = true;
912541b0 1472 break;
c4d44cba
YW
1473 case 'N': {
1474 ResolveNameTiming t;
1475
1476 t = resolve_name_timing_from_string(optarg);
1477 if (t < 0)
1478 log_warning("Invalid --resolve-names= value '%s', ignoring.", optarg);
1479 else
1480 arg_resolve_name_timing = t;
912541b0 1481 break;
c4d44cba 1482 }
912541b0 1483 case 'h':
37ec0fdd 1484 return help();
912541b0 1485 case 'V':
948aaa7c 1486 printf("%s\n", PACKAGE_VERSION);
bba7a484
TG
1487 return 0;
1488 case '?':
1489 return -EINVAL;
912541b0 1490 default:
bba7a484
TG
1491 assert_not_reached("Unhandled option");
1492
912541b0
KS
1493 }
1494 }
1495
bba7a484
TG
1496 return 1;
1497}
1498
b7f74dd4 1499static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1500 _cleanup_(manager_freep) Manager *manager = NULL;
6d5e65f6 1501 int r, fd_worker;
c0c6806b
TG
1502
1503 assert(ret);
1504
6f19b42f 1505 manager = new(Manager, 1);
c0c6806b
TG
1506 if (!manager)
1507 return log_oom();
1508
6f19b42f
YW
1509 *manager = (Manager) {
1510 .fd_inotify = -1,
1511 .worker_watch = { -1, -1 },
1512 .cgroup = cgroup,
1513 };
e237d8cb 1514
2024ed61 1515 udev_builtin_init();
b2d21d93 1516
c4d44cba 1517 manager->rules = udev_rules_new(arg_resolve_name_timing);
ecb17862
TG
1518 if (!manager->rules)
1519 return log_error_errno(ENOMEM, "error reading rules");
1520
2024ed61 1521 manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
f59118ec
TG
1522 if (!manager->ctrl)
1523 return log_error_errno(EINVAL, "error taking over udev control socket");
e237d8cb 1524
c4b69e99
YW
1525 r = udev_ctrl_enable_receiving(manager->ctrl);
1526 if (r < 0)
1527 return log_error_errno(r, "Failed to bind udev control socket: %m");
1528
1529 fd_ctrl = udev_ctrl_get_fd(manager->ctrl);
1530 if (fd_ctrl < 0)
1531 return log_error_errno(fd_ctrl, "Failed to get udev control fd: %m");
1532
2024ed61 1533 manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
f59118ec
TG
1534 if (!manager->monitor)
1535 return log_error_errno(EINVAL, "error taking over netlink socket");
e237d8cb 1536
c4b69e99
YW
1537 (void) udev_monitor_set_receive_buffer_size(manager->monitor, 128 * 1024 * 1024);
1538
1539 r = udev_monitor_enable_receiving(manager->monitor);
1540 if (r < 0)
1541 return log_error_errno(r, "Failed to bind netlink socket; %m");
1542
1543 fd_uevent = udev_monitor_get_fd(manager->monitor);
1544 if (fd_uevent < 0)
1545 return log_error_errno(fd_uevent, "Failed to get uevent fd: %m");
1546
e237d8cb
TG
1547 /* unnamed socket from workers to the main daemon */
1548 r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
1549 if (r < 0)
1550 return log_error_errno(errno, "error creating socketpair: %m");
1551
693d371d 1552 fd_worker = manager->worker_watch[READ_END];
e237d8cb 1553
2ff48e98 1554 r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
e237d8cb 1555 if (r < 0)
2ff48e98 1556 return log_error_errno(r, "could not enable SO_PASSCRED: %m");
e237d8cb 1557
b7759e04
YW
1558 r = udev_watch_init();
1559 if (r < 0)
1560 return log_error_errno(r, "Failed to create inotify descriptor: %m");
1561 manager->fd_inotify = r;
e237d8cb 1562
2024ed61 1563 udev_watch_restore();
e237d8cb
TG
1564
1565 /* block and listen to all signals on signalfd */
72c0a2c2 1566 assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);
693d371d 1567
49f997f3
TG
1568 r = sd_event_default(&manager->event);
1569 if (r < 0)
709f6e46 1570 return log_error_errno(r, "could not allocate event loop: %m");
49f997f3 1571
693d371d
TG
1572 r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
1573 if (r < 0)
1574 return log_error_errno(r, "error creating sigint event source: %m");
1575
1576 r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
1577 if (r < 0)
1578 return log_error_errno(r, "error creating sigterm event source: %m");
1579
1580 r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
1581 if (r < 0)
1582 return log_error_errno(r, "error creating sighup event source: %m");
1583
1584 r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
1585 if (r < 0)
1586 return log_error_errno(r, "error creating sigchld event source: %m");
1587
1588 r = sd_event_set_watchdog(manager->event, true);
1589 if (r < 0)
1590 return log_error_errno(r, "error creating watchdog event source: %m");
1591
11b1dd8c 1592 r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
693d371d
TG
1593 if (r < 0)
1594 return log_error_errno(r, "error creating ctrl event source: %m");
1595
1596 /* This needs to be after the inotify and uevent handling, to make sure
1597 * that the ping is sent back after fully processing the pending uevents
1598 * (including the synthetic ones we may create due to inotify events).
1599 */
1600 r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
1601 if (r < 0)
1602 return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
1603
1604 r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
1605 if (r < 0)
1606 return log_error_errno(r, "error creating inotify event source: %m");
1607
11b1dd8c 1608 r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
693d371d
TG
1609 if (r < 0)
1610 return log_error_errno(r, "error creating uevent event source: %m");
1611
1612 r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
1613 if (r < 0)
1614 return log_error_errno(r, "error creating worker event source: %m");
1615
1616 r = sd_event_add_post(manager->event, NULL, on_post, manager);
1617 if (r < 0)
1618 return log_error_errno(r, "error creating post event source: %m");
e237d8cb 1619
1cc6c93a 1620 *ret = TAKE_PTR(manager);
11b1dd8c 1621
86c3bece 1622 return 0;
c0c6806b
TG
1623}
1624
077fc5e2 1625static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
c0c6806b 1626 _cleanup_(manager_freep) Manager *manager = NULL;
077fc5e2
DH
1627 int r;
1628
1629 r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
1630 if (r < 0) {
1631 r = log_error_errno(r, "failed to allocate manager object: %m");
1632 goto exit;
1633 }
1634
1635 r = udev_rules_apply_static_dev_perms(manager->rules);
1636 if (r < 0)
1637 log_error_errno(r, "failed to apply permissions on static device nodes: %m");
1638
1ef72b55
MS
1639 (void) sd_notifyf(false,
1640 "READY=1\n"
1641 "STATUS=Processing with %u children at max", arg_children_max);
077fc5e2
DH
1642
1643 r = sd_event_loop(manager->event);
1644 if (r < 0) {
1645 log_error_errno(r, "event loop failed: %m");
1646 goto exit;
1647 }
1648
1649 sd_event_get_exit_code(manager->event, &r);
1650
1651exit:
1652 sd_notify(false,
1653 "STOPPING=1\n"
1654 "STATUS=Shutting down...");
1655 if (manager)
1656 udev_ctrl_cleanup(manager->ctrl);
1657 return r;
1658}
1659
1660int main(int argc, char *argv[]) {
c26d1879 1661 _cleanup_free_ char *cgroup = NULL;
efa1606e 1662 int fd_ctrl = -1, fd_uevent = -1;
e5d7bce1 1663 int r;
bba7a484 1664
bba7a484 1665 log_set_target(LOG_TARGET_AUTO);
a14e7af1 1666 udev_parse_config_full(&arg_children_max, &arg_exec_delay_usec, &arg_event_timeout_usec, &arg_resolve_name_timing);
bba7a484
TG
1667 log_parse_environment();
1668 log_open();
1669
bba7a484
TG
1670 r = parse_argv(argc, argv);
1671 if (r <= 0)
1672 goto exit;
1673
1d84ad94 1674 r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
614a823c
TG
1675 if (r < 0)
1676 log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");
912541b0 1677
78d3e041
KS
1678 if (arg_debug) {
1679 log_set_target(LOG_TARGET_CONSOLE);
bba7a484 1680 log_set_max_level(LOG_DEBUG);
78d3e041 1681 }
bba7a484 1682
6174a243
YW
1683 log_set_max_level_realm(LOG_REALM_SYSTEMD, log_get_max_level());
1684
fba868fa
LP
1685 r = must_be_root();
1686 if (r < 0)
912541b0 1687 goto exit;
912541b0 1688
712cebf1
TG
1689 if (arg_children_max == 0) {
1690 cpu_set_t cpu_set;
e438c57a 1691 unsigned long mem_limit;
ebc164ef 1692
712cebf1 1693 arg_children_max = 8;
d457ff83 1694
ece174c5 1695 if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
faae64fa 1696 arg_children_max += CPU_COUNT(&cpu_set) * 8;
912541b0 1697
e438c57a
MW
1698 mem_limit = physical_memory() / (128LU*1024*1024);
1699 arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));
1700
712cebf1 1701 log_debug("set children_max to %u", arg_children_max);
d457ff83 1702 }
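/* Worked example (hypothetical machine): with 4 CPUs and 8 GiB of RAM the default becomes
 * 8 + 4 * 8 = 40 workers, the memory-derived ceiling is 8192 MiB / 128 MiB = 64, and
 * MAX(10U, MIN(40, 64)) leaves children_max at 40. */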
912541b0 1703
712cebf1
TG
1704 /* set umask before creating any file/directory */
1705 r = chdir("/");
1706 if (r < 0) {
1707 r = log_error_errno(errno, "could not change dir to /: %m");
1708 goto exit;
1709 }
194bbe33 1710
712cebf1 1711 umask(022);
912541b0 1712
c3dacc8b 1713 r = mac_selinux_init();
712cebf1
TG
1714 if (r < 0) {
1715 log_error_errno(r, "could not initialize labelling: %m");
1716 goto exit;
912541b0
KS
1717 }
1718
dae8b82e
ZJS
1719 r = mkdir_errno_wrapper("/run/udev", 0755);
1720 if (r < 0 && r != -EEXIST) {
1721 log_error_errno(r, "could not create /run/udev: %m");
712cebf1
TG
1722 goto exit;
1723 }
1724
03cfe0d5 1725 dev_setup(NULL, UID_INVALID, GID_INVALID);
912541b0 1726
c26d1879
TG
1727 if (getppid() == 1) {
1728 /* Get our own cgroup; we regularly kill everything udev has left behind.
1729 We only do this on systemd systems, and only if we are directly spawned
1730 by PID 1. Otherwise we are not guaranteed to have a dedicated cgroup. */
1731 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
11b9fb15 1732 if (r < 0) {
a2d61f07 1733 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
11b9fb15
TG
1734 log_debug_errno(r, "did not find dedicated cgroup: %m");
1735 else
1736 log_warning_errno(r, "failed to get cgroup: %m");
1737 }
c26d1879
TG
1738 }
1739
b7f74dd4
TG
1740 r = listen_fds(&fd_ctrl, &fd_uevent);
1741 if (r < 0) {
1742 r = log_error_errno(r, "could not listen on fds: %m");
1743 goto exit;
1744 }
1745
bba7a484 1746 if (arg_daemonize) {
912541b0 1747 pid_t pid;
912541b0 1748
948aaa7c 1749 log_info("starting version " PACKAGE_VERSION);
3cbb2057 1750
40e749b5 1751 /* connect /dev/null to stdin, stdout, stderr */
c76cf844
AK
1752 if (log_get_max_level() < LOG_DEBUG) {
1753 r = make_null_stdio();
1754 if (r < 0)
1755 log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
1756 }
1757
912541b0
KS
1758 pid = fork();
1759 switch (pid) {
1760 case 0:
1761 break;
1762 case -1:
6af5e6a4 1763 r = log_error_errno(errno, "fork of daemon failed: %m");
912541b0
KS
1764 goto exit;
1765 default:
f53d1fcd
TG
1766 mac_selinux_finish();
1767 log_close();
1768 _exit(EXIT_SUCCESS);
912541b0
KS
1769 }
1770
1771 setsid();
1772
76cdddfb
YW
1773 r = set_oom_score_adjust(-1000);
1774 if (r < 0)
1775 log_debug_errno(r, "Failed to adjust OOM score, ignoring: %m");
7500cd5e 1776 }
912541b0 1777
077fc5e2 1778 r = run(fd_ctrl, fd_uevent, cgroup);
693d371d 1779
53921bfa 1780exit:
cc56fafe 1781 mac_selinux_finish();
baa30fbc 1782 log_close();
6af5e6a4 1783 return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
7fafc032 1784}