/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright © 2004 Chris Friesen <chris_friesen@sympatico.ca>
 * Copyright © 2009 Canonical Ltd.
 * Copyright © 2009 Scott James Remnant <scott@netsplit.com>
 */

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/file.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

#include "sd-daemon.h"
#include "sd-event.h"

#include "alloc-util.h"
#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "dev-setup.h"
#include "device-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "libudev-device-internal.h"
#include "list.h"
#include "netlink-util.h"
#include "parse-util.h"
#include "proc-cmdline.h"
#include "process-util.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "terminal-util.h"
#include "udev-builtin.h"
#include "udev-ctrl.h"
#include "udev-util.h"
#include "udev-watch.h"
#include "udev.h"
#include "user-util.h"

static bool arg_debug = false;
static int arg_daemonize = false;
static int arg_resolve_names = 1;
static unsigned arg_children_max;
static int arg_exec_delay;
static usec_t arg_event_timeout_usec = 180 * USEC_PER_SEC;
static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
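
/* The defaults above can be overridden on the command line (see parse_argv())
 * or via udev.* kernel command line parameters (see parse_proc_cmdline_item()). */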

typedef struct Manager {
        sd_event *event;
        Hashmap *workers;
        LIST_HEAD(struct event, events);
        const char *cgroup;
        pid_t pid; /* the process that originally allocated the manager object */

        struct udev_rules *rules;
        Hashmap *properties;

        struct udev_monitor *monitor;
        struct udev_ctrl *ctrl;
        struct udev_ctrl_connection *ctrl_conn_blocking;
        int fd_inotify;
        int worker_watch[2];

        sd_event_source *ctrl_event;
        sd_event_source *uevent_event;
        sd_event_source *inotify_event;

        usec_t last_usec;

        bool stop_exec_queue:1;
        bool exit:1;
} Manager;

enum event_state {
        EVENT_UNDEF,
        EVENT_QUEUED,
        EVENT_RUNNING,
};

struct event {
        LIST_FIELDS(struct event, event);
        Manager *manager;
        struct udev_device *dev;
        struct udev_device *dev_kernel;
        struct worker *worker;
        enum event_state state;
        unsigned long long int delaying_seqnum;
        unsigned long long int seqnum;
        const char *devpath;
        size_t devpath_len;
        const char *devpath_old;
        dev_t devnum;
        int ifindex;
        bool is_block;
        sd_event_source *timeout_warning;
        sd_event_source *timeout;
};

static void event_queue_cleanup(Manager *manager, enum event_state type);

enum worker_state {
        WORKER_UNDEF,
        WORKER_RUNNING,
        WORKER_IDLE,
        WORKER_KILLED,
};

struct worker {
        Manager *manager;
        pid_t pid;
        struct udev_monitor *monitor;
        enum worker_state state;
        struct event *event;
};

/* passed from worker to main process */
struct worker_message {
};
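
/* The message body is intentionally empty: the main process only needs to know
 * that a worker finished an event, and it identifies the sender through the
 * SCM_CREDENTIALS ancillary data enabled via SO_PASSCRED on the worker_watch
 * socketpair (see on_worker()). */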

static void event_free(struct event *event) {
        int r;

        if (!event)
                return;
        assert(event->manager);

        LIST_REMOVE(event, event->manager->events, event);
        udev_device_unref(event->dev);
        udev_device_unref(event->dev_kernel);

        sd_event_source_unref(event->timeout_warning);
        sd_event_source_unref(event->timeout);

        if (event->worker)
                event->worker->event = NULL;

        if (LIST_IS_EMPTY(event->manager->events)) {
                /* only clean up the queue from the process that created it */
                if (event->manager->pid == getpid_cached()) {
                        r = unlink("/run/udev/queue");
                        if (r < 0)
                                log_warning_errno(errno, "could not unlink /run/udev/queue: %m");
                }
        }

        free(event);
}

static void worker_free(struct worker *worker) {
        if (!worker)
                return;

        assert(worker->manager);

        hashmap_remove(worker->manager->workers, PID_TO_PTR(worker->pid));
        udev_monitor_unref(worker->monitor);
        event_free(worker->event);

        free(worker);
}

static void manager_workers_free(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i)
                worker_free(worker);

        manager->workers = hashmap_free(manager->workers);
}

static int worker_new(struct worker **ret, Manager *manager, struct udev_monitor *worker_monitor, pid_t pid) {
        _cleanup_free_ struct worker *worker = NULL;
        int r;

        assert(ret);
        assert(manager);
        assert(worker_monitor);
        assert(pid > 1);

        worker = new0(struct worker, 1);
        if (!worker)
                return -ENOMEM;

        worker->manager = manager;
        /* close monitor, but keep address around */
        udev_monitor_disconnect(worker_monitor);
        worker->monitor = udev_monitor_ref(worker_monitor);
        worker->pid = pid;

        r = hashmap_ensure_allocated(&manager->workers, NULL);
        if (r < 0)
                return r;

        r = hashmap_put(manager->workers, PID_TO_PTR(pid), worker);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(worker);

        return 0;
}

static int on_event_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);
        assert(event->worker);

        kill_and_sigcont(event->worker->pid, SIGKILL);
        event->worker->state = WORKER_KILLED;

        log_error("seq %llu '%s' killed", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *userdata) {
        struct event *event = userdata;

        assert(event);

        log_warning("seq %llu '%s' is taking a long time", udev_device_get_seqnum(event->dev), event->devpath);

        return 1;
}

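/* Hand an event to a worker and arm the two per-event timers: a warning at
 * arg_event_timeout_warn_usec (one third of the kill timeout by default) and a
 * hard SIGKILL of the worker at arg_event_timeout_usec. */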
static void worker_attach_event(struct worker *worker, struct event *event) {
        sd_event *e;
        uint64_t usec;

        assert(worker);
        assert(worker->manager);
        assert(event);
        assert(!event->worker);
        assert(!worker->event);

        worker->state = WORKER_RUNNING;
        worker->event = event;
        event->state = EVENT_RUNNING;
        event->worker = worker;

        e = worker->manager->event;

        assert_se(sd_event_now(e, CLOCK_MONOTONIC, &usec) >= 0);

        (void) sd_event_add_time(e, &event->timeout_warning, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);

        (void) sd_event_add_time(e, &event->timeout, CLOCK_MONOTONIC,
                                 usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}

static void manager_free(Manager *manager) {
        if (!manager)
                return;

        udev_builtin_exit();

        sd_event_source_unref(manager->ctrl_event);
        sd_event_source_unref(manager->uevent_event);
        sd_event_source_unref(manager->inotify_event);

        sd_event_unref(manager->event);
        manager_workers_free(manager);
        event_queue_cleanup(manager, EVENT_UNDEF);

        udev_monitor_unref(manager->monitor);
        udev_ctrl_unref(manager->ctrl);
        udev_ctrl_connection_unref(manager->ctrl_conn_blocking);

        hashmap_free_free_free(manager->properties);
        udev_rules_unref(manager->rules);

        safe_close(manager->fd_inotify);
        safe_close_pair(manager->worker_watch);

        free(manager);
}

DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);

static int worker_send_message(int fd) {
        struct worker_message message = {};

        return loop_write(fd, &message, sizeof(message), false);
}

static bool shall_lock_device(struct udev_device *dev) {
        const char *sysname;

        if (!streq_ptr("block", udev_device_get_subsystem(dev)))
                return false;

        sysname = udev_device_get_sysname(dev);
        return !startswith(sysname, "dm-") &&
               !startswith(sysname, "md") &&
               !startswith(sysname, "drbd");
}

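/* Fork a new worker for the given event. The child drops the manager state it
 * inherited, then loops on an epoll set containing its own netlink monitor and a
 * signalfd: it processes one device at a time, reports completion through
 * worker_send_message(), and waits for the next device or SIGTERM. */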
static void worker_spawn(Manager *manager, struct event *event) {
        _cleanup_(udev_monitor_unrefp) struct udev_monitor *worker_monitor = NULL;
        pid_t pid;
        int r = 0;

        /* listen for new events */
        worker_monitor = udev_monitor_new_from_netlink(NULL, NULL);
        if (worker_monitor == NULL)
                return;
        /* allow the main daemon netlink address to send devices to the worker */
        udev_monitor_allow_unicast_sender(worker_monitor, manager->monitor);
        r = udev_monitor_enable_receiving(worker_monitor);
        if (r < 0)
                log_error_errno(r, "worker: could not enable receiving of device: %m");

        pid = fork();
        switch (pid) {
        case 0: {
                struct udev_device *dev = NULL;
                _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
                int fd_monitor;
                _cleanup_close_ int fd_signal = -1, fd_ep = -1;
                struct epoll_event ep_signal = { .events = EPOLLIN };
                struct epoll_event ep_monitor = { .events = EPOLLIN };
                sigset_t mask;

                /* take initial device from queue */
                dev = TAKE_PTR(event->dev);

                unsetenv("NOTIFY_SOCKET");

                manager_workers_free(manager);
                event_queue_cleanup(manager, EVENT_UNDEF);

                manager->monitor = udev_monitor_unref(manager->monitor);
                manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
                manager->ctrl = udev_ctrl_unref(manager->ctrl);
                manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);

                manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
                manager->uevent_event = sd_event_source_unref(manager->uevent_event);
                manager->inotify_event = sd_event_source_unref(manager->inotify_event);

                manager->event = sd_event_unref(manager->event);

                sigfillset(&mask);
                fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
                if (fd_signal < 0) {
                        r = log_error_errno(errno, "error creating signalfd: %m");
                        goto out;
                }
                ep_signal.data.fd = fd_signal;

                fd_monitor = udev_monitor_get_fd(worker_monitor);
                ep_monitor.data.fd = fd_monitor;

                fd_ep = epoll_create1(EPOLL_CLOEXEC);
                if (fd_ep < 0) {
                        r = log_error_errno(errno, "error creating epoll fd: %m");
                        goto out;
                }

                if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
                    epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor) < 0) {
                        r = log_error_errno(errno, "failed to add fds to epoll: %m");
                        goto out;
                }

                /* Request TERM signal if parent exits.
                   Ignore error, not much we can do in that case. */
                (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

                /* Reset OOM score, we only protect the main daemon. */
                write_string_file("/proc/self/oom_score_adj", "0", 0);

                for (;;) {
                        _cleanup_(udev_event_freep) struct udev_event *udev_event = NULL;
                        int fd_lock = -1;

                        assert(dev);

                        log_debug("seq %llu running", udev_device_get_seqnum(dev));
                        udev_event = udev_event_new(dev);
                        if (udev_event == NULL) {
                                r = -ENOMEM;
                                goto out;
                        }

                        if (arg_exec_delay > 0)
                                udev_event->exec_delay = arg_exec_delay;

                        /*
                         * Take a shared lock on the device node; this establishes
                         * a concept of device "ownership" to serialize device
                         * access. External processes holding an exclusive lock will
                         * cause udev to skip the event handling; in the case udev
                         * acquired the lock, the external process can block until
                         * udev has finished its event handling.
                         */
                        if (!streq_ptr(udev_device_get_action(dev), "remove") &&
                            shall_lock_device(dev)) {
                                struct udev_device *d = dev;

                                if (streq_ptr("partition", udev_device_get_devtype(d)))
                                        d = udev_device_get_parent(d);

                                if (d) {
                                        fd_lock = open(udev_device_get_devnode(d), O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                                        if (fd_lock >= 0 && flock(fd_lock, LOCK_SH|LOCK_NB) < 0) {
                                                log_debug_errno(errno, "Unable to flock(%s), skipping event handling: %m", udev_device_get_devnode(d));
                                                fd_lock = safe_close(fd_lock);
                                                goto skip;
                                        }
                                }
                        }

                        /* needed for renaming netifs */
                        udev_event->rtnl = rtnl;

                        /* apply rules, create node, symlinks */
                        udev_event_execute_rules(udev_event,
                                                 arg_event_timeout_usec, arg_event_timeout_warn_usec,
                                                 manager->properties,
                                                 manager->rules);

                        udev_event_execute_run(udev_event,
                                               arg_event_timeout_usec, arg_event_timeout_warn_usec);

                        if (udev_event->rtnl)
                                /* in case rtnl was initialized */
                                rtnl = sd_netlink_ref(udev_event->rtnl);

                        /* apply/restore inotify watch */
                        if (udev_event->inotify_watch) {
                                udev_watch_begin(dev->device);
                                udev_device_update_db(dev);
                        }

                        safe_close(fd_lock);

                        /* send processed event back to libudev listeners */
                        udev_monitor_send_device(worker_monitor, NULL, dev);

skip:
                        log_debug("seq %llu processed", udev_device_get_seqnum(dev));

                        /* send udevd the result of the event execution */
                        r = worker_send_message(manager->worker_watch[WRITE_END]);
                        if (r < 0)
                                log_error_errno(r, "failed to send result of seq %llu to main daemon: %m",
                                                udev_device_get_seqnum(dev));

                        udev_device_unref(dev);
                        dev = NULL;

                        /* wait for more device messages from main udevd, or term signal */
                        while (dev == NULL) {
                                struct epoll_event ev[4];
                                int fdcount;
                                int i;

                                fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
                                if (fdcount < 0) {
                                        if (errno == EINTR)
                                                continue;
                                        r = log_error_errno(errno, "failed to poll: %m");
                                        goto out;
                                }

                                for (i = 0; i < fdcount; i++) {
                                        if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
                                                dev = udev_monitor_receive_device(worker_monitor);
                                                break;
                                        } else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
                                                struct signalfd_siginfo fdsi;
                                                ssize_t size;

                                                size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
                                                if (size != sizeof(struct signalfd_siginfo))
                                                        continue;
                                                switch (fdsi.ssi_signo) {
                                                case SIGTERM:
                                                        goto out;
                                                }
                                        }
                                }
                        }
                }
out:
                udev_device_unref(dev);
                manager_free(manager);
                log_close();
                _exit(r < 0 ? EXIT_FAILURE : EXIT_SUCCESS);
        }
        case -1:
                event->state = EVENT_QUEUED;
                log_error_errno(errno, "fork of child failed: %m");
                break;
        default:
        {
                struct worker *worker;

                r = worker_new(&worker, manager, worker_monitor, pid);
                if (r < 0)
                        return;

                worker_attach_event(worker, event);

                log_debug("seq %llu forked new worker ["PID_FMT"]", udev_device_get_seqnum(event->dev), pid);
                break;
        }
        }
}

static void event_run(Manager *manager, struct event *event) {
        struct worker *worker;
        Iterator i;

        assert(manager);
        assert(event);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                ssize_t count;

                if (worker->state != WORKER_IDLE)
                        continue;

                count = udev_monitor_send_device(manager->monitor, worker->monitor, event->dev);
                if (count < 0) {
                        log_error_errno(errno, "worker ["PID_FMT"] did not accept message %zi (%m), kill it",
                                        worker->pid, count);
                        (void) kill(worker->pid, SIGKILL);
                        worker->state = WORKER_KILLED;
                        continue;
                }
                worker_attach_event(worker, event);
                return;
        }

        if (hashmap_size(manager->workers) >= arg_children_max) {
                if (arg_children_max > 1)
                        log_debug("maximum number (%i) of children reached", hashmap_size(manager->workers));
                return;
        }

        /* start new worker and pass initial device */
        worker_spawn(manager, event);
}

static int event_queue_insert(Manager *manager, struct udev_device *dev) {
        struct event *event;
        int r;

        assert(manager);
        assert(dev);

        /* only one process can add events to the queue */
        if (manager->pid == 0)
                manager->pid = getpid_cached();

        assert(manager->pid == getpid_cached());

        event = new0(struct event, 1);
        if (!event)
                return -ENOMEM;

        event->manager = manager;
        event->dev = dev;
        event->dev_kernel = udev_device_shallow_clone(dev);
        udev_device_copy_properties(event->dev_kernel, dev);
        event->seqnum = udev_device_get_seqnum(dev);
        event->devpath = udev_device_get_devpath(dev);
        event->devpath_len = strlen(event->devpath);
        event->devpath_old = udev_device_get_devpath_old(dev);
        event->devnum = udev_device_get_devnum(dev);
        event->is_block = streq("block", udev_device_get_subsystem(dev));
        event->ifindex = udev_device_get_ifindex(dev);

        log_debug("seq %llu queued, '%s' '%s'", udev_device_get_seqnum(dev),
                  udev_device_get_action(dev), udev_device_get_subsystem(dev));

        event->state = EVENT_QUEUED;

        if (LIST_IS_EMPTY(manager->events)) {
                r = touch("/run/udev/queue");
                if (r < 0)
                        log_warning_errno(r, "could not touch /run/udev/queue: %m");
        }

        LIST_APPEND(event, manager->events, event);

        return 0;
}

static void manager_kill_workers(Manager *manager) {
        struct worker *worker;
        Iterator i;

        assert(manager);

        HASHMAP_FOREACH(worker, manager->workers, i) {
                if (worker->state == WORKER_KILLED)
                        continue;

                worker->state = WORKER_KILLED;
                (void) kill(worker->pid, SIGTERM);
        }
}

/* lookup event for identical, parent, child device */
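/* An event has to wait as long as an earlier queued event refers to the same
 * device node, the same network interface, its old devpath, or a parent/child
 * devpath; delaying_seqnum remembers the blocking event so later scans can skip
 * everything before it. */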
static bool is_devpath_busy(Manager *manager, struct event *event) {
        struct event *loop_event;
        size_t common;

        /* check if queue contains events we depend on */
        LIST_FOREACH(event, loop_event, manager->events) {
                /* we already found a later event, earlier cannot block us, no need to check again */
                if (loop_event->seqnum < event->delaying_seqnum)
                        continue;

                /* event we checked earlier still exists, no need to check again */
                if (loop_event->seqnum == event->delaying_seqnum)
                        return true;

                /* found ourself, no later event can block us */
                if (loop_event->seqnum >= event->seqnum)
                        break;

                /* check major/minor */
                if (major(event->devnum) != 0 && event->devnum == loop_event->devnum && event->is_block == loop_event->is_block)
                        return true;

                /* check network device ifindex */
                if (event->ifindex != 0 && event->ifindex == loop_event->ifindex)
                        return true;

                /* check our old name */
                if (event->devpath_old != NULL && streq(loop_event->devpath, event->devpath_old)) {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* compare devpath */
                common = MIN(loop_event->devpath_len, event->devpath_len);

                /* one devpath is contained in the other? */
                if (memcmp(loop_event->devpath, event->devpath, common) != 0)
                        continue;

                /* identical device event found */
                if (loop_event->devpath_len == event->devpath_len) {
                        /* devices names might have changed/swapped in the meantime */
                        if (major(event->devnum) != 0 && (event->devnum != loop_event->devnum || event->is_block != loop_event->is_block))
                                continue;
                        if (event->ifindex != 0 && event->ifindex != loop_event->ifindex)
                                continue;
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* parent device event found */
                if (event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* child device event found */
                if (loop_event->devpath[common] == '/') {
                        event->delaying_seqnum = loop_event->seqnum;
                        return true;
                }

                /* no matching device */
                continue;
        }

        return false;
}

static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");

        sd_event_exit(manager->event, -ETIMEDOUT);

        return 1;
}

static void manager_exit(Manager *manager) {
        uint64_t usec;
        int r;

        assert(manager);

        manager->exit = true;

        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Starting shutdown...");

        /* close sources of new events and discard buffered events */
        manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
        manager->ctrl = udev_ctrl_unref(manager->ctrl);

        manager->inotify_event = sd_event_source_unref(manager->inotify_event);
        manager->fd_inotify = safe_close(manager->fd_inotify);

        manager->uevent_event = sd_event_source_unref(manager->uevent_event);
        manager->monitor = udev_monitor_unref(manager->monitor);

        /* discard queued events and kill workers */
        event_queue_cleanup(manager, EVENT_QUEUED);
        manager_kill_workers(manager);

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);

        r = sd_event_add_time(manager->event, NULL, CLOCK_MONOTONIC,
                              usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
        if (r < 0)
                return;
}

/* reload requested, HUP signal received, rules changed, builtin changed */
static void manager_reload(Manager *manager) {

        assert(manager);

        sd_notify(false,
                  "RELOADING=1\n"
                  "STATUS=Flushing configuration...");

        manager_kill_workers(manager);
        manager->rules = udev_rules_unref(manager->rules);
        udev_builtin_exit();

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Processing with %u children at max", arg_children_max);
}

static void event_queue_start(Manager *manager) {
        struct event *event;
        usec_t usec;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events) ||
            manager->exit || manager->stop_exec_queue)
                return;

        assert_se(sd_event_now(manager->event, CLOCK_MONOTONIC, &usec) >= 0);
        /* check for changed config, every 3 seconds at most */
        if (manager->last_usec == 0 ||
            (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
                if (udev_rules_check_timestamp(manager->rules) ||
                    udev_builtin_validate())
                        manager_reload(manager);

                manager->last_usec = usec;
        }

        udev_builtin_init();

        if (!manager->rules) {
                manager->rules = udev_rules_new(arg_resolve_names);
                if (!manager->rules)
                        return;
        }

        LIST_FOREACH(event, event, manager->events) {
                if (event->state != EVENT_QUEUED)
                        continue;

                /* do not start event if parent or child event is still running */
                if (is_devpath_busy(manager, event))
                        continue;

                event_run(manager, event);
        }
}

static void event_queue_cleanup(Manager *manager, enum event_state match_type) {
        struct event *event, *tmp;

        LIST_FOREACH_SAFE(event, event, tmp, manager->events) {
                if (match_type != EVENT_UNDEF && match_type != event->state)
                        continue;

                event_free(event);
        }
}

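/* Called when a worker wrote its (empty) completion message to the socketpair:
 * look up the sender by the credentials passed with the message, mark it idle,
 * free the event it was handling, and try to schedule queued events again. */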
static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                struct worker_message msg;
                struct iovec iovec = {
                        .iov_base = &msg,
                        .iov_len = sizeof(msg),
                };
                union {
                        struct cmsghdr cmsghdr;
                        uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
                } control = {};
                struct msghdr msghdr = {
                        .msg_iov = &iovec,
                        .msg_iovlen = 1,
                        .msg_control = &control,
                        .msg_controllen = sizeof(control),
                };
                struct cmsghdr *cmsg;
                ssize_t size;
                struct ucred *ucred = NULL;
                struct worker *worker;

                size = recvmsg(fd, &msghdr, MSG_DONTWAIT);
                if (size < 0) {
                        if (errno == EINTR)
                                continue;
                        else if (errno == EAGAIN)
                                /* nothing more to read */
                                break;

                        return log_error_errno(errno, "failed to receive message: %m");
                } else if (size != sizeof(struct worker_message)) {
                        log_warning_errno(EIO, "ignoring worker message with invalid size %zi bytes", size);
                        continue;
                }

                CMSG_FOREACH(cmsg, &msghdr) {
                        if (cmsg->cmsg_level == SOL_SOCKET &&
                            cmsg->cmsg_type == SCM_CREDENTIALS &&
                            cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
                                ucred = (struct ucred*) CMSG_DATA(cmsg);
                }

                if (!ucred || ucred->pid <= 0) {
                        log_warning_errno(EIO, "ignoring worker message without valid PID");
                        continue;
                }

                /* lookup worker who sent the signal */
                worker = hashmap_get(manager->workers, PID_TO_PTR(ucred->pid));
                if (!worker) {
                        log_debug("worker ["PID_FMT"] returned, but is no longer tracked", ucred->pid);
                        continue;
                }

                if (worker->state != WORKER_KILLED)
                        worker->state = WORKER_IDLE;

                /* worker returned */
                event_free(worker->event);
        }

        /* we have free workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        struct udev_device *dev;
        int r;

        assert(manager);

        dev = udev_monitor_receive_device(manager->monitor);
        if (dev) {
                udev_device_ensure_usec_initialized(dev, NULL);
                r = event_queue_insert(manager, dev);
                if (r < 0)
                        udev_device_unref(dev);
                else
                        /* we have fresh events, try to schedule them */
                        event_queue_start(manager);
        }

        return 1;
}

/* receive the udevd message from userspace */
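/* These control messages are typically generated by "udevadm control", e.g.
 * --log-priority=, --stop-exec-queue/--start-exec-queue, --reload,
 * --property=KEY=VALUE, --children-max= and --exit (option names as assumed
 * here; see udevadm(8) for the authoritative list). */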
static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        _cleanup_(udev_ctrl_connection_unrefp) struct udev_ctrl_connection *ctrl_conn = NULL;
        _cleanup_(udev_ctrl_msg_unrefp) struct udev_ctrl_msg *ctrl_msg = NULL;
        const char *str;
        int i, r;

        assert(manager);

        ctrl_conn = udev_ctrl_get_connection(manager->ctrl);
        if (!ctrl_conn)
                return 1;

        ctrl_msg = udev_ctrl_receive_msg(ctrl_conn);
        if (!ctrl_msg)
                return 1;

        i = udev_ctrl_get_set_log_level(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_LOG_LEVEL) received, log_priority=%i", i);
                log_set_max_level(i);
                manager_kill_workers(manager);
        }

        if (udev_ctrl_get_stop_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (STOP_EXEC_QUEUE) received");
                manager->stop_exec_queue = true;
        }

        if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
                log_debug("udevd message (START_EXEC_QUEUE) received");
                manager->stop_exec_queue = false;
                event_queue_start(manager);
        }

        if (udev_ctrl_get_reload(ctrl_msg) > 0) {
                log_debug("udevd message (RELOAD) received");
                manager_reload(manager);
        }

        str = udev_ctrl_get_set_env(ctrl_msg);
        if (str) {
                _cleanup_free_ char *key = NULL, *val = NULL, *old_key = NULL, *old_val = NULL;
                char *eq;

                eq = strchr(str, '=');
                if (!eq) {
                        log_error("Invalid key format '%s'", str);
                        return 1;
                }

                key = strndup(str, eq - str);
                if (!key) {
                        log_oom();
                        return 1;
                }

                old_val = hashmap_remove2(manager->properties, key, (void **) &old_key);

                r = hashmap_ensure_allocated(&manager->properties, &string_hash_ops);
                if (r < 0) {
                        log_oom();
                        return 1;
                }

                eq++;
                if (isempty(eq)) {
                        log_debug("udevd message (ENV) received, unset '%s'", key);

                        r = hashmap_put(manager->properties, key, NULL);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                } else {
                        val = strdup(eq);
                        if (!val) {
                                log_oom();
                                return 1;
                        }

                        log_debug("udevd message (ENV) received, set '%s=%s'", key, val);

                        r = hashmap_put(manager->properties, key, val);
                        if (r < 0) {
                                log_oom();
                                return 1;
                        }
                }

                key = val = NULL;
                manager_kill_workers(manager);
        }

        i = udev_ctrl_get_set_children_max(ctrl_msg);
        if (i >= 0) {
                log_debug("udevd message (SET_MAX_CHILDREN) received, children_max=%i", i);
                arg_children_max = i;

                (void) sd_notifyf(false,
                                  "READY=1\n"
                                  "STATUS=Processing with %u children at max", arg_children_max);
        }

        if (udev_ctrl_get_ping(ctrl_msg) > 0)
                log_debug("udevd message (SYNC) received");

        if (udev_ctrl_get_exit(ctrl_msg) > 0) {
                log_debug("udevd message (EXIT) received");
                manager_exit(manager);
                /* keep reference to block the client until we exit
                   TODO: deal with several blocking exit requests */
                manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
        }

        return 1;
}

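/* A process closed a device node that udev watches for writes. Synthesize a
 * "change" uevent so the rules run again; for whole disks, first try to re-read
 * the partition table (BLKRRPART) and fall back to writing "change" to the
 * uevent file of the disk and each partition. */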
static int synthesize_change(sd_device *dev) {
        const char *subsystem, *sysname, *devname, *syspath, *devtype;
        char filename[PATH_MAX];
        int r;

        r = sd_device_get_subsystem(dev, &subsystem);
        if (r < 0)
                return r;

        r = sd_device_get_sysname(dev, &sysname);
        if (r < 0)
                return r;

        r = sd_device_get_devname(dev, &devname);
        if (r < 0)
                return r;

        r = sd_device_get_syspath(dev, &syspath);
        if (r < 0)
                return r;

        r = sd_device_get_devtype(dev, &devtype);
        if (r < 0)
                return r;

        if (streq_ptr("block", subsystem) &&
            streq_ptr("disk", devtype) &&
            !startswith(sysname, "dm-")) {
                _cleanup_(sd_device_enumerator_unrefp) sd_device_enumerator *e = NULL;
                bool part_table_read = false, has_partitions = false;
                sd_device *d;
                int fd;

                /*
                 * Try to re-read the partition table. This only succeeds if
                 * none of the devices is busy. The kernel returns 0 if no
                 * partition table is found, and we will not get an event for
                 * the disk.
                 */
                fd = open(devname, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
                if (fd >= 0) {
                        r = flock(fd, LOCK_EX|LOCK_NB);
                        if (r >= 0)
                                r = ioctl(fd, BLKRRPART, 0);

                        close(fd);
                        if (r >= 0)
                                part_table_read = true;
                }

                /* search for partitions */
                r = sd_device_enumerator_new(&e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_allow_uninitialized(e);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_parent(e, dev);
                if (r < 0)
                        return r;

                r = sd_device_enumerator_add_match_subsystem(e, "block", true);
                if (r < 0)
                        return r;

                FOREACH_DEVICE(e, d) {
                        const char *t;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        has_partitions = true;
                        break;
                }

                /*
                 * We have partitions and re-read the table, the kernel already sent
                 * out a "change" event for the disk, and "remove/add" for all
                 * partitions.
                 */
                if (part_table_read && has_partitions)
                        return 0;

                /*
                 * We have partitions but re-reading the partition table did not
                 * work, synthesize "change" for the disk and all partitions.
                 */
                log_debug("Device '%s' is closed, synthesising 'change'", devname);
                strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
                write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

                FOREACH_DEVICE(e, d) {
                        const char *t, *n, *s;

                        if (sd_device_get_devtype(d, &t) < 0 ||
                            !streq("partition", t))
                                continue;

                        if (sd_device_get_devname(d, &n) < 0 ||
                            sd_device_get_syspath(d, &s) < 0)
                                continue;

                        log_debug("Device '%s' is closed, synthesising partition '%s' 'change'", devname, n);
                        strscpyl(filename, sizeof(filename), s, "/uevent", NULL);
                        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);
                }

                return 0;
        }

        log_debug("Device %s is closed, synthesising 'change'", devname);
        strscpyl(filename, sizeof(filename), syspath, "/uevent", NULL);
        write_string_file(filename, "change", WRITE_STRING_FILE_CREATE);

        return 0;
}

static int on_inotify(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *manager = userdata;
        union inotify_event_buffer buffer;
        struct inotify_event *e;
        ssize_t l;

        assert(manager);

        l = read(fd, &buffer, sizeof(buffer));
        if (l < 0) {
                if (IN_SET(errno, EAGAIN, EINTR))
                        return 1;

                return log_error_errno(errno, "Failed to read inotify fd: %m");
        }

        FOREACH_INOTIFY_EVENT(e, buffer, l) {
                _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
                const char *devnode;

                if (udev_watch_lookup(e->wd, &dev) < 0)
                        continue;

                if (sd_device_get_devname(dev, &devnode) < 0)
                        continue;

                log_debug("inotify event: %x for %s", e->mask, devnode);
                if (e->mask & IN_CLOSE_WRITE) {
                        synthesize_change(dev);

                        /* settle might be waiting on us to determine the queue
                         * state. If we just handled an inotify event, we might have
                         * generated a "change" event, but we won't have queued up
                         * the resultant uevent yet. Do that.
                         */
                        on_uevent(NULL, -1, 0, manager);
                } else if (e->mask & IN_IGNORED)
                        udev_watch_end(dev);
        }

        return 1;
}

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_exit(manager);

        return 1;
}

static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        manager_reload(manager);

        return 1;
}

static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        Manager *manager = userdata;

        assert(manager);

        for (;;) {
                pid_t pid;
                int status;
                struct worker *worker;

                pid = waitpid(-1, &status, WNOHANG);
                if (pid <= 0)
                        break;

                worker = hashmap_get(manager->workers, PID_TO_PTR(pid));
                if (!worker) {
                        log_warning("worker ["PID_FMT"] is unknown, ignoring", pid);
                        continue;
                }

                if (WIFEXITED(status)) {
                        if (WEXITSTATUS(status) == 0)
                                log_debug("worker ["PID_FMT"] exited", pid);
                        else
                                log_warning("worker ["PID_FMT"] exited with return code %i", pid, WEXITSTATUS(status));
                } else if (WIFSIGNALED(status)) {
                        log_warning("worker ["PID_FMT"] terminated by signal %i (%s)", pid, WTERMSIG(status), signal_to_string(WTERMSIG(status)));
                } else if (WIFSTOPPED(status)) {
                        log_info("worker ["PID_FMT"] stopped", pid);
                        continue;
                } else if (WIFCONTINUED(status)) {
                        log_info("worker ["PID_FMT"] continued", pid);
                        continue;
                } else
                        log_warning("worker ["PID_FMT"] exit with status 0x%04x", pid, status);

                if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
                        if (worker->event) {
                                log_error("worker ["PID_FMT"] failed while handling '%s'", pid, worker->event->devpath);
                                /* delete state from disk */
                                udev_device_delete_db(worker->event->dev);
                                udev_device_tag_index(worker->event->dev, NULL, false);
                                /* forward kernel event without amending it */
                                udev_monitor_send_device(manager->monitor, NULL, worker->event->dev_kernel);
                        }
                }

                worker_free(worker);
        }

        /* we can start new workers, try to schedule events */
        event_queue_start(manager);

        return 1;
}

static int on_post(sd_event_source *s, void *userdata) {
        Manager *manager = userdata;
        int r;

        assert(manager);

        if (LIST_IS_EMPTY(manager->events)) {
                /* no pending events */
                if (!hashmap_isempty(manager->workers)) {
                        /* there are idle workers */
                        log_debug("cleanup idle workers");
                        manager_kill_workers(manager);
                } else {
                        /* we are idle */
                        if (manager->exit) {
                                r = sd_event_exit(manager->event, 0);
                                if (r < 0)
                                        return r;
                        } else if (manager->cgroup)
                                /* cleanup possible left-over processes in our cgroup */
                                cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, CGROUP_IGNORE_SELF, NULL, NULL, NULL);
                }
        }

        return 1;
}

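/* Take the control socket (AF_LOCAL/SOCK_SEQPACKET) and the kernel uevent
 * netlink socket from systemd socket activation if they were passed in;
 * otherwise create and bind them here. The returned fds are duplicated with
 * F_DUPFD_CLOEXEC so they stay valid after the temporary objects are released. */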
static int listen_fds(int *rctrl, int *rnetlink) {
        int ctrl_fd = -1, netlink_fd = -1;
        int fd, n, r;

        assert(rctrl);
        assert(rnetlink);

        n = sd_listen_fds(true);
        if (n < 0)
                return n;

        for (fd = SD_LISTEN_FDS_START; fd < n + SD_LISTEN_FDS_START; fd++) {
                if (sd_is_socket(fd, AF_LOCAL, SOCK_SEQPACKET, -1)) {
                        if (ctrl_fd >= 0)
                                return -EINVAL;
                        ctrl_fd = fd;
                        continue;
                }

                if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1)) {
                        if (netlink_fd >= 0)
                                return -EINVAL;
                        netlink_fd = fd;
                        continue;
                }

                return -EINVAL;
        }

        if (ctrl_fd < 0) {
                _cleanup_(udev_ctrl_unrefp) struct udev_ctrl *ctrl = NULL;

                ctrl = udev_ctrl_new();
                if (!ctrl)
                        return log_error_errno(EINVAL, "error initializing udev control socket");

                r = udev_ctrl_enable_receiving(ctrl);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding udev control socket");

                fd = udev_ctrl_get_fd(ctrl);
                if (fd < 0)
                        return log_error_errno(EIO, "could not get ctrl fd");

                ctrl_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (ctrl_fd < 0)
                        return log_error_errno(errno, "could not dup ctrl fd: %m");
        }

        if (netlink_fd < 0) {
                _cleanup_(udev_monitor_unrefp) struct udev_monitor *monitor = NULL;

                monitor = udev_monitor_new_from_netlink(NULL, "kernel");
                if (!monitor)
                        return log_error_errno(EINVAL, "error initializing netlink socket");

                (void) udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024);

                r = udev_monitor_enable_receiving(monitor);
                if (r < 0)
                        return log_error_errno(EINVAL, "error binding netlink socket");

                fd = udev_monitor_get_fd(monitor);
                if (fd < 0)
                        return log_error_errno(fd, "could not get uevent fd: %m");

                netlink_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                if (netlink_fd < 0)
                        return log_error_errno(errno, "could not dup netlink fd: %m");
        }

        *rctrl = ctrl_fd;
        *rnetlink = netlink_fd;

        return 0;
}

/*
 * read the kernel command line, in case we need to get into debug mode
 *   udev.log_priority=<level>              syslog priority
 *   udev.children_max=<number of workers>  events are fully serialized if set to 1
 *   udev.exec_delay=<number of seconds>    delay execution of every executed program
 *   udev.event_timeout=<number of seconds> seconds to wait before terminating an event
 */
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
        int r = 0;

        assert(key);

        if (!value)
                return 0;

        if (proc_cmdline_key_streq(key, "udev.log_priority")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = util_log_priority(value);
                if (r >= 0)
                        log_set_max_level(r);

        } else if (proc_cmdline_key_streq(key, "udev.event_timeout")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou64(value, &arg_event_timeout_usec);
                if (r >= 0) {
                        arg_event_timeout_usec *= USEC_PER_SEC;
                        arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                }

        } else if (proc_cmdline_key_streq(key, "udev.children_max")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atou(value, &arg_children_max);

        } else if (proc_cmdline_key_streq(key, "udev.exec_delay")) {

                if (proc_cmdline_value_missing(key, value))
                        return 0;

                r = safe_atoi(value, &arg_exec_delay);

        } else if (startswith(key, "udev."))
                log_warning("Unknown udev kernel command line option \"%s\"", key);

        if (r < 0)
                log_warning_errno(r, "Failed to parse \"%s=%s\", ignoring: %m", key, value);

        return 0;
}

static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-udevd.service", "8", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...]\n\n"
               "Manages devices.\n\n"
               "  -h --help                   Print this message\n"
               "  -V --version                Print version of the program\n"
               "  -d --daemon                 Detach and run in the background\n"
               "  -D --debug                  Enable debug output\n"
               "  -c --children-max=INT       Set maximum number of workers\n"
               "  -e --exec-delay=SECONDS     Seconds to wait before executing RUN=\n"
               "  -t --event-timeout=SECONDS  Seconds to wait before terminating an event\n"
               "  -N --resolve-names=early|late|never\n"
               "                              When to resolve users and groups\n"
               "\nSee the %s for details.\n"
               , program_invocation_short_name
               , link
        );

        return 0;
}

static int parse_argv(int argc, char *argv[]) {
        static const struct option options[] = {
                { "daemon",        no_argument,       NULL, 'd' },
                { "debug",         no_argument,       NULL, 'D' },
                { "children-max",  required_argument, NULL, 'c' },
                { "exec-delay",    required_argument, NULL, 'e' },
                { "event-timeout", required_argument, NULL, 't' },
                { "resolve-names", required_argument, NULL, 'N' },
                { "help",          no_argument,       NULL, 'h' },
                { "version",       no_argument,       NULL, 'V' },
                {}
        };

        int c;

        assert(argc >= 0);
        assert(argv);

        while ((c = getopt_long(argc, argv, "c:de:Dt:N:hV", options, NULL)) >= 0) {
                int r;

                switch (c) {

                case 'd':
                        arg_daemonize = true;
                        break;
                case 'c':
                        r = safe_atou(optarg, &arg_children_max);
                        if (r < 0)
                                log_warning("Invalid --children-max ignored: %s", optarg);
                        break;
                case 'e':
                        r = safe_atoi(optarg, &arg_exec_delay);
                        if (r < 0)
                                log_warning("Invalid --exec-delay ignored: %s", optarg);
                        break;
                case 't':
                        r = safe_atou64(optarg, &arg_event_timeout_usec);
                        if (r < 0)
                                log_warning("Invalid --event-timeout ignored: %s", optarg);
                        else {
                                arg_event_timeout_usec *= USEC_PER_SEC;
                                arg_event_timeout_warn_usec = (arg_event_timeout_usec / 3) ? : 1;
                        }
                        break;
                case 'D':
                        arg_debug = true;
                        break;
                case 'N':
                        if (streq(optarg, "early")) {
                                arg_resolve_names = 1;
                        } else if (streq(optarg, "late")) {
                                arg_resolve_names = 0;
                        } else if (streq(optarg, "never")) {
                                arg_resolve_names = -1;
                        } else {
                                log_error("resolve-names must be early, late or never");
                                return 0;
                        }
                        break;
                case 'h':
                        return help();
                case 'V':
                        printf("%s\n", PACKAGE_VERSION);
                        return 0;
                case '?':
                        return -EINVAL;
                default:
                        assert_not_reached("Unhandled option");

                }
        }

        return 1;
}

static int manager_new(Manager **ret, int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r, fd_worker;

        assert(ret);
        assert(fd_ctrl >= 0);
        assert(fd_uevent >= 0);

        manager = new0(Manager, 1);
        if (!manager)
                return log_oom();

        manager->fd_inotify = -1;
        manager->worker_watch[WRITE_END] = -1;
        manager->worker_watch[READ_END] = -1;

        udev_builtin_init();

        manager->rules = udev_rules_new(arg_resolve_names);
        if (!manager->rules)
                return log_error_errno(ENOMEM, "error reading rules");

        LIST_HEAD_INIT(manager->events);

        manager->cgroup = cgroup;

        manager->ctrl = udev_ctrl_new_from_fd(fd_ctrl);
        if (!manager->ctrl)
                return log_error_errno(EINVAL, "error taking over udev control socket");

        manager->monitor = udev_monitor_new_from_netlink_fd(NULL, "kernel", fd_uevent);
        if (!manager->monitor)
                return log_error_errno(EINVAL, "error taking over netlink socket");

        /* unnamed socket from workers to the main daemon */
        r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
        if (r < 0)
                return log_error_errno(errno, "error creating socketpair: %m");

        fd_worker = manager->worker_watch[READ_END];

        r = setsockopt_int(fd_worker, SOL_SOCKET, SO_PASSCRED, true);
        if (r < 0)
                return log_error_errno(r, "could not enable SO_PASSCRED: %m");

        manager->fd_inotify = udev_watch_init();
        if (manager->fd_inotify < 0)
                return log_error_errno(ENOMEM, "error initializing inotify");

        udev_watch_restore();

        /* block and listen to all signals on signalfd */
        assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGTERM, SIGINT, SIGHUP, SIGCHLD, -1) >= 0);

        r = sd_event_default(&manager->event);
        if (r < 0)
                return log_error_errno(r, "could not allocate event loop: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigint event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigterm event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sighup event source: %m");

        r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
        if (r < 0)
                return log_error_errno(r, "error creating sigchld event source: %m");

        r = sd_event_set_watchdog(manager->event, true);
        if (r < 0)
                return log_error_errno(r, "error creating watchdog event source: %m");

        r = sd_event_add_io(manager->event, &manager->ctrl_event, fd_ctrl, EPOLLIN, on_ctrl_msg, manager);
        if (r < 0)
                return log_error_errno(r, "error creating ctrl event source: %m");

        /* This needs to be after the inotify and uevent handling, to make sure
         * that the ping is sent back after fully processing the pending uevents
         * (including the synthetic ones we may create due to inotify events).
         */
        r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return log_error_errno(r, "could not set IDLE event priority for ctrl event source: %m");

        r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
        if (r < 0)
                return log_error_errno(r, "error creating inotify event source: %m");

        r = sd_event_add_io(manager->event, &manager->uevent_event, fd_uevent, EPOLLIN, on_uevent, manager);
        if (r < 0)
                return log_error_errno(r, "error creating uevent event source: %m");

        r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
        if (r < 0)
                return log_error_errno(r, "error creating worker event source: %m");

        r = sd_event_add_post(manager->event, NULL, on_post, manager);
        if (r < 0)
                return log_error_errno(r, "error creating post event source: %m");

        *ret = TAKE_PTR(manager);

        return 0;
}

static int run(int fd_ctrl, int fd_uevent, const char *cgroup) {
        _cleanup_(manager_freep) Manager *manager = NULL;
        int r;

        r = manager_new(&manager, fd_ctrl, fd_uevent, cgroup);
        if (r < 0) {
                r = log_error_errno(r, "failed to allocate manager object: %m");
                goto exit;
        }

        r = udev_rules_apply_static_dev_perms(manager->rules);
        if (r < 0)
                log_error_errno(r, "failed to apply permissions on static device nodes: %m");

        (void) sd_notifyf(false,
                          "READY=1\n"
                          "STATUS=Processing with %u children at max", arg_children_max);

        r = sd_event_loop(manager->event);
        if (r < 0) {
                log_error_errno(r, "event loop failed: %m");
                goto exit;
        }

        sd_event_get_exit_code(manager->event, &r);

exit:
        sd_notify(false,
                  "STOPPING=1\n"
                  "STATUS=Shutting down...");
        if (manager)
                udev_ctrl_cleanup(manager->ctrl);
        return r;
}

int main(int argc, char *argv[]) {
        _cleanup_free_ char *cgroup = NULL;
        int fd_ctrl = -1, fd_uevent = -1;
        int r;

        log_set_target(LOG_TARGET_AUTO);
        udev_parse_config();
        log_parse_environment();
        log_open();

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto exit;

        r = proc_cmdline_parse(parse_proc_cmdline_item, NULL, PROC_CMDLINE_STRIP_RD_PREFIX);
        if (r < 0)
                log_warning_errno(r, "failed to parse kernel command line, ignoring: %m");

        if (arg_debug) {
                log_set_target(LOG_TARGET_CONSOLE);
                log_set_max_level(LOG_DEBUG);
        }

        r = must_be_root();
        if (r < 0)
                goto exit;

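        /* If no children_max was configured, derive it from the hardware below:
         * a base of 8 workers plus two per online CPU, capped at one worker per
         * 128 MiB of RAM, but never fewer than 10. */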
        if (arg_children_max == 0) {
                cpu_set_t cpu_set;
                unsigned long mem_limit;

                arg_children_max = 8;

                if (sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == 0)
                        arg_children_max += CPU_COUNT(&cpu_set) * 2;

                mem_limit = physical_memory() / (128LU*1024*1024);
                arg_children_max = MAX(10U, MIN(arg_children_max, mem_limit));

                log_debug("set children_max to %u", arg_children_max);
        }

        /* set umask before creating any file/directory */
        r = chdir("/");
        if (r < 0) {
                r = log_error_errno(errno, "could not change dir to /: %m");
                goto exit;
        }

        umask(022);

        r = mac_selinux_init();
        if (r < 0) {
                log_error_errno(r, "could not initialize labelling: %m");
                goto exit;
        }

        r = mkdir_errno_wrapper("/run/udev", 0755);
        if (r < 0 && r != -EEXIST) {
                log_error_errno(r, "could not create /run/udev: %m");
                goto exit;
        }

        dev_setup(NULL, UID_INVALID, GID_INVALID);

        if (getppid() == 1) {
                /* get our own cgroup, we regularly kill everything udev has left behind
                   we only do this on systemd systems, and only if we are directly spawned
                   by PID1. otherwise we are not guaranteed to have a dedicated cgroup */
                r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &cgroup);
                if (r < 0) {
                        if (IN_SET(r, -ENOENT, -ENOMEDIUM))
                                log_debug_errno(r, "did not find dedicated cgroup: %m");
                        else
                                log_warning_errno(r, "failed to get cgroup: %m");
                }
        }

        r = listen_fds(&fd_ctrl, &fd_uevent);
        if (r < 0) {
                r = log_error_errno(r, "could not listen on fds: %m");
                goto exit;
        }

        if (arg_daemonize) {
                pid_t pid;

                log_info("starting version " PACKAGE_VERSION);

                /* connect /dev/null to stdin, stdout, stderr */
                if (log_get_max_level() < LOG_DEBUG) {
                        r = make_null_stdio();
                        if (r < 0)
                                log_warning_errno(r, "Failed to redirect standard streams to /dev/null: %m");
                }

                pid = fork();
                switch (pid) {
                case 0:
                        break;
                case -1:
                        r = log_error_errno(errno, "fork of daemon failed: %m");
                        goto exit;
                default:
                        mac_selinux_finish();
                        log_close();
                        _exit(EXIT_SUCCESS);
                }

                setsid();

                write_string_file("/proc/self/oom_score_adj", "-1000", 0);
        }

        r = run(fd_ctrl, fd_uevent, cgroup);

exit:
        mac_selinux_finish();
        log_close();
        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}