/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <poll.h>

#include "sd-netlink.h"

#include "alloc-util.h"
#include "fd-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "macro.h"
#include "netlink-genl.h"
#include "netlink-internal.h"
#include "netlink-slot.h"
#include "process-util.h"
#include "socket-util.h"
#include "string-util.h"

/* Some really high limit, to catch programming errors */
#define REPLY_CALLBACKS_MAX UINT16_MAX

static int netlink_new(sd_netlink **ret) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;

        assert_return(ret, -EINVAL);

        nl = new(sd_netlink, 1);
        if (!nl)
                return -ENOMEM;

        *nl = (sd_netlink) {
                .n_ref = 1,
                .fd = -1,
                .sockaddr.nl.nl_family = AF_NETLINK,
                .original_pid = getpid_cached(),
                .protocol = -1,

                /* Kernel change notification messages have sequence number 0. We want to avoid that with our
                 * own serials, in order not to get confused when matching up kernel replies to our earlier
                 * requests.
                 *
                 * Moreover, when using netlink socket activation (i.e. where PID 1 binds an AF_NETLINK
                 * socket for us and passes it to us across execve()) and we get restarted multiple times
                 * while the socket sticks around we might get confused by replies from earlier runs coming
                 * in late — which is pretty likely if we'd start our sequence numbers always from 1. Hence,
                 * let's start with a value based on the system clock. This should make collisions much less
                 * likely (though still theoretically possible). We use a 32 bit µs counter starting at boot
                 * for this (and explicitly exclude the zero, see above). This counter will wrap around after
                 * a bit more than 1h, but that's hopefully OK as the kernel shouldn't take that long to
                 * reply to our requests.
                 *
                 * We only pick the initial start value this way. For each message we simply increase the
                 * sequence number by 1. This means we could enqueue 1 netlink message per µs without risking
                 * collisions, which should be OK.
                 *
                 * Note this means the serials will be in the range 1…UINT32_MAX here.
                 *
                 * (In an ideal world we'd attach the current serial counter to the netlink socket itself
                 * somehow, to avoid all this, but I couldn't come up with a nice way to do this) */
                .serial = (uint32_t) (now(CLOCK_MONOTONIC) % UINT32_MAX) + 1,
        };

        /* We guarantee that the read buffer has at least space for a message header */
        if (!greedy_realloc((void**) &nl->rbuffer, sizeof(struct nlmsghdr), sizeof(uint8_t)))
                return -ENOMEM;

        *ret = TAKE_PTR(nl);
        return 0;
}

int sd_netlink_new_from_fd(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
        socklen_t addrlen;
        int r;

        assert_return(ret, -EINVAL);

        r = netlink_new(&nl);
        if (r < 0)
                return r;

        addrlen = sizeof(nl->sockaddr);

        if (getsockname(fd, &nl->sockaddr.sa, &addrlen) < 0)
                return -errno;

        if (nl->sockaddr.nl.nl_family != AF_NETLINK)
                return -EINVAL;

        nl->fd = fd;

        *ret = TAKE_PTR(nl);
        return 0;
}

int sd_netlink_open_fd(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
        int r, protocol;

        assert_return(ret, -EINVAL);
        assert_return(fd >= 0, -EBADF);

        r = netlink_new(&nl);
        if (r < 0)
                return r;

        r = getsockopt_int(fd, SOL_SOCKET, SO_PROTOCOL, &protocol);
        if (r < 0)
                return r;

        nl->fd = fd;
        nl->protocol = protocol;

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_EXT_ACK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_EXT_ACK option, ignoring: %m");

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_GET_STRICT_CHK option, ignoring: %m");

        r = socket_bind(nl);
        if (r < 0) {
                nl->fd = -1; /* on failure, the caller remains owner of the fd, hence don't close it here */
                nl->protocol = -1;
                return r;
        }

        *ret = TAKE_PTR(nl);

        return 0;
}
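
/* Illustrative sketch, not part of the upstream file: how a caller might hand an externally created
 * AF_NETLINK fd to sd_netlink_open_fd(). On failure the fd is not closed by sd_netlink_open_fd() (see
 * the comment above), so the caller stays responsible for it. The function name is hypothetical. */
__attribute__((unused)) static int example_open_generic_fd(sd_netlink **ret) {
        _cleanup_close_ int fd = -1;
        int r;

        fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, NETLINK_GENERIC);
        if (fd < 0)
                return -errno;

        r = sd_netlink_open_fd(ret, fd);
        if (r < 0)
                return r; /* fd is closed by _cleanup_close_, since sd_netlink_open_fd() did not take it */

        TAKE_FD(fd); /* success: the sd_netlink object owns the fd now */
        return 0;
}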

int netlink_open_family(sd_netlink **ret, int family) {
        _cleanup_close_ int fd = -1;
        int r;

        fd = socket_open(family);
        if (fd < 0)
                return fd;

        r = sd_netlink_open_fd(ret, fd);
        if (r < 0)
                return r;
        TAKE_FD(fd);

        return 0;
}

int sd_netlink_open(sd_netlink **ret) {
        return netlink_open_family(ret, NETLINK_ROUTE);
}
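
/* Illustrative sketch, not part of the upstream file: typical synchronous use of the rtnetlink
 * protocol — open a NETLINK_ROUTE socket, build a request and wait for the reply with
 * sd_netlink_call(). The function name and the caller-supplied ifindex are made up for the example. */
__attribute__((unused)) static int example_query_link(int ifindex) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL, *reply = NULL;
        int r;

        r = sd_netlink_open(&rtnl);
        if (r < 0)
                return r;

        r = sd_rtnl_message_new_link(rtnl, &req, RTM_GETLINK, ifindex);
        if (r < 0)
                return r;

        /* usec == 0 selects the default timeout, see calc_elapse() later in this file. */
        return sd_netlink_call(rtnl, req, 0, &reply);
}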

bool netlink_pid_changed(sd_netlink *nl) {
        assert(nl);

        /* We don't support people creating an nl connection and
         * keeping it around over a fork(). Let's complain. */

        return nl->original_pid != getpid_cached();
}

int sd_netlink_inc_rcvbuf(sd_netlink *nl, size_t size) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return fd_inc_rcvbuf(nl->fd, size);
}

static sd_netlink *netlink_free(sd_netlink *nl) {
        sd_netlink_slot *s;
        unsigned i;

        assert(nl);

        for (i = 0; i < nl->rqueue_size; i++)
                sd_netlink_message_unref(nl->rqueue[i]);
        free(nl->rqueue);

        for (i = 0; i < nl->rqueue_partial_size; i++)
                sd_netlink_message_unref(nl->rqueue_partial[i]);
        free(nl->rqueue_partial);

        free(nl->rbuffer);

        while ((s = nl->slots)) {
                assert(s->floating);
                netlink_slot_disconnect(s, true);
        }
        hashmap_free(nl->reply_callbacks);
        prioq_free(nl->reply_callbacks_prioq);

        sd_event_source_unref(nl->io_event_source);
        sd_event_source_unref(nl->time_event_source);
        sd_event_unref(nl->event);

        hashmap_free(nl->broadcast_group_refs);

        genl_clear_family(nl);

        safe_close(nl->fd);
        return mfree(nl);
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(sd_netlink, sd_netlink, netlink_free);

static void netlink_seal_message(sd_netlink *nl, sd_netlink_message *m) {
        uint32_t picked;

        assert(nl);
        assert(!netlink_pid_changed(nl));
        assert(m);
        assert(m->hdr);

        /* Avoid collisions with outstanding requests */
        do {
                picked = nl->serial;

                /* Don't use seq == 0, as that is used for broadcasts, so we would get confused by replies to
                 * such messages */
                nl->serial = nl->serial == UINT32_MAX ? 1 : nl->serial + 1;

        } while (hashmap_contains(nl->reply_callbacks, UINT32_TO_PTR(picked)));

        m->hdr->nlmsg_seq = picked;
        message_seal(m);
}

int sd_netlink_send(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint32_t *serial) {

        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);
        assert_return(!message->sealed, -EPERM);

        netlink_seal_message(nl, message);

        r = socket_write_message(nl, message);
        if (r < 0)
                return r;

        if (serial)
                *serial = message_get_serial(message);

        return 1;
}

int sd_netlink_sendv(
                sd_netlink *nl,
                sd_netlink_message **messages,
                size_t msgcount,
                uint32_t **ret_serial) {

        _cleanup_free_ uint32_t *serials = NULL;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(messages, -EINVAL);
        assert_return(msgcount > 0, -EINVAL);

        if (ret_serial) {
                serials = new(uint32_t, msgcount);
                if (!serials)
                        return -ENOMEM;
        }

        for (unsigned i = 0; i < msgcount; i++) {
                assert_return(!messages[i]->sealed, -EPERM);

                netlink_seal_message(nl, messages[i]);
                if (serials)
                        serials[i] = message_get_serial(messages[i]);
        }

        r = socket_writev_message(nl, messages, msgcount);
        if (r < 0)
                return r;

        if (ret_serial)
                *ret_serial = TAKE_PTR(serials);

        return r;
}
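
/* Illustrative sketch, not part of the upstream file: batching two requests with sd_netlink_sendv()
 * and collecting their serials so the replies can later be matched via sd_netlink_read(). The function
 * name and the two hard-coded ifindices are made up for the example. */
__attribute__((unused)) static int example_send_batch(sd_netlink *rtnl) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m1 = NULL, *m2 = NULL;
        _cleanup_free_ uint32_t *serials = NULL;
        int r;

        r = sd_rtnl_message_new_link(rtnl, &m1, RTM_GETLINK, 1);
        if (r < 0)
                return r;

        r = sd_rtnl_message_new_link(rtnl, &m2, RTM_GETLINK, 2);
        if (r < 0)
                return r;

        r = sd_netlink_sendv(rtnl, (sd_netlink_message*[]) { m1, m2 }, 2, &serials);
        if (r < 0)
                return r;

        /* serials[0] and serials[1] can now be passed to sd_netlink_read() to pick up the replies. */
        return 0;
}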

int netlink_rqueue_make_room(sd_netlink *nl) {
        assert(nl);

        if (nl->rqueue_size >= NETLINK_RQUEUE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
                                       "sd-netlink: exhausted the read queue size (%d)",
                                       NETLINK_RQUEUE_MAX);

        if (!GREEDY_REALLOC(nl->rqueue, nl->rqueue_size + 1))
                return -ENOMEM;

        return 0;
}

int netlink_rqueue_partial_make_room(sd_netlink *nl) {
        assert(nl);

        if (nl->rqueue_partial_size >= NETLINK_RQUEUE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
                                       "sd-netlink: exhausted the partial read queue size (%d)",
                                       NETLINK_RQUEUE_MAX);

        if (!GREEDY_REALLOC(nl->rqueue_partial, nl->rqueue_partial_size + 1))
                return -ENOMEM;

        return 0;
}

static int dispatch_rqueue(sd_netlink *nl, sd_netlink_message **message) {
        int r;

        assert(nl);
        assert(message);

        if (nl->rqueue_size <= 0) {
                /* Try to read a new message */
                r = socket_read_message(nl);
                if (r == -ENOBUFS) { /* FIXME: ignore buffer overruns for now */
                        log_debug_errno(r, "sd-netlink: Got ENOBUFS from netlink socket, ignoring.");
                        return 1;
                }
                if (r <= 0)
                        return r;
        }

        /* Dispatch a queued message */
        *message = nl->rqueue[0];
        nl->rqueue_size--;
        memmove(nl->rqueue, nl->rqueue + 1, sizeof(sd_netlink_message*) * nl->rqueue_size);

        return 1;
}

static int process_timeout(sd_netlink *nl) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        struct reply_callback *c;
        sd_netlink_slot *slot;
        usec_t n;
        int r;

        assert(nl);

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c)
                return 0;

        n = now(CLOCK_MONOTONIC);
        if (c->timeout > n)
                return 0;

        r = message_new_synthetic_error(nl, -ETIMEDOUT, c->serial, &m);
        if (r < 0)
                return r;

        assert_se(prioq_pop(nl->reply_callbacks_prioq) == c);
        c->timeout = 0;
        hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(c->serial));

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: timed-out callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_reply(sd_netlink *nl, sd_netlink_message *m) {
        struct reply_callback *c;
        sd_netlink_slot *slot;
        uint32_t serial;
        uint16_t type;
        int r;

        assert(nl);
        assert(m);

        serial = message_get_serial(m);
        c = hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(serial));
        if (!c)
                return 0;

        if (c->timeout != 0) {
                prioq_remove(nl->reply_callbacks_prioq, c, &c->prioq_idx);
                c->timeout = 0;
        }

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (type == NLMSG_DONE)
                m = NULL;

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: reply callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_match(sd_netlink *nl, sd_netlink_message *m) {
        uint16_t type;
        uint8_t cmd;
        int r;

        assert(nl);
        assert(m);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (m->protocol == NETLINK_GENERIC) {
                r = sd_genl_message_get_command(nl, m, &cmd);
                if (r < 0)
                        return r;
        } else
                cmd = 0;

        LIST_FOREACH(match_callbacks, c, nl->match_callbacks) {
                sd_netlink_slot *slot;
                bool found = false;

                if (c->type != type)
                        continue;
                if (c->cmd != 0 && c->cmd != cmd)
                        continue;

                for (size_t i = 0; i < c->n_groups; i++)
                        if (c->groups[i] == m->multicast_group) {
                                found = true;
                                break;
                        }

                if (!found)
                        continue;

                slot = container_of(c, sd_netlink_slot, match_callback);

                r = c->callback(nl, m, slot->userdata);
                if (r < 0)
                        log_debug_errno(r, "sd-netlink: match callback %s%s%sfailed: %m",
                                        slot->description ? "'" : "",
                                        strempty(slot->description),
                                        slot->description ? "' " : "");
                if (r != 0)
                        break;
        }

        return 1;
}

static int process_running(sd_netlink *nl, sd_netlink_message **ret) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        int r;

        assert(nl);

        r = process_timeout(nl);
        if (r != 0)
                goto null_message;

        r = dispatch_rqueue(nl, &m);
        if (r < 0)
                return r;
        if (!m)
                goto null_message;

        if (sd_netlink_message_is_broadcast(m))
                r = process_match(nl, m);
        else
                r = process_reply(nl, m);
        if (r != 0)
                goto null_message;

        if (ret) {
                *ret = TAKE_PTR(m);

                return 1;
        }

        return 1;

null_message:
        if (r >= 0 && ret)
                *ret = NULL;

        return r;
}

int sd_netlink_process(sd_netlink *nl, sd_netlink_message **ret) {
        NETLINK_DONT_DESTROY(nl);
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(!nl->processing, -EBUSY);

        nl->processing = true;
        r = process_running(nl, ret);
        nl->processing = false;

        return r;
}

static usec_t calc_elapse(uint64_t usec) {
        if (usec == UINT64_MAX)
                return 0;

        if (usec == 0)
                usec = NETLINK_DEFAULT_TIMEOUT_USEC;

        return usec_add(now(CLOCK_MONOTONIC), usec);
}

static int netlink_poll(sd_netlink *nl, bool need_more, usec_t timeout_usec) {
        usec_t m = USEC_INFINITY;
        int r, e;

        assert(nl);

        e = sd_netlink_get_events(nl);
        if (e < 0)
                return e;

        if (need_more)
                /* Caller wants more data, and doesn't care about
                 * what's been read or any other timeouts. */
                e |= POLLIN;
        else {
                usec_t until;

                /* Caller wants to process if there is something to
                 * process, but doesn't care otherwise */

                r = sd_netlink_get_timeout(nl, &until);
                if (r < 0)
                        return r;

                m = usec_sub_unsigned(until, now(CLOCK_MONOTONIC));
        }

        r = fd_wait_for_event(nl->fd, e, MIN(m, timeout_usec));
        if (r <= 0)
                return r;

        return 1;
}

int sd_netlink_wait(sd_netlink *nl, uint64_t timeout_usec) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (nl->rqueue_size > 0)
                return 0;

        return netlink_poll(nl, false, timeout_usec);
}

static int timeout_compare(const void *a, const void *b) {
        const struct reply_callback *x = a, *y = b;

        if (x->timeout != 0 && y->timeout == 0)
                return -1;

        if (x->timeout == 0 && y->timeout != 0)
                return 1;

        return CMP(x->timeout, y->timeout);
}

int sd_netlink_call_async(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                sd_netlink_message *m,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                uint64_t usec,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r, k;

        assert_return(nl, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (hashmap_size(nl->reply_callbacks) >= REPLY_CALLBACKS_MAX)
                return -ERANGE;

        r = hashmap_ensure_allocated(&nl->reply_callbacks, &trivial_hash_ops);
        if (r < 0)
                return r;

        if (usec != UINT64_MAX) {
                r = prioq_ensure_allocated(&nl->reply_callbacks_prioq, timeout_compare);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_REPLY_CALLBACK, sizeof(struct reply_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->reply_callback.callback = callback;
        slot->reply_callback.timeout = calc_elapse(usec);

        k = sd_netlink_send(nl, m, &slot->reply_callback.serial);
        if (k < 0)
                return k;

        r = hashmap_put(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial), &slot->reply_callback);
        if (r < 0)
                return r;

        if (slot->reply_callback.timeout != 0) {
                r = prioq_put(nl->reply_callbacks_prioq, &slot->reply_callback, &slot->reply_callback.prioq_idx);
                if (r < 0) {
                        (void) hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial));
                        return r;
                }
        }

        /* Set this only at the end. Otherwise, some of the failure paths above would invoke the destroy
         * callback, and some would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);

        return k;
}
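
/* Illustrative sketch, not part of the upstream file: an asynchronous request with a reply callback
 * and the default timeout. The callback follows the sd_netlink_message_handler_t signature; the
 * function and variable names are made up for the example. */
__attribute__((unused)) static int example_reply_handler(sd_netlink *nl, sd_netlink_message *reply, void *userdata) {
        int r;

        r = sd_netlink_message_get_errno(reply);
        if (r < 0)
                log_debug_errno(r, "Example request failed: %m");

        return 0;
}

__attribute__((unused)) static int example_call_async(sd_netlink *rtnl, sd_netlink_message *req) {
        /* Passing NULL for ret_slot makes the slot "floating": it is cleaned up automatically once the
         * reply (or a synthetic -ETIMEDOUT message, see process_timeout() above) has been dispatched. */
        return sd_netlink_call_async(rtnl, NULL, req, example_reply_handler, NULL, NULL, 0, "example-request");
}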

int sd_netlink_read(
                sd_netlink *nl,
                uint32_t serial,
                uint64_t usec,
                sd_netlink_message **ret) {

        usec_t timeout;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        timeout = calc_elapse(usec);

        for (;;) {
                usec_t left;

                for (unsigned i = 0; i < nl->rqueue_size; i++) {
                        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *incoming = NULL;
                        uint32_t received_serial;
                        uint16_t type;

                        received_serial = message_get_serial(nl->rqueue[i]);
                        if (received_serial != serial)
                                continue;

                        incoming = nl->rqueue[i];

                        /* found a match, remove from rqueue and return it */
                        memmove(nl->rqueue + i, nl->rqueue + i + 1,
                                sizeof(sd_netlink_message*) * (nl->rqueue_size - i - 1));
                        nl->rqueue_size--;

                        r = sd_netlink_message_get_errno(incoming);
                        if (r < 0)
                                return r;

                        r = sd_netlink_message_get_type(incoming, &type);
                        if (r < 0)
                                return r;

                        if (type == NLMSG_DONE) {
                                if (ret)
                                        *ret = NULL;
                                return 0;
                        }

                        if (ret)
                                *ret = TAKE_PTR(incoming);
                        return 1;
                }

                r = socket_read_message(nl);
                if (r < 0)
                        return r;
                if (r > 0)
                        /* received message, so try to process straight away */
                        continue;

                if (timeout > 0) {
                        usec_t n;

                        n = now(CLOCK_MONOTONIC);
                        if (n >= timeout)
                                return -ETIMEDOUT;

                        left = usec_sub_unsigned(timeout, n);
                } else
                        left = USEC_INFINITY;

                r = netlink_poll(nl, true, left);
                if (r < 0)
                        return r;
                if (r == 0)
                        return -ETIMEDOUT;
        }
}

int sd_netlink_call(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint64_t usec,
                sd_netlink_message **ret) {

        uint32_t serial;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);

        r = sd_netlink_send(nl, message, &serial);
        if (r < 0)
                return r;

        return sd_netlink_read(nl, serial, usec, ret);
}

int sd_netlink_get_events(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return nl->rqueue_size == 0 ? POLLIN : 0;
}

int sd_netlink_get_timeout(sd_netlink *nl, uint64_t *timeout_usec) {
        struct reply_callback *c;

        assert_return(nl, -EINVAL);
        assert_return(timeout_usec, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (nl->rqueue_size > 0) {
                *timeout_usec = 0;
                return 1;
        }

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c) {
                *timeout_usec = UINT64_MAX;
                return 0;
        }

        *timeout_usec = c->timeout;

        return 1;
}

static int io_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        sd_netlink *nl = userdata;
        int r;

        assert(nl);

        r = sd_netlink_process(nl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int time_callback(sd_event_source *s, uint64_t usec, void *userdata) {
        sd_netlink *nl = userdata;
        int r;

        assert(nl);

        r = sd_netlink_process(nl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int prepare_callback(sd_event_source *s, void *userdata) {
        sd_netlink *nl = userdata;
        int r, e;
        usec_t until;

        assert(s);
        assert(nl);

        e = sd_netlink_get_events(nl);
        if (e < 0)
                return e;

        r = sd_event_source_set_io_events(nl->io_event_source, e);
        if (r < 0)
                return r;

        r = sd_netlink_get_timeout(nl, &until);
        if (r < 0)
                return r;
        if (r > 0) {
                int j;

                j = sd_event_source_set_time(nl->time_event_source, until);
                if (j < 0)
                        return j;
        }

        r = sd_event_source_set_enabled(nl->time_event_source, r > 0);
        if (r < 0)
                return r;

        return 1;
}

int sd_netlink_attach_event(sd_netlink *nl, sd_event *event, int64_t priority) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!nl->event, -EBUSY);

        assert(!nl->io_event_source);
        assert(!nl->time_event_source);

        if (event)
                nl->event = sd_event_ref(event);
        else {
                r = sd_event_default(&nl->event);
                if (r < 0)
                        return r;
        }

        r = sd_event_add_io(nl->event, &nl->io_event_source, nl->fd, 0, io_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->io_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->io_event_source, "netlink-receive-message");
        if (r < 0)
                goto fail;

        r = sd_event_source_set_prepare(nl->io_event_source, prepare_callback);
        if (r < 0)
                goto fail;

        r = sd_event_add_time(nl->event, &nl->time_event_source, CLOCK_MONOTONIC, 0, 0, time_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->time_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->time_event_source, "netlink-timer");
        if (r < 0)
                goto fail;

        return 0;

fail:
        sd_netlink_detach_event(nl);
        return r;
}
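
/* Illustrative sketch, not part of the upstream file: driving the connection from an sd-event loop
 * instead of calling sd_netlink_process()/sd_netlink_wait() by hand. Once attached, io_callback(),
 * time_callback() and prepare_callback() above keep the queue moving. The function name is made up. */
__attribute__((unused)) static int example_run_event_loop(void) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
        _cleanup_(sd_event_unrefp) sd_event *event = NULL;
        int r;

        r = sd_event_default(&event);
        if (r < 0)
                return r;

        r = sd_netlink_open(&rtnl);
        if (r < 0)
                return r;

        r = sd_netlink_attach_event(rtnl, event, SD_EVENT_PRIORITY_NORMAL);
        if (r < 0)
                return r;

        /* Queue async requests or matches here, then run the loop. */
        return sd_event_loop(event);
}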

int sd_netlink_detach_event(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(nl->event, -ENXIO);

        nl->io_event_source = sd_event_source_unref(nl->io_event_source);

        nl->time_event_source = sd_event_source_unref(nl->time_event_source);

        nl->event = sd_event_unref(nl->event);

        return 0;
}

int netlink_add_match_internal(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                const uint32_t *groups,
                size_t n_groups,
                uint16_t type,
                uint8_t cmd,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r;

        assert(groups);
        assert(n_groups > 0);

        for (size_t i = 0; i < n_groups; i++) {
                r = socket_broadcast_group_ref(nl, groups[i]);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_MATCH_CALLBACK, sizeof(struct match_callback),
                                  userdata, description, &slot);
        if (r < 0)
                return r;

        slot->match_callback.groups = newdup(uint32_t, groups, n_groups);
        if (!slot->match_callback.groups)
                return -ENOMEM;

        slot->match_callback.n_groups = n_groups;
        slot->match_callback.callback = callback;
        slot->match_callback.type = type;
        slot->match_callback.cmd = cmd;

        LIST_PREPEND(match_callbacks, nl->match_callbacks, &slot->match_callback);

        /* Set this only at the end. Otherwise, some of the failure paths above would invoke the destroy
         * callback, and some would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);
        return 0;
}

int sd_netlink_add_match(
                sd_netlink *rtnl,
                sd_netlink_slot **ret_slot,
                uint16_t type,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        static const uint32_t
                address_groups[] = { RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR, },
                link_groups[] = { RTNLGRP_LINK, },
                neighbor_groups[] = { RTNLGRP_NEIGH, },
                nexthop_groups[] = { RTNLGRP_NEXTHOP, },
                route_groups[] = { RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV6_ROUTE, },
                rule_groups[] = { RTNLGRP_IPV4_RULE, RTNLGRP_IPV6_RULE, },
                tc_groups[] = { RTNLGRP_TC };
        const uint32_t *groups;
        size_t n_groups;

        assert_return(rtnl, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(rtnl), -ECHILD);

        switch (type) {
        case RTM_NEWLINK:
        case RTM_DELLINK:
                groups = link_groups;
                n_groups = ELEMENTSOF(link_groups);
                break;
        case RTM_NEWADDR:
        case RTM_DELADDR:
                groups = address_groups;
                n_groups = ELEMENTSOF(address_groups);
                break;
        case RTM_NEWNEIGH:
        case RTM_DELNEIGH:
                groups = neighbor_groups;
                n_groups = ELEMENTSOF(neighbor_groups);
                break;
        case RTM_NEWROUTE:
        case RTM_DELROUTE:
                groups = route_groups;
                n_groups = ELEMENTSOF(route_groups);
                break;
        case RTM_NEWRULE:
        case RTM_DELRULE:
                groups = rule_groups;
                n_groups = ELEMENTSOF(rule_groups);
                break;
        case RTM_NEWNEXTHOP:
        case RTM_DELNEXTHOP:
                groups = nexthop_groups;
                n_groups = ELEMENTSOF(nexthop_groups);
                break;
        case RTM_NEWQDISC:
        case RTM_DELQDISC:
        case RTM_NEWTCLASS:
        case RTM_DELTCLASS:
                groups = tc_groups;
                n_groups = ELEMENTSOF(tc_groups);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return netlink_add_match_internal(rtnl, ret_slot, groups, n_groups, type, 0, callback,
                                          destroy_callback, userdata, description);
}
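
/* Illustrative sketch, not part of the upstream file: subscribing to link change notifications.
 * RTM_NEWLINK selects the RTNLGRP_LINK multicast group via the table above; the handler is then
 * invoked from sd_netlink_process() for every matching broadcast. Names are made up for the example. */
__attribute__((unused)) static int example_link_change_handler(sd_netlink *nl, sd_netlink_message *m, void *userdata) {
        const char *ifname = NULL;

        (void) sd_netlink_message_read_string(m, IFLA_IFNAME, &ifname);
        log_debug("Example: link '%s' changed.", strna(ifname));

        return 0;
}

__attribute__((unused)) static int example_subscribe_links(sd_netlink *rtnl, sd_netlink_slot **ret_slot) {
        return sd_netlink_add_match(rtnl, ret_slot, RTM_NEWLINK, example_link_change_handler,
                                    NULL, NULL, "example-link-monitor");
}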

int sd_netlink_attach_filter(sd_netlink *nl, size_t len, struct sock_filter *filter) {
        assert_return(nl, -EINVAL);
        assert_return(len == 0 || filter, -EINVAL);

        if (setsockopt(nl->fd, SOL_SOCKET,
                       len == 0 ? SO_DETACH_FILTER : SO_ATTACH_FILTER,
                       &(struct sock_fprog) {
                               .len = len,
                               .filter = filter,
                       }, sizeof(struct sock_fprog)) < 0)
                return -errno;

        return 0;
}