/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <linux/filter.h>
#include <poll.h>
#include <stdlib.h>

#include "sd-event.h"
#include "sd-netlink.h"

#include "alloc-util.h"
#include "errno-util.h"
#include "fd-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "log.h"
#include "netlink-genl.h"
#include "netlink-internal.h"
#include "netlink-slot.h"
#include "netlink-util.h"
#include "ordered-set.h"
#include "prioq.h"
#include "process-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "time-util.h"

/* Some really high limit, to catch programming errors */
#define REPLY_CALLBACKS_MAX UINT16_MAX

static int netlink_new(sd_netlink **ret) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;

        assert_return(ret, -EINVAL);

        nl = new(sd_netlink, 1);
        if (!nl)
                return -ENOMEM;

        *nl = (sd_netlink) {
                .n_ref = 1,
                .fd = -EBADF,
                .sockaddr.nl.nl_family = AF_NETLINK,
                .original_pid = getpid_cached(),
                .protocol = -1,

                /* Kernel change notification messages have sequence number 0. We want to avoid that with our
                 * own serials, in order not to get confused when matching up kernel replies to our earlier
                 * requests.
                 *
                 * Moreover, when using netlink socket activation (i.e. where PID 1 binds an AF_NETLINK
                 * socket for us and passes it to us across execve()) and we get restarted multiple times
                 * while the socket sticks around we might get confused by replies from earlier runs coming
                 * in late — which is pretty likely if we'd start our sequence numbers always from 1. Hence,
                 * let's start with a value based on the system clock. This should make collisions much less
                 * likely (though still theoretically possible). We use a 32 bit μs counter starting at boot
                 * for this (and explicitly exclude the zero, see above). This counter will wrap around after
                 * a bit more than 1h, but that's hopefully OK as the kernel shouldn't take that long to
                 * reply to our requests.
                 *
                 * We only pick the initial start value this way. For each message we simply increase the
                 * sequence number by 1. This means we could enqueue 1 netlink message per μs without risking
                 * collisions, which should be OK.
                 *
                 * Note this means the serials will be in the range 1…UINT32_MAX here.
                 *
                 * (In an ideal world we'd attach the current serial counter to the netlink socket itself
                 * somehow, to avoid all this, but I couldn't come up with a nice way to do this) */
                .serial = (uint32_t) (now(CLOCK_MONOTONIC) % UINT32_MAX) + 1,
        };

        *ret = TAKE_PTR(nl);
        return 0;
}

int sd_netlink_open_fd(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
        int r, protocol = 0; /* Avoid maybe-uninitialized false positive */

        assert_return(ret, -EINVAL);
        assert_return(fd >= 0, -EBADF);

        r = netlink_new(&nl);
        if (r < 0)
                return r;

        r = getsockopt_int(fd, SOL_SOCKET, SO_PROTOCOL, &protocol);
        if (r < 0)
                return r;

        nl->fd = fd;
        nl->protocol = protocol;

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_EXT_ACK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_EXT_ACK option, ignoring: %m");

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_GET_STRICT_CHK option, ignoring: %m");

        r = socket_bind(nl);
        if (r < 0) {
                nl->fd = -EBADF; /* on failure, the caller remains owner of the fd, hence don't close it here */
                nl->protocol = -1;
                return r;
        }

        *ret = TAKE_PTR(nl);

        return 0;
}
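
/* Illustrative sketch (not part of the original file): how a caller might hand a pre-created
 * AF_NETLINK socket — e.g. one received via socket activation — to sd_netlink_open_fd(). The
 * NETLINK_GENERIC protocol choice and the minimal error handling are just for the example; on
 * failure the fd remains owned by the caller, as noted above.
 *
 *     _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
 *     int fd, r;
 *
 *     fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, NETLINK_GENERIC);
 *     if (fd < 0)
 *             return -errno;
 *
 *     r = sd_netlink_open_fd(&nl, fd);
 *     if (r < 0) {
 *             safe_close(fd);  // still ours to close, since sd_netlink_open_fd() failed
 *             return r;
 *     }
 *     // from here on, nl owns the fd and will close it on the last unref
 */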

int sd_netlink_open(sd_netlink **ret) {
        return netlink_open_family(ret, NETLINK_ROUTE);
}

int sd_netlink_increase_rxbuf(sd_netlink *nl, size_t size) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return fd_increase_rxbuf(nl->fd, size);
}

static sd_netlink *netlink_free(sd_netlink *nl) {
        sd_netlink_slot *s;

        assert(nl);

        ordered_set_free(nl->rqueue);
        hashmap_free(nl->rqueue_by_serial);
        hashmap_free(nl->rqueue_partial_by_serial);
        free(nl->rbuffer);

        while ((s = nl->slots)) {
                assert(s->floating);
                netlink_slot_disconnect(s, true);
        }
        hashmap_free(nl->reply_callbacks);
        prioq_free(nl->reply_callbacks_prioq);

        sd_event_source_unref(nl->io_event_source);
        sd_event_source_unref(nl->time_event_source);
        sd_event_unref(nl->event);

        hashmap_free(nl->broadcast_group_refs);

        genl_clear_family(nl);

        safe_close(nl->fd);
        return mfree(nl);
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(sd_netlink, sd_netlink, netlink_free);

int sd_netlink_send(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint32_t *ret_serial) {

        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);
        assert_return(!message->sealed, -EPERM);

        netlink_seal_message(nl, message);

        r = socket_write_message(nl, message);
        if (r < 0)
                return r;

        if (ret_serial)
                *ret_serial = message_get_serial(message);

        return 1;
}

static int dispatch_rqueue(sd_netlink *nl, sd_netlink_message **ret) {
        sd_netlink_message *m;
        int r;

        assert(nl);
        assert(ret);

        if (ordered_set_isempty(nl->rqueue)) {
                /* Try to read a new message */
                r = socket_read_message(nl);
                if (r == -ENOBUFS) /* FIXME: ignore buffer overruns for now */
                        log_debug_errno(r, "sd-netlink: Got ENOBUFS from netlink socket, ignoring.");
                else if (r < 0)
                        return r;
        }

        /* Dispatch a queued message */
        m = ordered_set_steal_first(nl->rqueue);
        if (m)
                sd_netlink_message_unref(hashmap_remove_value(nl->rqueue_by_serial, UINT32_TO_PTR(message_get_serial(m)), m));
        *ret = m;
        return !!m;
}

static int process_timeout(sd_netlink *nl) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        struct reply_callback *c;
        sd_netlink_slot *slot;
        usec_t n;
        int r;

        assert(nl);

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c)
                return 0;

        n = now(CLOCK_MONOTONIC);
        if (c->timeout > n)
                return 0;

        r = message_new_synthetic_error(nl, -ETIMEDOUT, c->serial, &m);
        if (r < 0)
                return r;

        assert_se(prioq_pop(nl->reply_callbacks_prioq) == c);
        hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(c->serial));

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: timedout callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_reply(sd_netlink *nl, sd_netlink_message *m) {
        struct reply_callback *c;
        sd_netlink_slot *slot;
        uint32_t serial;
        uint16_t type;
        int r;

        assert(nl);
        assert(m);

        serial = message_get_serial(m);
        c = hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(serial));
        if (!c)
                return 0;

        if (c->timeout != USEC_INFINITY)
                prioq_remove(nl->reply_callbacks_prioq, c, &c->prioq_idx);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (type == NLMSG_DONE)
                m = NULL;

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: reply callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_match(sd_netlink *nl, sd_netlink_message *m) {
        uint16_t type;
        uint8_t cmd;
        int r;

        assert(nl);
        assert(m);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (m->protocol == NETLINK_GENERIC) {
                r = sd_genl_message_get_command(nl, m, &cmd);
                if (r < 0)
                        return r;
        } else
                cmd = 0;

        LIST_FOREACH(match_callbacks, c, nl->match_callbacks) {
                sd_netlink_slot *slot;
                bool found = false;

                if (c->type != type)
                        continue;
                if (c->cmd != 0 && c->cmd != cmd)
                        continue;

                for (size_t i = 0; i < c->n_groups; i++)
                        if (c->groups[i] == m->multicast_group) {
                                found = true;
                                break;
                        }

                if (!found)
                        continue;

                slot = container_of(c, sd_netlink_slot, match_callback);

                r = c->callback(nl, m, slot->userdata);
                if (r < 0)
                        log_debug_errno(r, "sd-netlink: match callback %s%s%sfailed: %m",
                                        slot->description ? "'" : "",
                                        strempty(slot->description),
                                        slot->description ? "' " : "");
                if (r != 0)
                        break;
        }

        return 1;
}

static int process_running(sd_netlink *nl, sd_netlink_message **ret) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        int r;

        assert(nl);

        r = process_timeout(nl);
        if (r != 0)
                goto null_message;

        r = dispatch_rqueue(nl, &m);
        if (r < 0)
                return r;
        if (!m)
                goto null_message;

        if (sd_netlink_message_is_broadcast(m))
                r = process_match(nl, m);
        else
                r = process_reply(nl, m);
        if (r != 0)
                goto null_message;

        if (ret) {
                *ret = TAKE_PTR(m);

                return 1;
        }

        return 1;

null_message:
        if (r >= 0 && ret)
                *ret = NULL;

        return r;
}

int sd_netlink_process(sd_netlink *nl, sd_netlink_message **ret) {
        NETLINK_DONT_DESTROY(nl);
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(!nl->processing, -EBUSY);

        nl->processing = true;
        r = process_running(nl, ret);
        nl->processing = false;

        return r;
}

static usec_t timespan_to_timestamp(usec_t usec) {
        static bool default_timeout_set = false;
        static usec_t default_timeout;
        int r;

        if (usec == 0) {
                if (!default_timeout_set) {
                        const char *e;

                        default_timeout_set = true;
                        default_timeout = NETLINK_DEFAULT_TIMEOUT_USEC;

                        e = secure_getenv("SYSTEMD_NETLINK_DEFAULT_TIMEOUT");
                        if (e) {
                                r = parse_sec(e, &default_timeout);
                                if (r < 0)
                                        log_debug_errno(r, "sd-netlink: Failed to parse $SYSTEMD_NETLINK_DEFAULT_TIMEOUT environment variable, ignoring: %m");
                        }
                }

                usec = default_timeout;
        }

        return usec_add(now(CLOCK_MONOTONIC), usec);
}
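
/* Illustrative note (not part of the original file): a timeout of 0 passed to sd_netlink_call(),
 * sd_netlink_call_async() or sd_netlink_read() below means "use the default", i.e.
 * NETLINK_DEFAULT_TIMEOUT_USEC unless overridden through the environment. The override is parsed
 * with parse_sec(), so the usual systemd time span syntax applies, e.g.:
 *
 *     SYSTEMD_NETLINK_DEFAULT_TIMEOUT=1min some-sd-netlink-consumer
 *
 * ("some-sd-netlink-consumer" stands in for whatever program links against this library.) */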

static int netlink_poll(sd_netlink *nl, bool need_more, usec_t timeout_usec) {
        usec_t m = USEC_INFINITY;
        int r, e;

        assert(nl);

        e = sd_netlink_get_events(nl);
        if (e < 0)
                return e;

        if (need_more)
                /* Caller wants more data, and doesn't care about
                 * what's been read or any other timeouts. */
                e |= POLLIN;
        else {
                usec_t until;

                /* Caller wants to process if there is something to
                 * process, but doesn't care otherwise */

                r = sd_netlink_get_timeout(nl, &until);
                if (r < 0)
                        return r;

                m = usec_sub_unsigned(until, now(CLOCK_MONOTONIC));
        }

        r = fd_wait_for_event(nl->fd, e, MIN(m, timeout_usec));
        if (r <= 0)
                return r;

        return 1;
}

int sd_netlink_wait(sd_netlink *nl, uint64_t timeout_usec) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (!ordered_set_isempty(nl->rqueue))
                return 0;

        r = netlink_poll(nl, false, timeout_usec);
        if (ERRNO_IS_NEG_TRANSIENT(r)) /* Convert EINTR to "something happened" and give user a chance to run some code before calling back into us */
                return 1;
        return r;
}
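
/* Illustrative sketch (not part of the original file): the classic way to drive an sd_netlink
 * connection without attaching it to an sd_event loop is to alternate sd_netlink_process() and
 * sd_netlink_wait(), analogous to the sd-bus wait/process pattern. It assumes an established
 * sd_netlink *nl and an int r in scope:
 *
 *     for (;;) {
 *             r = sd_netlink_process(nl, NULL);
 *             if (r < 0)
 *                     return r;
 *             if (r > 0)  // something was dispatched, try to process more right away
 *                     continue;
 *
 *             r = sd_netlink_wait(nl, UINT64_MAX);  // block until the socket becomes readable
 *             if (r < 0)
 *                     return r;
 *     }
 */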

static int timeout_compare(const void *a, const void *b) {
        const struct reply_callback *x = a, *y = b;

        return CMP(x->timeout, y->timeout);
}

size_t netlink_get_reply_callback_count(sd_netlink *nl) {
        assert(nl);

        return hashmap_size(nl->reply_callbacks);
}

int sd_netlink_call_async(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                sd_netlink_message *m,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                uint64_t usec,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r, k;

        assert_return(nl, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (hashmap_size(nl->reply_callbacks) >= REPLY_CALLBACKS_MAX)
                return -EXFULL;

        r = hashmap_ensure_allocated(&nl->reply_callbacks, &trivial_hash_ops);
        if (r < 0)
                return r;

        if (usec != UINT64_MAX) {
                r = prioq_ensure_allocated(&nl->reply_callbacks_prioq, timeout_compare);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_REPLY_CALLBACK, sizeof(struct reply_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->reply_callback.callback = callback;
        slot->reply_callback.timeout = timespan_to_timestamp(usec);

        k = sd_netlink_send(nl, m, &slot->reply_callback.serial);
        if (k < 0)
                return k;

        r = hashmap_put(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial), &slot->reply_callback);
        if (r < 0)
                return r;

        if (slot->reply_callback.timeout != USEC_INFINITY) {
                r = prioq_put(nl->reply_callbacks_prioq, &slot->reply_callback, &slot->reply_callback.prioq_idx);
                if (r < 0) {
                        (void) hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial));
                        return r;
                }
        }

        /* Set this at last. Otherwise, some failures in above would call destroy_callback but some would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);

        return k;
}
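
/* Illustrative sketch (not part of the original file): issuing a request asynchronously and handling
 * the reply from a callback. sd_rtnl_message_new_link() is assumed to be the usual rtnl message
 * constructor of this library; on_reply and ifindex are made up for the example. Passing 0 as the
 * timeout means "use the default timeout" (see timespan_to_timestamp() above), and passing NULL for
 * ret_slot makes the slot floating, so it is cleaned up automatically after the callback ran.
 *
 *     static int on_reply(sd_netlink *nl, sd_netlink_message *reply, void *userdata) {
 *             int r = sd_netlink_message_get_errno(reply);
 *             if (r < 0)
 *                     log_warning_errno(r, "RTM_GETLINK request failed: %m");
 *             return 0;
 *     }
 *
 *     _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL;
 *     int r;
 *
 *     r = sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, ifindex);
 *     if (r < 0)
 *             return r;
 *
 *     r = sd_netlink_call_async(nl, NULL, req, on_reply, NULL, NULL, 0, "example-getlink");
 *     if (r < 0)
 *             return r;
 */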

int sd_netlink_read(
                sd_netlink *nl,
                uint32_t serial,
                uint64_t usec,
                sd_netlink_message **ret) {

        usec_t timeout;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        timeout = timespan_to_timestamp(usec);

        for (;;) {
                _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
                usec_t left;

                m = hashmap_remove(nl->rqueue_by_serial, UINT32_TO_PTR(serial));
                if (m) {
                        uint16_t type;

                        /* found a match, remove from rqueue and return it */
                        sd_netlink_message_unref(ordered_set_remove(nl->rqueue, m));

                        r = sd_netlink_message_get_errno(m);
                        if (r < 0)
                                return r;

                        r = sd_netlink_message_get_type(m, &type);
                        if (r < 0)
                                return r;

                        if (type == NLMSG_DONE) {
                                if (ret)
                                        *ret = NULL;
                                return 0;
                        }

                        if (ret)
                                *ret = TAKE_PTR(m);
                        return 1;
                }

                r = socket_read_message(nl);
                if (r < 0)
                        return r;
                if (r > 0)
                        /* received message, so try to process straight away */
                        continue;

                if (timeout != USEC_INFINITY) {
                        usec_t n;

                        n = now(CLOCK_MONOTONIC);
                        if (n >= timeout)
                                return -ETIMEDOUT;

                        left = usec_sub_unsigned(timeout, n);
                } else
                        left = USEC_INFINITY;

                r = netlink_poll(nl, true, left);
                if (r < 0)
                        return r;
                if (r == 0)
                        return -ETIMEDOUT;
        }
}

int sd_netlink_call(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint64_t usec,
                sd_netlink_message **ret) {

        uint32_t serial;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);

        r = sd_netlink_send(nl, message, &serial);
        if (r < 0)
                return r;

        return sd_netlink_read(nl, serial, usec, ret);
}
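
/* Illustrative sketch (not part of the original file): a blocking request/reply round trip.
 * sd_rtnl_message_new_link() is assumed as above and ifindex is made up; a timeout of 0 again
 * means "use the default timeout".
 *
 *     _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
 *     _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL, *reply = NULL;
 *     int r;
 *
 *     r = sd_netlink_open(&nl);
 *     if (r < 0)
 *             return r;
 *
 *     r = sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, ifindex);
 *     if (r < 0)
 *             return r;
 *
 *     r = sd_netlink_call(nl, req, 0, &reply);
 *     if (r < 0)
 *             return r;  // includes errors the kernel reported via NLMSG_ERROR
 *     // 'reply' now points to the answer, or is NULL if the kernel only sent NLMSG_DONE
 */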

int sd_netlink_get_events(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return ordered_set_isempty(nl->rqueue) ? POLLIN : 0;
}

int sd_netlink_get_timeout(sd_netlink *nl, uint64_t *ret) {
        struct reply_callback *c;

        assert_return(nl, -EINVAL);
        assert_return(ret, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (!ordered_set_isempty(nl->rqueue)) {
                *ret = 0;
                return 1;
        }

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c) {
                *ret = UINT64_MAX;
                return 0;
        }

        *ret = c->timeout;
        return 1;
}

static int io_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r;

        r = sd_netlink_process(nl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int time_callback(sd_event_source *s, uint64_t usec, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r;

        r = sd_netlink_process(nl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int prepare_callback(sd_event_source *s, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r, enabled;
        usec_t until;

        assert(s);

        r = sd_netlink_get_events(nl);
        if (r < 0)
                return r;

        r = sd_event_source_set_io_events(nl->io_event_source, r);
        if (r < 0)
                return r;

        enabled = sd_netlink_get_timeout(nl, &until);
        if (enabled < 0)
                return enabled;
        if (enabled > 0) {
                r = sd_event_source_set_time(nl->time_event_source, until);
                if (r < 0)
                        return r;
        }

        r = sd_event_source_set_enabled(nl->time_event_source,
                                        enabled > 0 ? SD_EVENT_ONESHOT : SD_EVENT_OFF);
        if (r < 0)
                return r;

        return 1;
}

int sd_netlink_attach_event(sd_netlink *nl, sd_event *event, int64_t priority) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!nl->event, -EBUSY);

        assert(!nl->io_event_source);
        assert(!nl->time_event_source);

        if (event)
                nl->event = sd_event_ref(event);
        else {
                r = sd_event_default(&nl->event);
                if (r < 0)
                        return r;
        }

        r = sd_event_add_io(nl->event, &nl->io_event_source, nl->fd, 0, io_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->io_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->io_event_source, "netlink-receive-message");
        if (r < 0)
                goto fail;

        r = sd_event_source_set_prepare(nl->io_event_source, prepare_callback);
        if (r < 0)
                goto fail;

        r = sd_event_add_time(nl->event, &nl->time_event_source, CLOCK_MONOTONIC, 0, 0, time_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->time_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->time_event_source, "netlink-timer");
        if (r < 0)
                goto fail;

        return 0;

fail:
        sd_netlink_detach_event(nl);
        return r;
}
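
/* Illustrative sketch (not part of the original file): hooking the connection into the default
 * sd_event loop, so that replies and matches are dispatched from sd_event_loop() via the
 * io_callback()/time_callback() handlers above, without explicit sd_netlink_wait()/
 * sd_netlink_process() calls. Assumes an established sd_netlink *nl in scope.
 *
 *     _cleanup_(sd_event_unrefp) sd_event *event = NULL;
 *     int r;
 *
 *     r = sd_event_default(&event);
 *     if (r < 0)
 *             return r;
 *
 *     r = sd_netlink_attach_event(nl, event, SD_EVENT_PRIORITY_NORMAL);
 *     if (r < 0)
 *             return r;
 *
 *     r = sd_event_loop(event);
 *     // ...
 *     sd_netlink_detach_event(nl);  // drop the event sources before freeing the loop
 */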

int sd_netlink_detach_event(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(nl->event, -ENXIO);

        nl->io_event_source = sd_event_source_unref(nl->io_event_source);

        nl->time_event_source = sd_event_source_unref(nl->time_event_source);

        nl->event = sd_event_unref(nl->event);

        return 0;
}

sd_event* sd_netlink_get_event(sd_netlink *nl) {
        assert_return(nl, NULL);

        return nl->event;
}

int netlink_add_match_internal(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                const uint32_t *groups,
                size_t n_groups,
                uint16_t type,
                uint8_t cmd,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r;

        assert(groups);
        assert(n_groups > 0);

        for (size_t i = 0; i < n_groups; i++) {
                r = socket_broadcast_group_ref(nl, groups[i]);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_MATCH_CALLBACK, sizeof(struct match_callback),
                                  userdata, description, &slot);
        if (r < 0)
                return r;

        slot->match_callback.groups = newdup(uint32_t, groups, n_groups);
        if (!slot->match_callback.groups)
                return -ENOMEM;

        slot->match_callback.n_groups = n_groups;
        slot->match_callback.callback = callback;
        slot->match_callback.type = type;
        slot->match_callback.cmd = cmd;

        LIST_PREPEND(match_callbacks, nl->match_callbacks, &slot->match_callback);

        /* Set this at last. Otherwise, some failures in above call the destroy callback but some do not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);
        return 0;
}

int sd_netlink_add_match(
                sd_netlink *rtnl,
                sd_netlink_slot **ret_slot,
                uint16_t type,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        static const uint32_t
                address_groups[] = { RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR, },
                link_groups[] = { RTNLGRP_LINK, },
                neighbor_groups[] = { RTNLGRP_NEIGH, },
                nexthop_groups[] = { RTNLGRP_NEXTHOP, },
                route_groups[] = { RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV6_ROUTE, },
                rule_groups[] = { RTNLGRP_IPV4_RULE, RTNLGRP_IPV6_RULE, },
                tc_groups[] = { RTNLGRP_TC };
        const uint32_t *groups;
        size_t n_groups;

        assert_return(rtnl, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(rtnl), -ECHILD);

        switch (type) {
        case RTM_NEWLINK:
        case RTM_DELLINK:
                groups = link_groups;
                n_groups = ELEMENTSOF(link_groups);
                break;
        case RTM_NEWADDR:
        case RTM_DELADDR:
                groups = address_groups;
                n_groups = ELEMENTSOF(address_groups);
                break;
        case RTM_NEWNEIGH:
        case RTM_DELNEIGH:
                groups = neighbor_groups;
                n_groups = ELEMENTSOF(neighbor_groups);
                break;
        case RTM_NEWROUTE:
        case RTM_DELROUTE:
                groups = route_groups;
                n_groups = ELEMENTSOF(route_groups);
                break;
        case RTM_NEWRULE:
        case RTM_DELRULE:
                groups = rule_groups;
                n_groups = ELEMENTSOF(rule_groups);
                break;
        case RTM_NEWNEXTHOP:
        case RTM_DELNEXTHOP:
                groups = nexthop_groups;
                n_groups = ELEMENTSOF(nexthop_groups);
                break;
        case RTM_NEWQDISC:
        case RTM_DELQDISC:
        case RTM_NEWTCLASS:
        case RTM_DELTCLASS:
                groups = tc_groups;
                n_groups = ELEMENTSOF(tc_groups);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return netlink_add_match_internal(rtnl, ret_slot, groups, n_groups, type, 0, callback,
                                          destroy_callback, userdata, description);
}
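
/* Illustrative sketch (not part of the original file): subscribing to link change notifications.
 * The connection must be attached to an event loop (or driven via sd_netlink_process()) for the
 * callback to fire; on_link_change is made up for the example, and passing NULL for ret_slot keeps
 * the match registered for the lifetime of the sd_netlink object.
 *
 *     static int on_link_change(sd_netlink *nl, sd_netlink_message *m, void *userdata) {
 *             log_info("Received an RTM_NEWLINK multicast message");
 *             return 0;
 *     }
 *
 *     int r = sd_netlink_add_match(nl, NULL, RTM_NEWLINK, on_link_change, NULL, NULL, "example-link-watch");
 *     if (r < 0)
 *             return r;
 */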

int sd_netlink_attach_filter(sd_netlink *nl, size_t len, const struct sock_filter *filter) {
        assert_return(nl, -EINVAL);
        assert_return(len == 0 || filter, -EINVAL);

        if (setsockopt(nl->fd, SOL_SOCKET,
                       len == 0 ? SO_DETACH_FILTER : SO_ATTACH_FILTER,
                       &(struct sock_fprog) {
                               .len = len,
                               .filter = (struct sock_filter*) filter,
                       }, sizeof(struct sock_fprog)) < 0)
                return -errno;

        return 0;
}
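
/* Illustrative sketch (not part of the original file): attaching a small classic BPF program to the
 * netlink socket so that unwanted message types are dropped in the kernel. The program loads the
 * 16-bit nlmsg_type field (offset 4 in struct nlmsghdr) and accepts only RTM_NEWLINK; BPF absolute
 * loads are big-endian, hence the htons() around the comparison constant. Calling
 * sd_netlink_attach_filter() with len == 0 later detaches the filter again (SO_DETACH_FILTER).
 *
 *     const struct sock_filter filter[] = {
 *             BPF_STMT(BPF_LD | BPF_H | BPF_ABS, offsetof(struct nlmsghdr, nlmsg_type)),
 *             BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htons(RTM_NEWLINK), 0, 1),
 *             BPF_STMT(BPF_RET | BPF_K, UINT32_MAX),   // accept the whole message
 *             BPF_STMT(BPF_RET | BPF_K, 0),            // drop everything else
 *     };
 *
 *     int r = sd_netlink_attach_filter(nl, ELEMENTSOF(filter), filter);
 *     if (r < 0)
 *             return r;
 */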