/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <poll.h>

#include "sd-netlink.h"

#include "alloc-util.h"
#include "fd-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "macro.h"
#include "netlink-genl.h"
#include "netlink-internal.h"
#include "netlink-slot.h"
#include "netlink-util.h"
#include "process-util.h"
#include "socket-util.h"
#include "string-util.h"

/* Some really high limit, to catch programming errors */
#define REPLY_CALLBACKS_MAX UINT16_MAX

static int netlink_new(sd_netlink **ret) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;

        assert_return(ret, -EINVAL);

        nl = new(sd_netlink, 1);
        if (!nl)
                return -ENOMEM;

        *nl = (sd_netlink) {
                .n_ref = 1,
                .fd = -EBADF,
                .sockaddr.nl.nl_family = AF_NETLINK,
                .original_pid = getpid_cached(),
                .protocol = -1,

                /* Kernel change notification messages have sequence number 0. We want to avoid that with our
                 * own serials, in order not to get confused when matching up kernel replies to our earlier
                 * requests.
                 *
                 * Moreover, when using netlink socket activation (i.e. where PID 1 binds an AF_NETLINK
                 * socket for us and passes it to us across execve()) and we get restarted multiple times
                 * while the socket sticks around we might get confused by replies from earlier runs coming
                 * in late — which is pretty likely if we'd start our sequence numbers always from 1. Hence,
                 * let's start with a value based on the system clock. This should make collisions much less
                 * likely (though still theoretically possible). We use a 32 bit μs counter starting at boot
                 * for this (and explicitly exclude the zero, see above). This counter will wrap around after
                 * a bit more than 1h, but that's hopefully OK as the kernel shouldn't take that long to
                 * reply to our requests.
                 *
                 * We only pick the initial start value this way. For each message we simply increase the
                 * sequence number by 1. This means we could enqueue 1 netlink message per μs without risking
                 * collisions, which should be OK.
                 *
                 * Note this means the serials will be in the range 1…UINT32_MAX here.
                 *
                 * (In an ideal world we'd attach the current serial counter to the netlink socket itself
                 * somehow, to avoid all this, but I couldn't come up with a nice way to do this) */
                .serial = (uint32_t) (now(CLOCK_MONOTONIC) % UINT32_MAX) + 1,
        };

        *ret = TAKE_PTR(nl);
        return 0;
}

int sd_netlink_open_fd(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
        int r, protocol;

        assert_return(ret, -EINVAL);
        assert_return(fd >= 0, -EBADF);

        r = netlink_new(&nl);
        if (r < 0)
                return r;

        r = getsockopt_int(fd, SOL_SOCKET, SO_PROTOCOL, &protocol);
        if (r < 0)
                return r;

        nl->fd = fd;
        nl->protocol = protocol;

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_EXT_ACK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_EXT_ACK option, ignoring: %m");

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_GET_STRICT_CHK option, ignoring: %m");

        r = socket_bind(nl);
        if (r < 0) {
                nl->fd = -EBADF; /* on failure, the caller remains owner of the fd, hence don't close it here */
                nl->protocol = -1;
                return r;
        }

        *ret = TAKE_PTR(nl);

        return 0;
}
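
/* Illustrative sketch (not part of the original file): one way a caller might hand an
 * externally created AF_NETLINK socket to sd_netlink_open_fd(), e.g. when the fd was set up
 * elsewhere or received via socket activation. On failure the caller remains owner of the fd,
 * as documented above, so the _cleanup_close_ handler releases it here. */
_unused_ static int example_open_from_fd(sd_netlink **ret) {
        _cleanup_close_ int fd = -EBADF;
        int r;

        fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, NETLINK_ROUTE);
        if (fd < 0)
                return -errno;

        r = sd_netlink_open_fd(ret, fd);
        if (r < 0)
                return r; /* fd is still ours, _cleanup_close_ closes it */

        TAKE_FD(fd); /* ownership moved to the sd_netlink object */
        return 0;
}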

int sd_netlink_open(sd_netlink **ret) {
        return netlink_open_family(ret, NETLINK_ROUTE);
}

int sd_netlink_increase_rxbuf(sd_netlink *nl, size_t size) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return fd_increase_rxbuf(nl->fd, size);
}

static sd_netlink *netlink_free(sd_netlink *nl) {
        sd_netlink_slot *s;

        assert(nl);

        ordered_set_free(nl->rqueue);
        hashmap_free(nl->rqueue_by_serial);
        hashmap_free(nl->rqueue_partial_by_serial);
        free(nl->rbuffer);

        while ((s = nl->slots)) {
                assert(s->floating);
                netlink_slot_disconnect(s, true);
        }
        hashmap_free(nl->reply_callbacks);
        prioq_free(nl->reply_callbacks_prioq);

        sd_event_source_unref(nl->io_event_source);
        sd_event_source_unref(nl->time_event_source);
        sd_event_unref(nl->event);

        hashmap_free(nl->broadcast_group_refs);

        genl_clear_family(nl);

        safe_close(nl->fd);
        return mfree(nl);
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(sd_netlink, sd_netlink, netlink_free);

int sd_netlink_send(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint32_t *serial) {

        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);
        assert_return(!message->sealed, -EPERM);

        netlink_seal_message(nl, message);

        r = socket_write_message(nl, message);
        if (r < 0)
                return r;

        if (serial)
                *serial = message_get_serial(message);

        return 1;
}
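
/* Illustrative sketch (not part of the original file): sending a request and collecting the
 * reply later via the serial returned by sd_netlink_send(). The rtnl message constructor used
 * below is the one declared in sd-netlink.h; error handling is kept minimal. */
_unused_ static int example_send_and_read(sd_netlink *nl, int ifindex) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL, *reply = NULL;
        uint32_t serial;
        int r;

        r = sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, ifindex);
        if (r < 0)
                return r;

        r = sd_netlink_send(nl, req, &serial);
        if (r < 0)
                return r;

        /* ... do other work, then block for the reply matching our serial ... */
        return sd_netlink_read(nl, serial, 0, &reply); /* 0 = NETLINK_DEFAULT_TIMEOUT_USEC, see calc_elapse() */
}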

static int dispatch_rqueue(sd_netlink *nl, sd_netlink_message **ret) {
        sd_netlink_message *m;
        int r;

        assert(nl);
        assert(ret);

        if (ordered_set_size(nl->rqueue) <= 0) {
                /* Try to read a new message */
                r = socket_read_message(nl);
                if (r == -ENOBUFS) /* FIXME: ignore buffer overruns for now */
                        log_debug_errno(r, "sd-netlink: Got ENOBUFS from netlink socket, ignoring.");
                else if (r < 0)
                        return r;
        }

        /* Dispatch a queued message */
        m = ordered_set_steal_first(nl->rqueue);
        if (m)
                sd_netlink_message_unref(hashmap_remove_value(nl->rqueue_by_serial, UINT32_TO_PTR(message_get_serial(m)), m));
        *ret = m;
        return !!m;
}

static int process_timeout(sd_netlink *nl) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        struct reply_callback *c;
        sd_netlink_slot *slot;
        usec_t n;
        int r;

        assert(nl);

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c)
                return 0;

        n = now(CLOCK_MONOTONIC);
        if (c->timeout > n)
                return 0;

        r = message_new_synthetic_error(nl, -ETIMEDOUT, c->serial, &m);
        if (r < 0)
                return r;

        assert_se(prioq_pop(nl->reply_callbacks_prioq) == c);
        c->timeout = 0;
        hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(c->serial));

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: timedout callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_reply(sd_netlink *nl, sd_netlink_message *m) {
        struct reply_callback *c;
        sd_netlink_slot *slot;
        uint32_t serial;
        uint16_t type;
        int r;

        assert(nl);
        assert(m);

        serial = message_get_serial(m);
        c = hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(serial));
        if (!c)
                return 0;

        if (c->timeout != 0) {
                prioq_remove(nl->reply_callbacks_prioq, c, &c->prioq_idx);
                c->timeout = 0;
        }

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (type == NLMSG_DONE)
                m = NULL;

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: reply callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_match(sd_netlink *nl, sd_netlink_message *m) {
        uint16_t type;
        uint8_t cmd;
        int r;

        assert(nl);
        assert(m);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (m->protocol == NETLINK_GENERIC) {
                r = sd_genl_message_get_command(nl, m, &cmd);
                if (r < 0)
                        return r;
        } else
                cmd = 0;

        LIST_FOREACH(match_callbacks, c, nl->match_callbacks) {
                sd_netlink_slot *slot;
                bool found = false;

                if (c->type != type)
                        continue;
                if (c->cmd != 0 && c->cmd != cmd)
                        continue;

                for (size_t i = 0; i < c->n_groups; i++)
                        if (c->groups[i] == m->multicast_group) {
                                found = true;
                                break;
                        }

                if (!found)
                        continue;

                slot = container_of(c, sd_netlink_slot, match_callback);

                r = c->callback(nl, m, slot->userdata);
                if (r < 0)
                        log_debug_errno(r, "sd-netlink: match callback %s%s%sfailed: %m",
                                        slot->description ? "'" : "",
                                        strempty(slot->description),
                                        slot->description ? "' " : "");
                if (r != 0)
                        break;
        }

        return 1;
}

static int process_running(sd_netlink *nl, sd_netlink_message **ret) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        int r;

        assert(nl);

        r = process_timeout(nl);
        if (r != 0)
                goto null_message;

        r = dispatch_rqueue(nl, &m);
        if (r < 0)
                return r;
        if (!m)
                goto null_message;

        if (sd_netlink_message_is_broadcast(m))
                r = process_match(nl, m);
        else
                r = process_reply(nl, m);
        if (r != 0)
                goto null_message;

        if (ret) {
                *ret = TAKE_PTR(m);

                return 1;
        }

        return 1;

null_message:
        if (r >= 0 && ret)
                *ret = NULL;

        return r;
}

int sd_netlink_process(sd_netlink *nl, sd_netlink_message **ret) {
        NETLINK_DONT_DESTROY(nl);
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(!nl->processing, -EBUSY);

        nl->processing = true;
        r = process_running(nl, ret);
        nl->processing = false;

        return r;
}

static usec_t calc_elapse(uint64_t usec) {
        if (usec == UINT64_MAX)
                return 0;

        if (usec == 0)
                usec = NETLINK_DEFAULT_TIMEOUT_USEC;

        return usec_add(now(CLOCK_MONOTONIC), usec);
}

static int netlink_poll(sd_netlink *nl, bool need_more, usec_t timeout_usec) {
        usec_t m = USEC_INFINITY;
        int r, e;

        assert(nl);

        e = sd_netlink_get_events(nl);
        if (e < 0)
                return e;

        if (need_more)
                /* Caller wants more data, and doesn't care about
                 * what's been read or any other timeouts. */
                e |= POLLIN;
        else {
                usec_t until;

                /* Caller wants to process if there is something to
                 * process, but doesn't care otherwise */

                r = sd_netlink_get_timeout(nl, &until);
                if (r < 0)
                        return r;

                m = usec_sub_unsigned(until, now(CLOCK_MONOTONIC));
        }

        r = fd_wait_for_event(nl->fd, e, MIN(m, timeout_usec));
        if (r <= 0)
                return r;

        return 1;
}

int sd_netlink_wait(sd_netlink *nl, uint64_t timeout_usec) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (ordered_set_size(nl->rqueue) > 0)
                return 0;

        r = netlink_poll(nl, false, timeout_usec);
        if (ERRNO_IS_NEG_TRANSIENT(r)) /* Convert EINTR to "something happened" and give user a chance to run some code before calling back into us */
                return 1;
        return r;
}
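
/* Illustrative sketch (not part of the original file): a simple manual loop that drives the
 * connection without attaching it to an sd_event loop, alternating sd_netlink_process() and
 * sd_netlink_wait(). Broadcasts are fed to registered match callbacks, replies to their reply
 * callbacks; the loop only terminates on error. */
_unused_ static int example_manual_loop(sd_netlink *nl) {
        int r;

        for (;;) {
                r = sd_netlink_process(nl, NULL);
                if (r < 0)
                        return r;
                if (r > 0) /* something was processed, check for more before sleeping */
                        continue;

                r = sd_netlink_wait(nl, UINT64_MAX);
                if (r < 0)
                        return r;
        }
}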

static int timeout_compare(const void *a, const void *b) {
        const struct reply_callback *x = a, *y = b;

        if (x->timeout != 0 && y->timeout == 0)
                return -1;

        if (x->timeout == 0 && y->timeout != 0)
                return 1;

        return CMP(x->timeout, y->timeout);
}

int sd_netlink_call_async(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                sd_netlink_message *m,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                uint64_t usec,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r, k;

        assert_return(nl, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (hashmap_size(nl->reply_callbacks) >= REPLY_CALLBACKS_MAX)
                return -ERANGE;

        r = hashmap_ensure_allocated(&nl->reply_callbacks, &trivial_hash_ops);
        if (r < 0)
                return r;

        if (usec != UINT64_MAX) {
                r = prioq_ensure_allocated(&nl->reply_callbacks_prioq, timeout_compare);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_REPLY_CALLBACK, sizeof(struct reply_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->reply_callback.callback = callback;
        slot->reply_callback.timeout = calc_elapse(usec);

        k = sd_netlink_send(nl, m, &slot->reply_callback.serial);
        if (k < 0)
                return k;

        r = hashmap_put(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial), &slot->reply_callback);
        if (r < 0)
                return r;

        if (slot->reply_callback.timeout != 0) {
                r = prioq_put(nl->reply_callbacks_prioq, &slot->reply_callback, &slot->reply_callback.prioq_idx);
                if (r < 0) {
                        (void) hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial));
                        return r;
                }
        }

        /* Set this at last. Otherwise, some failures in above would call destroy_callback but some would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);

        return k;
}
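
/* Illustrative sketch (not part of the original file): issuing an asynchronous request. With
 * ret_slot passed as NULL the slot is "floating" and is cleaned up automatically once the reply
 * (or timeout) callback has run; usec == 0 selects the default timeout via calc_elapse(). */
_unused_ static int example_async_reply_handler(sd_netlink *nl, sd_netlink_message *reply, void *userdata) {
        int r;

        if (!reply) /* NLMSG_DONE is delivered as NULL, see process_reply() above */
                return 0;

        r = sd_netlink_message_get_errno(reply);
        if (r < 0)
                log_debug_errno(r, "Request failed (or timed out with -ETIMEDOUT): %m");

        return 0;
}

_unused_ static int example_call_async(sd_netlink *nl, int ifindex) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL;
        int r;

        r = sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, ifindex);
        if (r < 0)
                return r;

        return sd_netlink_call_async(nl, NULL, req, example_async_reply_handler,
                                     NULL, NULL, 0, "example-getlink-request");
}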

int sd_netlink_read(
                sd_netlink *nl,
                uint32_t serial,
                uint64_t usec,
                sd_netlink_message **ret) {

        usec_t timeout;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        timeout = calc_elapse(usec);

        for (;;) {
                _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
                usec_t left;

                m = hashmap_remove(nl->rqueue_by_serial, UINT32_TO_PTR(serial));
                if (m) {
                        uint16_t type;

                        /* found a match, remove from rqueue and return it */
                        sd_netlink_message_unref(ordered_set_remove(nl->rqueue, m));

                        r = sd_netlink_message_get_errno(m);
                        if (r < 0)
                                return r;

                        r = sd_netlink_message_get_type(m, &type);
                        if (r < 0)
                                return r;

                        if (type == NLMSG_DONE) {
                                if (ret)
                                        *ret = NULL;
                                return 0;
                        }

                        if (ret)
                                *ret = TAKE_PTR(m);
                        return 1;
                }

                r = socket_read_message(nl);
                if (r < 0)
                        return r;
                if (r > 0)
                        /* received message, so try to process straight away */
                        continue;

                if (timeout > 0) {
                        usec_t n;

                        n = now(CLOCK_MONOTONIC);
                        if (n >= timeout)
                                return -ETIMEDOUT;

                        left = usec_sub_unsigned(timeout, n);
                } else
                        left = USEC_INFINITY;

                r = netlink_poll(nl, true, left);
                if (r < 0)
                        return r;
                if (r == 0)
                        return -ETIMEDOUT;
        }
}

int sd_netlink_call(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint64_t usec,
                sd_netlink_message **ret) {

        uint32_t serial;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);

        r = sd_netlink_send(nl, message, &serial);
        if (r < 0)
                return r;

        return sd_netlink_read(nl, serial, usec, ret);
}
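
/* Illustrative sketch (not part of the original file): a synchronous round trip with
 * sd_netlink_call(), querying a link by its ifindex and reading the IFLA_IFNAME attribute
 * from the reply, using the message constructors and readers declared in sd-netlink.h. */
_unused_ static int example_get_link_name(sd_netlink *nl, int ifindex, char **ret_name) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL, *reply = NULL;
        const char *name;
        char *copy;
        int r;

        r = sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, ifindex);
        if (r < 0)
                return r;

        r = sd_netlink_call(nl, req, 0, &reply); /* 0 = default timeout */
        if (r < 0)
                return r;

        r = sd_netlink_message_read_string(reply, IFLA_IFNAME, &name);
        if (r < 0)
                return r;

        copy = strdup(name);
        if (!copy)
                return -ENOMEM;

        *ret_name = copy;
        return 0;
}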

int sd_netlink_get_events(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return ordered_set_size(nl->rqueue) == 0 ? POLLIN : 0;
}

int sd_netlink_get_timeout(sd_netlink *nl, uint64_t *timeout_usec) {
        struct reply_callback *c;

        assert_return(nl, -EINVAL);
        assert_return(timeout_usec, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (ordered_set_size(nl->rqueue) > 0) {
                *timeout_usec = 0;
                return 1;
        }

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c) {
                *timeout_usec = UINT64_MAX;
                return 0;
        }

        *timeout_usec = c->timeout;
        return 1;
}

static int io_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r;

        r = sd_netlink_process(nl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int time_callback(sd_event_source *s, uint64_t usec, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r;

        r = sd_netlink_process(nl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int prepare_callback(sd_event_source *s, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r, enabled;
        usec_t until;

        assert(s);

        r = sd_netlink_get_events(nl);
        if (r < 0)
                return r;

        r = sd_event_source_set_io_events(nl->io_event_source, r);
        if (r < 0)
                return r;

        enabled = sd_netlink_get_timeout(nl, &until);
        if (enabled < 0)
                return enabled;
        if (enabled > 0) {
                r = sd_event_source_set_time(nl->time_event_source, until);
                if (r < 0)
                        return r;
        }

        r = sd_event_source_set_enabled(nl->time_event_source,
                                        enabled > 0 ? SD_EVENT_ONESHOT : SD_EVENT_OFF);
        if (r < 0)
                return r;

        return 1;
}

int sd_netlink_attach_event(sd_netlink *nl, sd_event *event, int64_t priority) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!nl->event, -EBUSY);

        assert(!nl->io_event_source);
        assert(!nl->time_event_source);

        if (event)
                nl->event = sd_event_ref(event);
        else {
                r = sd_event_default(&nl->event);
                if (r < 0)
                        return r;
        }

        r = sd_event_add_io(nl->event, &nl->io_event_source, nl->fd, 0, io_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->io_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->io_event_source, "netlink-receive-message");
        if (r < 0)
                goto fail;

        r = sd_event_source_set_prepare(nl->io_event_source, prepare_callback);
        if (r < 0)
                goto fail;

        r = sd_event_add_time(nl->event, &nl->time_event_source, CLOCK_MONOTONIC, 0, 0, time_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->time_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->time_event_source, "netlink-timer");
        if (r < 0)
                goto fail;

        return 0;

fail:
        sd_netlink_detach_event(nl);
        return r;
}
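
/* Illustrative sketch (not part of the original file): hooking a connection up to an sd_event
 * loop so that incoming messages and reply timeouts are dispatched automatically, instead of
 * driving sd_netlink_process()/sd_netlink_wait() by hand. */
_unused_ static int example_run_with_event_loop(void) {
        _cleanup_(sd_event_unrefp) sd_event *event = NULL;
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
        int r;

        r = sd_event_default(&event);
        if (r < 0)
                return r;

        r = sd_netlink_open(&nl);
        if (r < 0)
                return r;

        r = sd_netlink_attach_event(nl, event, SD_EVENT_PRIORITY_NORMAL);
        if (r < 0)
                return r;

        /* ... register match or reply callbacks here ... */

        return sd_event_loop(event);
}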

int sd_netlink_detach_event(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(nl->event, -ENXIO);

        nl->io_event_source = sd_event_source_unref(nl->io_event_source);

        nl->time_event_source = sd_event_source_unref(nl->time_event_source);

        nl->event = sd_event_unref(nl->event);

        return 0;
}

int netlink_add_match_internal(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                const uint32_t *groups,
                size_t n_groups,
                uint16_t type,
                uint8_t cmd,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r;

        assert(groups);
        assert(n_groups > 0);

        for (size_t i = 0; i < n_groups; i++) {
                r = socket_broadcast_group_ref(nl, groups[i]);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_MATCH_CALLBACK, sizeof(struct match_callback),
                                  userdata, description, &slot);
        if (r < 0)
                return r;

        slot->match_callback.groups = newdup(uint32_t, groups, n_groups);
        if (!slot->match_callback.groups)
                return -ENOMEM;

        slot->match_callback.n_groups = n_groups;
        slot->match_callback.callback = callback;
        slot->match_callback.type = type;
        slot->match_callback.cmd = cmd;

        LIST_PREPEND(match_callbacks, nl->match_callbacks, &slot->match_callback);

        /* Set this at last. Otherwise, some failures in above call the destroy callback but some do not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);
        return 0;
}

int sd_netlink_add_match(
                sd_netlink *rtnl,
                sd_netlink_slot **ret_slot,
                uint16_t type,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        static const uint32_t
                address_groups[] = { RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR, },
                link_groups[] = { RTNLGRP_LINK, },
                neighbor_groups[] = { RTNLGRP_NEIGH, },
                nexthop_groups[] = { RTNLGRP_NEXTHOP, },
                route_groups[] = { RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV6_ROUTE, },
                rule_groups[] = { RTNLGRP_IPV4_RULE, RTNLGRP_IPV6_RULE, },
                tc_groups[] = { RTNLGRP_TC };
        const uint32_t *groups;
        size_t n_groups;

        assert_return(rtnl, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(rtnl), -ECHILD);

        switch (type) {
                case RTM_NEWLINK:
                case RTM_DELLINK:
                        groups = link_groups;
                        n_groups = ELEMENTSOF(link_groups);
                        break;
                case RTM_NEWADDR:
                case RTM_DELADDR:
                        groups = address_groups;
                        n_groups = ELEMENTSOF(address_groups);
                        break;
                case RTM_NEWNEIGH:
                case RTM_DELNEIGH:
                        groups = neighbor_groups;
                        n_groups = ELEMENTSOF(neighbor_groups);
                        break;
                case RTM_NEWROUTE:
                case RTM_DELROUTE:
                        groups = route_groups;
                        n_groups = ELEMENTSOF(route_groups);
                        break;
                case RTM_NEWRULE:
                case RTM_DELRULE:
                        groups = rule_groups;
                        n_groups = ELEMENTSOF(rule_groups);
                        break;
                case RTM_NEWNEXTHOP:
                case RTM_DELNEXTHOP:
                        groups = nexthop_groups;
                        n_groups = ELEMENTSOF(nexthop_groups);
                        break;
                case RTM_NEWQDISC:
                case RTM_DELQDISC:
                case RTM_NEWTCLASS:
                case RTM_DELTCLASS:
                        groups = tc_groups;
                        n_groups = ELEMENTSOF(tc_groups);
                        break;
                default:
                        return -EOPNOTSUPP;
        }

        return netlink_add_match_internal(rtnl, ret_slot, groups, n_groups, type, 0, callback,
                                          destroy_callback, userdata, description);
}
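
/* Illustrative sketch (not part of the original file): subscribing to link change notifications.
 * The match maps RTM_NEWLINK to the RTNLGRP_LINK multicast group (see the switch above); the
 * callback then runs for every matching broadcast once the socket is dispatched. */
_unused_ static int example_on_new_link(sd_netlink *nl, sd_netlink_message *m, void *userdata) {
        const char *name;

        if (sd_netlink_message_read_string(m, IFLA_IFNAME, &name) >= 0)
                log_debug("Link '%s' changed.", name);

        return 0;
}

_unused_ static int example_watch_links(sd_netlink *nl, sd_netlink_slot **ret_slot) {
        return sd_netlink_add_match(nl, ret_slot, RTM_NEWLINK, example_on_new_link,
                                    NULL, NULL, "example-link-watch");
}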

int sd_netlink_attach_filter(sd_netlink *nl, size_t len, const struct sock_filter *filter) {
        assert_return(nl, -EINVAL);
        assert_return(len == 0 || filter, -EINVAL);

        if (setsockopt(nl->fd, SOL_SOCKET,
                       len == 0 ? SO_DETACH_FILTER : SO_ATTACH_FILTER,
                       &(struct sock_fprog) {
                               .len = len,
                               .filter = (struct sock_filter*) filter,
                       }, sizeof(struct sock_fprog)) < 0)
                return -errno;

        return 0;
}
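
/* Illustrative sketch (not part of the original file): attaching a trivial classic-BPF program
 * that accepts every message, as a template only; a real filter would inspect the netlink
 * header first. Calling sd_netlink_attach_filter() with len == 0 later detaches the filter
 * again, as implemented above. */
_unused_ static int example_attach_accept_all_filter(sd_netlink *nl) {
        static const struct sock_filter filter[] = {
                /* Return the maximum length, i.e. accept the whole packet. */
                BPF_STMT(BPF_RET | BPF_K, UINT32_MAX),
        };

        return sd_netlink_attach_filter(nl, ELEMENTSOF(filter), filter);
}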