/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <poll.h>

#include "sd-netlink.h"

#include "alloc-util.h"
#include "fd-util.h"
#include "hashmap.h"
#include "io-util.h"
#include "macro.h"
#include "netlink-genl.h"
#include "netlink-internal.h"
#include "netlink-slot.h"
#include "netlink-util.h"
#include "process-util.h"
#include "socket-util.h"
#include "string-util.h"

/* Some really high limit, to catch programming errors */
#define REPLY_CALLBACKS_MAX UINT16_MAX

static int netlink_new(sd_netlink **ret) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;

        assert_return(ret, -EINVAL);

        nl = new(sd_netlink, 1);
        if (!nl)
                return -ENOMEM;

        *nl = (sd_netlink) {
                .n_ref = 1,
                .fd = -1,
                .sockaddr.nl.nl_family = AF_NETLINK,
                .original_pid = getpid_cached(),
                .protocol = -1,

                /* Kernel change notification messages have sequence number 0. We want to avoid that with our
                 * own serials, in order not to get confused when matching up kernel replies to our earlier
                 * requests.
                 *
                 * Moreover, when using netlink socket activation (i.e. where PID 1 binds an AF_NETLINK
                 * socket for us and passes it to us across execve()) and we get restarted multiple times
                 * while the socket sticks around we might get confused by replies from earlier runs coming
                 * in late — which is pretty likely if we'd start our sequence numbers always from 1. Hence,
                 * let's start with a value based on the system clock. This should make collisions much less
                 * likely (though still theoretically possible). We use a 32 bit µs counter starting at boot
                 * for this (and explicitly exclude the zero, see above). This counter will wrap around after
                 * a bit more than 1h, but that's hopefully OK as the kernel shouldn't take that long to
                 * reply to our requests.
                 *
                 * We only pick the initial start value this way. For each message we simply increase the
                 * sequence number by 1. This means we could enqueue 1 netlink message per µs without risking
                 * collisions, which should be OK.
                 *
                 * Note this means the serials will be in the range 1…UINT32_MAX here.
                 *
                 * (In an ideal world we'd attach the current serial counter to the netlink socket itself
                 * somehow, to avoid all this, but I couldn't come up with a nice way to do this) */
                .serial = (uint32_t) (now(CLOCK_MONOTONIC) % UINT32_MAX) + 1,
        };

        *ret = TAKE_PTR(nl);
        return 0;
}

int sd_netlink_open_fd(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
        int r, protocol;

        assert_return(ret, -EINVAL);
        assert_return(fd >= 0, -EBADF);

        r = netlink_new(&nl);
        if (r < 0)
                return r;

        r = getsockopt_int(fd, SOL_SOCKET, SO_PROTOCOL, &protocol);
        if (r < 0)
                return r;

        nl->fd = fd;
        nl->protocol = protocol;

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_EXT_ACK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_EXT_ACK option, ignoring: %m");

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_GET_STRICT_CHK option, ignoring: %m");

        r = socket_bind(nl);
        if (r < 0) {
                nl->fd = -1; /* on failure, the caller remains owner of the fd, hence don't close it here */
                nl->protocol = -1;
                return r;
        }

        *ret = TAKE_PTR(nl);

        return 0;
}

int sd_netlink_open(sd_netlink **ret) {
        return netlink_open_family(ret, NETLINK_ROUTE);
}

int sd_netlink_increase_rxbuf(sd_netlink *nl, size_t size) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return fd_increase_rxbuf(nl->fd, size);
}

static sd_netlink *netlink_free(sd_netlink *nl) {
        sd_netlink_slot *s;
        unsigned i;

        assert(nl);

        for (i = 0; i < nl->rqueue_size; i++)
                sd_netlink_message_unref(nl->rqueue[i]);
        free(nl->rqueue);

        for (i = 0; i < nl->rqueue_partial_size; i++)
                sd_netlink_message_unref(nl->rqueue_partial[i]);
        free(nl->rqueue_partial);

        free(nl->rbuffer);

        while ((s = nl->slots)) {
                assert(s->floating);
                netlink_slot_disconnect(s, true);
        }
        hashmap_free(nl->reply_callbacks);
        prioq_free(nl->reply_callbacks_prioq);

        sd_event_source_unref(nl->io_event_source);
        sd_event_source_unref(nl->time_event_source);
        sd_event_unref(nl->event);

        hashmap_free(nl->broadcast_group_refs);

        genl_clear_family(nl);

        safe_close(nl->fd);
        return mfree(nl);
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(sd_netlink, sd_netlink, netlink_free);

int sd_netlink_send(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint32_t *serial) {

        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);
        assert_return(!message->sealed, -EPERM);

        netlink_seal_message(nl, message);

        r = socket_write_message(nl, message);
        if (r < 0)
                return r;

        if (serial)
                *serial = message_get_serial(message);

        return 1;
}

int netlink_rqueue_make_room(sd_netlink *nl) {
        assert(nl);

        if (nl->rqueue_size >= NETLINK_RQUEUE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
                                       "sd-netlink: exhausted the read queue size (%d)",
                                       NETLINK_RQUEUE_MAX);

        if (!GREEDY_REALLOC(nl->rqueue, nl->rqueue_size + 1))
                return -ENOMEM;

        return 0;
}

int netlink_rqueue_partial_make_room(sd_netlink *nl) {
        assert(nl);

        if (nl->rqueue_partial_size >= NETLINK_RQUEUE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
                                       "sd-netlink: exhausted the partial read queue size (%d)",
                                       NETLINK_RQUEUE_MAX);

        if (!GREEDY_REALLOC(nl->rqueue_partial, nl->rqueue_partial_size + 1))
                return -ENOMEM;

        return 0;
}

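/* Editorial note (not part of the original source): pop the oldest message off the receive queue; if the
 * queue is empty, try to read one message from the socket first. Returns 1 when a message was dispatched
 * (or an ENOBUFS overrun was ignored, in which case *message is not set), 0 when nothing is pending, and
 * a negative errno on error. */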
static int dispatch_rqueue(sd_netlink *nl, sd_netlink_message **message) {
        int r;

        assert(nl);
        assert(message);

        if (nl->rqueue_size <= 0) {
                /* Try to read a new message */
                r = socket_read_message(nl);
                if (r == -ENOBUFS) { /* FIXME: ignore buffer overruns for now */
                        log_debug_errno(r, "sd-netlink: Got ENOBUFS from netlink socket, ignoring.");
                        return 1;
                }
                if (r <= 0)
                        return r;
        }

        /* Dispatch a queued message */
        *message = nl->rqueue[0];
        nl->rqueue_size--;
        memmove(nl->rqueue, nl->rqueue + 1, sizeof(sd_netlink_message*) * nl->rqueue_size);

        return 1;
}

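/* Editorial note (not part of the original source): if the reply callback with the earliest deadline has
 * already expired, synthesize an ETIMEDOUT error message and dispatch it to that callback. Returns 1 if a
 * timeout was dispatched, 0 if none is due. */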
static int process_timeout(sd_netlink *nl) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        struct reply_callback *c;
        sd_netlink_slot *slot;
        usec_t n;
        int r;

        assert(nl);

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c)
                return 0;

        n = now(CLOCK_MONOTONIC);
        if (c->timeout > n)
                return 0;

        r = message_new_synthetic_error(nl, -ETIMEDOUT, c->serial, &m);
        if (r < 0)
                return r;

        assert_se(prioq_pop(nl->reply_callbacks_prioq) == c);
        c->timeout = 0;
        hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(c->serial));

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: timed-out callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_reply(sd_netlink *nl, sd_netlink_message *m) {
        struct reply_callback *c;
        sd_netlink_slot *slot;
        uint32_t serial;
        uint16_t type;
        int r;

        assert(nl);
        assert(m);

        serial = message_get_serial(m);
        c = hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(serial));
        if (!c)
                return 0;

        if (c->timeout != 0) {
                prioq_remove(nl->reply_callbacks_prioq, c, &c->prioq_idx);
                c->timeout = 0;
        }

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (type == NLMSG_DONE)
                m = NULL;

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: reply callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_match(sd_netlink *nl, sd_netlink_message *m) {
        uint16_t type;
        uint8_t cmd;
        int r;

        assert(nl);
        assert(m);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (m->protocol == NETLINK_GENERIC) {
                r = sd_genl_message_get_command(nl, m, &cmd);
                if (r < 0)
                        return r;
        } else
                cmd = 0;

        LIST_FOREACH(match_callbacks, c, nl->match_callbacks) {
                sd_netlink_slot *slot;
                bool found = false;

                if (c->type != type)
                        continue;
                if (c->cmd != 0 && c->cmd != cmd)
                        continue;

                for (size_t i = 0; i < c->n_groups; i++)
                        if (c->groups[i] == m->multicast_group) {
                                found = true;
                                break;
                        }

                if (!found)
                        continue;

                slot = container_of(c, sd_netlink_slot, match_callback);

                r = c->callback(nl, m, slot->userdata);
                if (r < 0)
                        log_debug_errno(r, "sd-netlink: match callback %s%s%sfailed: %m",
                                        slot->description ? "'" : "",
                                        strempty(slot->description),
                                        slot->description ? "' " : "");
                if (r != 0)
                        break;
        }

        return 1;
}

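/* Editorial note (not part of the original source): process one pending item — an expired reply timeout, a
 * queued reply, or a broadcast match. A message that no registered callback consumed is handed to the
 * caller via *ret (if provided). */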
static int process_running(sd_netlink *nl, sd_netlink_message **ret) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        int r;

        assert(nl);

        r = process_timeout(nl);
        if (r != 0)
                goto null_message;

        r = dispatch_rqueue(nl, &m);
        if (r < 0)
                return r;
        if (!m)
                goto null_message;

        if (sd_netlink_message_is_broadcast(m))
                r = process_match(nl, m);
        else
                r = process_reply(nl, m);
        if (r != 0)
                goto null_message;

        if (ret) {
                *ret = TAKE_PTR(m);

                return 1;
        }

        return 1;

null_message:
        if (r >= 0 && ret)
                *ret = NULL;

        return r;
}

int sd_netlink_process(sd_netlink *nl, sd_netlink_message **ret) {
        NETLINK_DONT_DESTROY(nl);
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(!nl->processing, -EBUSY);

        nl->processing = true;
        r = process_running(nl, ret);
        nl->processing = false;

        return r;
}

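/* Editorial note (not part of the original source): convert a relative timeout into an absolute
 * CLOCK_MONOTONIC deadline. UINT64_MAX means "no timeout" (mapped to 0), and 0 selects
 * NETLINK_DEFAULT_TIMEOUT_USEC. */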
static usec_t calc_elapse(uint64_t usec) {
        if (usec == UINT64_MAX)
                return 0;

        if (usec == 0)
                usec = NETLINK_DEFAULT_TIMEOUT_USEC;

        return usec_add(now(CLOCK_MONOTONIC), usec);
}

static int netlink_poll(sd_netlink *nl, bool need_more, usec_t timeout_usec) {
        usec_t m = USEC_INFINITY;
        int r, e;

        assert(nl);

        e = sd_netlink_get_events(nl);
        if (e < 0)
                return e;

        if (need_more)
                /* Caller wants more data, and doesn't care about
                 * what's been read or any other timeouts. */
                e |= POLLIN;
        else {
                usec_t until;

                /* Caller wants to process if there is something to
                 * process, but doesn't care otherwise */

                r = sd_netlink_get_timeout(nl, &until);
                if (r < 0)
                        return r;

                m = usec_sub_unsigned(until, now(CLOCK_MONOTONIC));
        }

        r = fd_wait_for_event(nl->fd, e, MIN(m, timeout_usec));
        if (r <= 0)
                return r;

        return 1;
}

int sd_netlink_wait(sd_netlink *nl, uint64_t timeout_usec) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (nl->rqueue_size > 0)
                return 0;

        r = netlink_poll(nl, false, timeout_usec);
        if (r < 0 && ERRNO_IS_TRANSIENT(r)) /* Convert EINTR to "something happened" and give user a chance to run some code before calling back into us */
                return 1;
        return r;
}

static int timeout_compare(const void *a, const void *b) {
        const struct reply_callback *x = a, *y = b;

        if (x->timeout != 0 && y->timeout == 0)
                return -1;

        if (x->timeout == 0 && y->timeout != 0)
                return 1;

        return CMP(x->timeout, y->timeout);
}

int sd_netlink_call_async(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                sd_netlink_message *m,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                uint64_t usec,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r, k;

        assert_return(nl, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (hashmap_size(nl->reply_callbacks) >= REPLY_CALLBACKS_MAX)
                return -ERANGE;

        r = hashmap_ensure_allocated(&nl->reply_callbacks, &trivial_hash_ops);
        if (r < 0)
                return r;

        if (usec != UINT64_MAX) {
                r = prioq_ensure_allocated(&nl->reply_callbacks_prioq, timeout_compare);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_REPLY_CALLBACK, sizeof(struct reply_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->reply_callback.callback = callback;
        slot->reply_callback.timeout = calc_elapse(usec);

        k = sd_netlink_send(nl, m, &slot->reply_callback.serial);
        if (k < 0)
                return k;

        r = hashmap_put(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial), &slot->reply_callback);
        if (r < 0)
                return r;

        if (slot->reply_callback.timeout != 0) {
                r = prioq_put(nl->reply_callbacks_prioq, &slot->reply_callback, &slot->reply_callback.prioq_idx);
                if (r < 0) {
                        (void) hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial));
                        return r;
                }
        }

        /* Set this last. Otherwise, some of the failure paths above would call destroy_callback while
         * others would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);

        return k;
}
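
/* Illustrative sketch (not part of the original source): a typical asynchronous request via
 * sd_netlink_call_async(). The handler below is hypothetical; passing NULL for ret_slot makes the slot
 * "floating", so it is cleaned up automatically once the reply (or a timeout) has been dispatched, and a
 * timeout of 0 selects the default.
 *
 *     static int on_reply(sd_netlink *nl, sd_netlink_message *m, void *userdata) {
 *             // Inspect the reply here, e.g. with sd_netlink_message_get_errno(m).
 *             return 0;
 *     }
 *
 *     sd_netlink_call_async(nl, NULL, req, on_reply, NULL, NULL, 0, "my-request");
 */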

int sd_netlink_read(
                sd_netlink *nl,
                uint32_t serial,
                uint64_t usec,
                sd_netlink_message **ret) {

        usec_t timeout;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        timeout = calc_elapse(usec);

        for (;;) {
                usec_t left;

                for (unsigned i = 0; i < nl->rqueue_size; i++) {
                        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *incoming = NULL;
                        uint32_t received_serial;
                        uint16_t type;

                        received_serial = message_get_serial(nl->rqueue[i]);
                        if (received_serial != serial)
                                continue;

                        incoming = nl->rqueue[i];

                        /* found a match, remove from rqueue and return it */
                        memmove(nl->rqueue + i, nl->rqueue + i + 1,
                                sizeof(sd_netlink_message*) * (nl->rqueue_size - i - 1));
                        nl->rqueue_size--;

                        r = sd_netlink_message_get_errno(incoming);
                        if (r < 0)
                                return r;

                        r = sd_netlink_message_get_type(incoming, &type);
                        if (r < 0)
                                return r;

                        if (type == NLMSG_DONE) {
                                if (ret)
                                        *ret = NULL;
                                return 0;
                        }

                        if (ret)
                                *ret = TAKE_PTR(incoming);
                        return 1;
                }

                r = socket_read_message(nl);
                if (r < 0)
                        return r;
                if (r > 0)
                        /* received message, so try to process straight away */
                        continue;

                if (timeout > 0) {
                        usec_t n;

                        n = now(CLOCK_MONOTONIC);
                        if (n >= timeout)
                                return -ETIMEDOUT;

                        left = usec_sub_unsigned(timeout, n);
                } else
                        left = USEC_INFINITY;

                r = netlink_poll(nl, true, left);
                if (r < 0)
                        return r;
                if (r == 0)
                        return -ETIMEDOUT;
        }
}

int sd_netlink_call(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint64_t usec,
                sd_netlink_message **ret) {

        uint32_t serial;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);

        r = sd_netlink_send(nl, message, &serial);
        if (r < 0)
                return r;

        return sd_netlink_read(nl, serial, usec, ret);
}
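
/* Illustrative sketch (not part of the original source): a synchronous request/reply round-trip with
 * sd_netlink_call(). The message constructor and reader used here (sd_rtnl_message_new_link(),
 * sd_netlink_message_read_string()) come from other parts of this library; error handling is omitted for
 * brevity.
 *
 *     sd_netlink *nl = NULL;
 *     sd_netlink_message *req = NULL, *reply = NULL;
 *     const char *name;
 *
 *     sd_netlink_open(&nl);
 *     sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, 1);          // query ifindex 1
 *     sd_netlink_call(nl, req, 0, &reply);                         // 0 = default timeout
 *     sd_netlink_message_read_string(reply, IFLA_IFNAME, &name);
 */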

int sd_netlink_get_events(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return nl->rqueue_size == 0 ? POLLIN : 0;
}

int sd_netlink_get_timeout(sd_netlink *nl, uint64_t *timeout_usec) {
        struct reply_callback *c;

        assert_return(nl, -EINVAL);
        assert_return(timeout_usec, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (nl->rqueue_size > 0) {
                *timeout_usec = 0;
                return 1;
        }

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c) {
                *timeout_usec = UINT64_MAX;
                return 0;
        }

        *timeout_usec = c->timeout;

        return 1;
}

static int io_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r;

        r = sd_netlink_process(nl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int time_callback(sd_event_source *s, uint64_t usec, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r;

        r = sd_netlink_process(nl, NULL);
        if (r < 0)
                return r;

        return 1;
}

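/* Editorial note (not part of the original source): invoked by sd-event before each poll iteration — sync
 * the I/O event mask with the state of the receive queue and arm (or disarm) the timer event source
 * according to the earliest reply timeout. */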
static int prepare_callback(sd_event_source *s, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r, enabled;
        usec_t until;

        assert(s);

        r = sd_netlink_get_events(nl);
        if (r < 0)
                return r;

        r = sd_event_source_set_io_events(nl->io_event_source, r);
        if (r < 0)
                return r;

        enabled = sd_netlink_get_timeout(nl, &until);
        if (enabled < 0)
                return enabled;
        if (enabled > 0) {
                r = sd_event_source_set_time(nl->time_event_source, until);
                if (r < 0)
                        return r;
        }

        r = sd_event_source_set_enabled(nl->time_event_source,
                                        enabled > 0 ? SD_EVENT_ONESHOT : SD_EVENT_OFF);
        if (r < 0)
                return r;

        return 1;
}

int sd_netlink_attach_event(sd_netlink *nl, sd_event *event, int64_t priority) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!nl->event, -EBUSY);

        assert(!nl->io_event_source);
        assert(!nl->time_event_source);

        if (event)
                nl->event = sd_event_ref(event);
        else {
                r = sd_event_default(&nl->event);
                if (r < 0)
                        return r;
        }

        r = sd_event_add_io(nl->event, &nl->io_event_source, nl->fd, 0, io_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->io_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->io_event_source, "netlink-receive-message");
        if (r < 0)
                goto fail;

        r = sd_event_source_set_prepare(nl->io_event_source, prepare_callback);
        if (r < 0)
                goto fail;

        r = sd_event_add_time(nl->event, &nl->time_event_source, CLOCK_MONOTONIC, 0, 0, time_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->time_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->time_event_source, "netlink-timer");
        if (r < 0)
                goto fail;

        return 0;

fail:
        sd_netlink_detach_event(nl);
        return r;
}
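
/* Illustrative sketch (not part of the original source): driving sd-netlink from an sd-event loop and
 * reacting to link changes. on_link_change and userdata are hypothetical; error handling is omitted for
 * brevity.
 *
 *     sd_event *e = NULL;
 *     sd_netlink *nl = NULL;
 *
 *     sd_event_default(&e);
 *     sd_netlink_open(&nl);
 *     sd_netlink_attach_event(nl, e, SD_EVENT_PRIORITY_NORMAL);
 *     sd_netlink_add_match(nl, NULL, RTM_NEWLINK, on_link_change, NULL, userdata, "link-watch");
 *     sd_event_loop(e);
 */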

int sd_netlink_detach_event(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(nl->event, -ENXIO);

        nl->io_event_source = sd_event_source_unref(nl->io_event_source);

        nl->time_event_source = sd_event_source_unref(nl->time_event_source);

        nl->event = sd_event_unref(nl->event);

        return 0;
}

int netlink_add_match_internal(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                const uint32_t *groups,
                size_t n_groups,
                uint16_t type,
                uint8_t cmd,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r;

        assert(groups);
        assert(n_groups > 0);

        for (size_t i = 0; i < n_groups; i++) {
                r = socket_broadcast_group_ref(nl, groups[i]);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_MATCH_CALLBACK, sizeof(struct match_callback),
                                  userdata, description, &slot);
        if (r < 0)
                return r;

        slot->match_callback.groups = newdup(uint32_t, groups, n_groups);
        if (!slot->match_callback.groups)
                return -ENOMEM;

        slot->match_callback.n_groups = n_groups;
        slot->match_callback.callback = callback;
        slot->match_callback.type = type;
        slot->match_callback.cmd = cmd;

        LIST_PREPEND(match_callbacks, nl->match_callbacks, &slot->match_callback);

        /* Set this last. Otherwise, some of the failure paths above would call the destroy callback while
         * others would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);
        return 0;
}

int sd_netlink_add_match(
                sd_netlink *rtnl,
                sd_netlink_slot **ret_slot,
                uint16_t type,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        static const uint32_t
                address_groups[] = { RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR, },
                link_groups[] = { RTNLGRP_LINK, },
                neighbor_groups[] = { RTNLGRP_NEIGH, },
                nexthop_groups[] = { RTNLGRP_NEXTHOP, },
                route_groups[] = { RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV6_ROUTE, },
                rule_groups[] = { RTNLGRP_IPV4_RULE, RTNLGRP_IPV6_RULE, },
                tc_groups[] = { RTNLGRP_TC };
        const uint32_t *groups;
        size_t n_groups;

        assert_return(rtnl, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(rtnl), -ECHILD);

        switch (type) {
                case RTM_NEWLINK:
                case RTM_DELLINK:
                        groups = link_groups;
                        n_groups = ELEMENTSOF(link_groups);
                        break;
                case RTM_NEWADDR:
                case RTM_DELADDR:
                        groups = address_groups;
                        n_groups = ELEMENTSOF(address_groups);
                        break;
                case RTM_NEWNEIGH:
                case RTM_DELNEIGH:
                        groups = neighbor_groups;
                        n_groups = ELEMENTSOF(neighbor_groups);
                        break;
                case RTM_NEWROUTE:
                case RTM_DELROUTE:
                        groups = route_groups;
                        n_groups = ELEMENTSOF(route_groups);
                        break;
                case RTM_NEWRULE:
                case RTM_DELRULE:
                        groups = rule_groups;
                        n_groups = ELEMENTSOF(rule_groups);
                        break;
                case RTM_NEWNEXTHOP:
                case RTM_DELNEXTHOP:
                        groups = nexthop_groups;
                        n_groups = ELEMENTSOF(nexthop_groups);
                        break;
                case RTM_NEWQDISC:
                case RTM_DELQDISC:
                case RTM_NEWTCLASS:
                case RTM_DELTCLASS:
                        groups = tc_groups;
                        n_groups = ELEMENTSOF(tc_groups);
                        break;
                default:
                        return -EOPNOTSUPP;
        }

        return netlink_add_match_internal(rtnl, ret_slot, groups, n_groups, type, 0, callback,
                                          destroy_callback, userdata, description);
}

int sd_netlink_attach_filter(sd_netlink *nl, size_t len, const struct sock_filter *filter) {
        assert_return(nl, -EINVAL);
        assert_return(len == 0 || filter, -EINVAL);

        if (setsockopt(nl->fd, SOL_SOCKET,
                       len == 0 ? SO_DETACH_FILTER : SO_ATTACH_FILTER,
                       &(struct sock_fprog) {
                               .len = len,
                               .filter = (struct sock_filter*) filter,
                       }, sizeof(struct sock_fprog)) < 0)
                return -errno;

        return 0;
}