src/libsystemd/sd-netlink/sd-netlink.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <poll.h>
4
5 #include "sd-netlink.h"
6
7 #include "alloc-util.h"
8 #include "fd-util.h"
9 #include "hashmap.h"
10 #include "io-util.h"
11 #include "macro.h"
12 #include "netlink-genl.h"
13 #include "netlink-internal.h"
14 #include "netlink-slot.h"
15 #include "netlink-util.h"
16 #include "process-util.h"
17 #include "socket-util.h"
18 #include "string-util.h"
19
20 /* Some really high limit, to catch programming errors */
21 #define REPLY_CALLBACKS_MAX UINT16_MAX
22
23 static int netlink_new(sd_netlink **ret) {
24 _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
25
26 assert_return(ret, -EINVAL);
27
28 nl = new(sd_netlink, 1);
29 if (!nl)
30 return -ENOMEM;
31
32 *nl = (sd_netlink) {
33 .n_ref = 1,
34 .fd = -1,
35 .sockaddr.nl.nl_family = AF_NETLINK,
36 .original_pid = getpid_cached(),
37 .protocol = -1,
38
39 /* Kernel change notification messages have sequence number 0. We want to avoid that with our
40 * own serials, in order not to get confused when matching up kernel replies to our earlier
41 * requests.
42 *
43 * Moreover, when using netlink socket activation (i.e. where PID 1 binds an AF_NETLINK
44 * socket for us and passes it to us across execve()) and we get restarted multiple times
45 * while the socket sticks around we might get confused by replies from earlier runs coming
46 * in late — which is pretty likely if we'd start our sequence numbers always from 1. Hence,
47 * let's start with a value based on the system clock. This should make collisions much less
48 * likely (though still theoretically possible). We use a 32 bit µs counter starting at boot
49 * for this (and explicitly exclude the zero, see above). This counter will wrap around after
50 * a bit more than 1h, but that's hopefully OK as the kernel shouldn't take that long to
51 * reply to our requests.
52 *
53 * We only pick the initial start value this way. For each message we simply increase the
54 * sequence number by 1. This means we could enqueue 1 netlink message per µs without risking
55 * collisions, which should be OK.
56 *
57 * Note this means the serials will be in the range 1…UINT32_MAX here.
58 *
59 * (In an ideal world we'd attach the current serial counter to the netlink socket itself
60 * somehow, to avoid all this, but I couldn't come up with a nice way to do this) */
61 .serial = (uint32_t) (now(CLOCK_MONOTONIC) % UINT32_MAX) + 1,
62 };
63
64 /* We guarantee that the read buffer has at least space for a message header */
65 if (!greedy_realloc((void**) &nl->rbuffer, sizeof(struct nlmsghdr), sizeof(uint8_t)))
66 return -ENOMEM;
67
68 *ret = TAKE_PTR(nl);
69 return 0;
70 }
71
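/* Illustrative arithmetic for the serial initialization above (not part of this file):
 * a 32 bit µs counter wraps after 2^32 µs ≈ 4295 s ≈ 71.6 min, i.e. "a bit more than 1h".
 * now(CLOCK_MONOTONIC) % UINT32_MAX yields a value in 0…UINT32_MAX-1, so the trailing +1
 * shifts the initial serial into the documented 1…UINT32_MAX range and keeps 0 reserved
 * for kernel notifications. */
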
72 _public_ int sd_netlink_new_from_fd(sd_netlink **ret, int fd) {
73 _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
74 socklen_t addrlen;
75 int r;
76
77 assert_return(ret, -EINVAL);
78
79 r = netlink_new(&nl);
80 if (r < 0)
81 return r;
82
83 addrlen = sizeof(nl->sockaddr);
84
85 if (getsockname(fd, &nl->sockaddr.sa, &addrlen) < 0)
86 return -errno;
87
88 if (nl->sockaddr.nl.nl_family != AF_NETLINK)
89 return -EINVAL;
90
91 nl->fd = fd;
92
93 *ret = TAKE_PTR(nl);
94 return 0;
95 }
96
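/* Usage sketch (illustrative, not part of this file): adopting an already-bound AF_NETLINK
 * socket, e.g. one passed in via socket activation as described in the comment in
 * netlink_new() above. Assumes <systemd/sd-daemon.h>; error handling omitted. On success
 * the returned object owns the fd and closes it on the last unref.
 *
 *     sd_netlink *nl = NULL;
 *     int n = sd_listen_fds(false);
 *     if (n >= 1)
 *             (void) sd_netlink_new_from_fd(&nl, SD_LISTEN_FDS_START);
 */
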
97 _public_ int sd_netlink_open_fd(sd_netlink **ret, int fd) {
98 _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
99 int r, protocol;
100
101 assert_return(ret, -EINVAL);
102 assert_return(fd >= 0, -EBADF);
103
104 r = netlink_new(&nl);
105 if (r < 0)
106 return r;
107
108 r = getsockopt_int(fd, SOL_SOCKET, SO_PROTOCOL, &protocol);
109 if (r < 0)
110 return r;
111
112 nl->fd = fd;
113 nl->protocol = protocol;
114
115 r = setsockopt_int(fd, SOL_NETLINK, NETLINK_EXT_ACK, true);
116 if (r < 0)
117 log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_EXT_ACK option, ignoring: %m");
118
119 r = setsockopt_int(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, true);
120 if (r < 0)
121 log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_GET_STRICT_CHK option, ignoring: %m");
122
123 r = socket_bind(nl);
124 if (r < 0) {
125 nl->fd = -1; /* on failure, the caller remains owner of the fd, hence don't close it here */
126 nl->protocol = -1;
127 return r;
128 }
129
130 *ret = TAKE_PTR(nl);
131
132 return 0;
133 }
134
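/* Usage sketch (illustrative, not part of this file): wrapping a caller-created netlink
 * socket. On failure the caller keeps ownership of the fd (see the comment above); on
 * success the fd belongs to the returned sd_netlink object. Assumes <sys/socket.h> and
 * <linux/netlink.h>; error handling abbreviated.
 *
 *     sd_netlink *nl = NULL;
 *     int fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK, NETLINK_ROUTE);
 *     if (fd >= 0 && sd_netlink_open_fd(&nl, fd) < 0)
 *             fd = safe_close(fd);
 */
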
135 _public_ int sd_netlink_open(sd_netlink **ret) {
136 return netlink_open_family(ret, NETLINK_ROUTE);
137 }
138
139 _public_ int sd_netlink_increase_rxbuf(sd_netlink *nl, size_t size) {
140 assert_return(nl, -EINVAL);
141 assert_return(!netlink_pid_changed(nl), -ECHILD);
142
143 return fd_increase_rxbuf(nl->fd, size);
144 }
145
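/* Usage sketch (illustrative, not part of this file): opening a NETLINK_ROUTE connection
 * and enlarging the kernel-side receive buffer, useful when large dumps or bursts of
 * notifications are expected. The 8 MiB value is an arbitrary example.
 *
 *     sd_netlink *rtnl = NULL;
 *     if (sd_netlink_open(&rtnl) >= 0)
 *             (void) sd_netlink_increase_rxbuf(rtnl, 8 * 1024 * 1024);
 */
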
146 static sd_netlink *netlink_free(sd_netlink *nl) {
147 sd_netlink_slot *s;
148 unsigned i;
149
150 assert(nl);
151
152 for (i = 0; i < nl->rqueue_size; i++)
153 sd_netlink_message_unref(nl->rqueue[i]);
154 free(nl->rqueue);
155
156 for (i = 0; i < nl->rqueue_partial_size; i++)
157 sd_netlink_message_unref(nl->rqueue_partial[i]);
158 free(nl->rqueue_partial);
159
160 free(nl->rbuffer);
161
162 while ((s = nl->slots)) {
163 assert(s->floating);
164 netlink_slot_disconnect(s, true);
165 }
166 hashmap_free(nl->reply_callbacks);
167 prioq_free(nl->reply_callbacks_prioq);
168
169 sd_event_source_unref(nl->io_event_source);
170 sd_event_source_unref(nl->time_event_source);
171 sd_event_unref(nl->event);
172
173 hashmap_free(nl->broadcast_group_refs);
174
175 genl_clear_family(nl);
176
177 safe_close(nl->fd);
178 return mfree(nl);
179 }
180
181 DEFINE_TRIVIAL_REF_UNREF_FUNC(sd_netlink, sd_netlink, netlink_free);
182
183 _public_ int sd_netlink_send(
184 sd_netlink *nl,
185 sd_netlink_message *message,
186 uint32_t *serial) {
187
188 int r;
189
190 assert_return(nl, -EINVAL);
191 assert_return(!netlink_pid_changed(nl), -ECHILD);
192 assert_return(message, -EINVAL);
193 assert_return(!message->sealed, -EPERM);
194
195 netlink_seal_message(nl, message);
196
197 r = socket_write_message(nl, message);
198 if (r < 0)
199 return r;
200
201 if (serial)
202 *serial = message_get_serial(message);
203
204 return 1;
205 }
206
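/* Usage sketch (illustrative, not part of this file): sending a request without waiting
 * for the reply, and keeping the serial so the answer can later be matched up with
 * sd_netlink_read(). Assumes an open NETLINK_ROUTE connection "rtnl" and ifindex 1 (the
 * loopback device); error handling omitted.
 *
 *     sd_netlink_message *req = NULL, *reply = NULL;
 *     uint32_t serial;
 *
 *     (void) sd_rtnl_message_new_link(rtnl, &req, RTM_GETLINK, 1);
 *     (void) sd_netlink_send(rtnl, req, &serial);
 *     ...
 *     (void) sd_netlink_read(rtnl, serial, 0, &reply);
 */
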
207 int netlink_rqueue_make_room(sd_netlink *nl) {
208 assert(nl);
209
210 if (nl->rqueue_size >= NETLINK_RQUEUE_MAX)
211 return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
212 "sd-netlink: exhausted the read queue size (%d)",
213 NETLINK_RQUEUE_MAX);
214
215 if (!GREEDY_REALLOC(nl->rqueue, nl->rqueue_size + 1))
216 return -ENOMEM;
217
218 return 0;
219 }
220
221 int netlink_rqueue_partial_make_room(sd_netlink *nl) {
222 assert(nl);
223
224 if (nl->rqueue_partial_size >= NETLINK_RQUEUE_MAX)
225 return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
226 "sd-netlink: exhausted the partial read queue size (%d)",
227 NETLINK_RQUEUE_MAX);
228
229 if (!GREEDY_REALLOC(nl->rqueue_partial, nl->rqueue_partial_size + 1))
230 return -ENOMEM;
231
232 return 0;
233 }
234
235 static int dispatch_rqueue(sd_netlink *nl, sd_netlink_message **message) {
236 int r;
237
238 assert(nl);
239 assert(message);
240
241 if (nl->rqueue_size <= 0) {
242 /* Try to read a new message */
243 r = socket_read_message(nl);
244 if (r == -ENOBUFS) { /* FIXME: ignore buffer overruns for now */
245 log_debug_errno(r, "sd-netlink: Got ENOBUFS from netlink socket, ignoring.");
246 return 1;
247 }
248 if (r <= 0)
249 return r;
250 }
251
252 /* Dispatch a queued message */
253 *message = nl->rqueue[0];
254 nl->rqueue_size--;
255 memmove(nl->rqueue, nl->rqueue + 1, sizeof(sd_netlink_message*) * nl->rqueue_size);
256
257 return 1;
258 }
259
260 static int process_timeout(sd_netlink *nl) {
261 _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
262 struct reply_callback *c;
263 sd_netlink_slot *slot;
264 usec_t n;
265 int r;
266
267 assert(nl);
268
269 c = prioq_peek(nl->reply_callbacks_prioq);
270 if (!c)
271 return 0;
272
273 n = now(CLOCK_MONOTONIC);
274 if (c->timeout > n)
275 return 0;
276
277 r = message_new_synthetic_error(nl, -ETIMEDOUT, c->serial, &m);
278 if (r < 0)
279 return r;
280
281 assert_se(prioq_pop(nl->reply_callbacks_prioq) == c);
282 c->timeout = 0;
283 hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(c->serial));
284
285 slot = container_of(c, sd_netlink_slot, reply_callback);
286
287 r = c->callback(nl, m, slot->userdata);
288 if (r < 0)
289 log_debug_errno(r, "sd-netlink: timed out callback %s%s%sfailed: %m",
290 slot->description ? "'" : "",
291 strempty(slot->description),
292 slot->description ? "' " : "");
293
294 if (slot->floating)
295 netlink_slot_disconnect(slot, true);
296
297 return 1;
298 }
299
300 static int process_reply(sd_netlink *nl, sd_netlink_message *m) {
301 struct reply_callback *c;
302 sd_netlink_slot *slot;
303 uint32_t serial;
304 uint16_t type;
305 int r;
306
307 assert(nl);
308 assert(m);
309
310 serial = message_get_serial(m);
311 c = hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(serial));
312 if (!c)
313 return 0;
314
315 if (c->timeout != 0) {
316 prioq_remove(nl->reply_callbacks_prioq, c, &c->prioq_idx);
317 c->timeout = 0;
318 }
319
320 r = sd_netlink_message_get_type(m, &type);
321 if (r < 0)
322 return r;
323
324 if (type == NLMSG_DONE)
325 m = NULL;
326
327 slot = container_of(c, sd_netlink_slot, reply_callback);
328
329 r = c->callback(nl, m, slot->userdata);
330 if (r < 0)
331 log_debug_errno(r, "sd-netlink: reply callback %s%s%sfailed: %m",
332 slot->description ? "'" : "",
333 strempty(slot->description),
334 slot->description ? "' " : "");
335
336 if (slot->floating)
337 netlink_slot_disconnect(slot, true);
338
339 return 1;
340 }
341
342 static int process_match(sd_netlink *nl, sd_netlink_message *m) {
343 uint16_t type;
344 uint8_t cmd;
345 int r;
346
347 assert(nl);
348 assert(m);
349
350 r = sd_netlink_message_get_type(m, &type);
351 if (r < 0)
352 return r;
353
354 if (m->protocol == NETLINK_GENERIC) {
355 r = sd_genl_message_get_command(nl, m, &cmd);
356 if (r < 0)
357 return r;
358 } else
359 cmd = 0;
360
361 LIST_FOREACH(match_callbacks, c, nl->match_callbacks) {
362 sd_netlink_slot *slot;
363 bool found = false;
364
365 if (c->type != type)
366 continue;
367 if (c->cmd != 0 && c->cmd != cmd)
368 continue;
369
370 for (size_t i = 0; i < c->n_groups; i++)
371 if (c->groups[i] == m->multicast_group) {
372 found = true;
373 break;
374 }
375
376 if (!found)
377 continue;
378
379 slot = container_of(c, sd_netlink_slot, match_callback);
380
381 r = c->callback(nl, m, slot->userdata);
382 if (r < 0)
383 log_debug_errno(r, "sd-netlink: match callback %s%s%sfailed: %m",
384 slot->description ? "'" : "",
385 strempty(slot->description),
386 slot->description ? "' " : "");
387 if (r != 0)
388 break;
389 }
390
391 return 1;
392 }
393
394 static int process_running(sd_netlink *nl, sd_netlink_message **ret) {
395 _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
396 int r;
397
398 assert(nl);
399
400 r = process_timeout(nl);
401 if (r != 0)
402 goto null_message;
403
404 r = dispatch_rqueue(nl, &m);
405 if (r < 0)
406 return r;
407 if (!m)
408 goto null_message;
409
410 if (sd_netlink_message_is_broadcast(m))
411 r = process_match(nl, m);
412 else
413 r = process_reply(nl, m);
414 if (r != 0)
415 goto null_message;
416
417 if (ret) {
418 *ret = TAKE_PTR(m);
419
420 return 1;
421 }
422
423 return 1;
424
425 null_message:
426 if (r >= 0 && ret)
427 *ret = NULL;
428
429 return r;
430 }
431
432 int sd_netlink_process(sd_netlink *nl, sd_netlink_message **ret) {
433 NETLINK_DONT_DESTROY(nl);
434 int r;
435
436 assert_return(nl, -EINVAL);
437 assert_return(!netlink_pid_changed(nl), -ECHILD);
438 assert_return(!nl->processing, -EBUSY);
439
440 nl->processing = true;
441 r = process_running(nl, ret);
442 nl->processing = false;
443
444 return r;
445 }
446
447 static usec_t calc_elapse(uint64_t usec) {
448 if (usec == UINT64_MAX)
449 return 0;
450
451 if (usec == 0)
452 usec = NETLINK_DEFAULT_TIMEOUT_USEC;
453
454 return usec_add(now(CLOCK_MONOTONIC), usec);
455 }
456
457 static int netlink_poll(sd_netlink *nl, bool need_more, usec_t timeout_usec) {
458 usec_t m = USEC_INFINITY;
459 int r, e;
460
461 assert(nl);
462
463 e = sd_netlink_get_events(nl);
464 if (e < 0)
465 return e;
466
467 if (need_more)
468 /* Caller wants more data, and doesn't care about
469 * what's been read or any other timeouts. */
470 e |= POLLIN;
471 else {
472 usec_t until;
473
474 /* Caller wants to process if there is something to
475 * process, but doesn't care otherwise */
476
477 r = sd_netlink_get_timeout(nl, &until);
478 if (r < 0)
479 return r;
480
481 m = usec_sub_unsigned(until, now(CLOCK_MONOTONIC));
482 }
483
484 r = fd_wait_for_event(nl->fd, e, MIN(m, timeout_usec));
485 if (r <= 0)
486 return r;
487
488 return 1;
489 }
490
491 int sd_netlink_wait(sd_netlink *nl, uint64_t timeout_usec) {
492 assert_return(nl, -EINVAL);
493 assert_return(!netlink_pid_changed(nl), -ECHILD);
494
495 if (nl->rqueue_size > 0)
496 return 0;
497
498 return netlink_poll(nl, false, timeout_usec);
499 }
500
501 static int timeout_compare(const void *a, const void *b) {
502 const struct reply_callback *x = a, *y = b;
503
504 if (x->timeout != 0 && y->timeout == 0)
505 return -1;
506
507 if (x->timeout == 0 && y->timeout != 0)
508 return 1;
509
510 return CMP(x->timeout, y->timeout);
511 }
512
513 _public_ int sd_netlink_call_async(
514 sd_netlink *nl,
515 sd_netlink_slot **ret_slot,
516 sd_netlink_message *m,
517 sd_netlink_message_handler_t callback,
518 sd_netlink_destroy_t destroy_callback,
519 void *userdata,
520 uint64_t usec,
521 const char *description) {
522
523 _cleanup_free_ sd_netlink_slot *slot = NULL;
524 int r, k;
525
526 assert_return(nl, -EINVAL);
527 assert_return(m, -EINVAL);
528 assert_return(callback, -EINVAL);
529 assert_return(!netlink_pid_changed(nl), -ECHILD);
530
531 if (hashmap_size(nl->reply_callbacks) >= REPLY_CALLBACKS_MAX)
532 return -ERANGE;
533
534 r = hashmap_ensure_allocated(&nl->reply_callbacks, &trivial_hash_ops);
535 if (r < 0)
536 return r;
537
538 if (usec != UINT64_MAX) {
539 r = prioq_ensure_allocated(&nl->reply_callbacks_prioq, timeout_compare);
540 if (r < 0)
541 return r;
542 }
543
544 r = netlink_slot_allocate(nl, !ret_slot, NETLINK_REPLY_CALLBACK, sizeof(struct reply_callback), userdata, description, &slot);
545 if (r < 0)
546 return r;
547
548 slot->reply_callback.callback = callback;
549 slot->reply_callback.timeout = calc_elapse(usec);
550
551 k = sd_netlink_send(nl, m, &slot->reply_callback.serial);
552 if (k < 0)
553 return k;
554
555 r = hashmap_put(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial), &slot->reply_callback);
556 if (r < 0)
557 return r;
558
559 if (slot->reply_callback.timeout != 0) {
560 r = prioq_put(nl->reply_callbacks_prioq, &slot->reply_callback, &slot->reply_callback.prioq_idx);
561 if (r < 0) {
562 (void) hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial));
563 return r;
564 }
565 }
566
567 /* Set this last. Otherwise, some of the failure paths above would invoke the destroy callback while others would not. */
568 slot->destroy_callback = destroy_callback;
569
570 if (ret_slot)
571 *ret_slot = slot;
572
573 TAKE_PTR(slot);
574
575 return k;
576 }
577
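/* Usage sketch (illustrative, not part of this file): issuing a request asynchronously.
 * The reply, or a synthesized -ETIMEDOUT error message once the timeout elapses, is
 * delivered to the callback from sd_netlink_process(). Passing NULL for ret_slot makes
 * the slot "floating", i.e. its lifetime is bound to the sd_netlink object. on_reply is a
 * hypothetical name; error handling omitted.
 *
 *     static int on_reply(sd_netlink *nl, sd_netlink_message *reply, void *userdata) {
 *             if (!reply)   // NLMSG_DONE of a dump request is delivered as NULL
 *                     return 0;
 *             if (sd_netlink_message_get_errno(reply) < 0)
 *                     log_debug("request failed");
 *             return 0;
 *     }
 *
 *     (void) sd_netlink_call_async(rtnl, NULL, req, on_reply, NULL, NULL,
 *                                  30 * USEC_PER_SEC, "example-request");
 */
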
578 _public_ int sd_netlink_read(
579 sd_netlink *nl,
580 uint32_t serial,
581 uint64_t usec,
582 sd_netlink_message **ret) {
583
584 usec_t timeout;
585 int r;
586
587 assert_return(nl, -EINVAL);
588 assert_return(!netlink_pid_changed(nl), -ECHILD);
589
590 timeout = calc_elapse(usec);
591
592 for (;;) {
593 usec_t left;
594
595 for (unsigned i = 0; i < nl->rqueue_size; i++) {
596 _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *incoming = NULL;
597 uint32_t received_serial;
598 uint16_t type;
599
600 received_serial = message_get_serial(nl->rqueue[i]);
601 if (received_serial != serial)
602 continue;
603
604 incoming = nl->rqueue[i];
605
606 /* found a match, remove from rqueue and return it */
607 memmove(nl->rqueue + i, nl->rqueue + i + 1,
608 sizeof(sd_netlink_message*) * (nl->rqueue_size - i - 1));
609 nl->rqueue_size--;
610
611 r = sd_netlink_message_get_errno(incoming);
612 if (r < 0)
613 return r;
614
615 r = sd_netlink_message_get_type(incoming, &type);
616 if (r < 0)
617 return r;
618
619 if (type == NLMSG_DONE) {
620 if (ret) *ret = NULL;
621 return 0;
622 }
623
624 if (ret)
625 *ret = TAKE_PTR(incoming);
626 return 1;
627 }
628
629 r = socket_read_message(nl);
630 if (r < 0)
631 return r;
632 if (r > 0)
633 /* received message, so try to process straight away */
634 continue;
635
636 if (timeout > 0) {
637 usec_t n;
638
639 n = now(CLOCK_MONOTONIC);
640 if (n >= timeout)
641 return -ETIMEDOUT;
642
643 left = usec_sub_unsigned(timeout, n);
644 } else
645 left = USEC_INFINITY;
646
647 r = netlink_poll(nl, true, left);
648 if (r < 0)
649 return r;
650 if (r == 0)
651 return -ETIMEDOUT;
652 }
653 }
654
655 _public_ int sd_netlink_call(
656 sd_netlink *nl,
657 sd_netlink_message *message,
658 uint64_t usec,
659 sd_netlink_message **ret) {
660
661 uint32_t serial;
662 int r;
663
664 assert_return(nl, -EINVAL);
665 assert_return(!netlink_pid_changed(nl), -ECHILD);
666 assert_return(message, -EINVAL);
667
668 r = sd_netlink_send(nl, message, &serial);
669 if (r < 0)
670 return r;
671
672 return sd_netlink_read(nl, serial, usec, ret);
673 }
674
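/* Usage sketch (illustrative, not part of this file): a blocking request/reply round
 * trip. A timeout of 0 selects the library default (see calc_elapse() above). Error
 * handling omitted.
 *
 *     sd_netlink_message *req = NULL, *reply = NULL;
 *     const char *name;
 *
 *     (void) sd_rtnl_message_new_link(rtnl, &req, RTM_GETLINK, 1);
 *     if (sd_netlink_call(rtnl, req, 0, &reply) >= 0 &&
 *         sd_netlink_message_read_string(reply, IFLA_IFNAME, &name) >= 0)
 *             log_debug("interface 1 is named %s", name);
 */
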
675 _public_ int sd_netlink_get_events(sd_netlink *nl) {
676 assert_return(nl, -EINVAL);
677 assert_return(!netlink_pid_changed(nl), -ECHILD);
678
679 return nl->rqueue_size == 0 ? POLLIN : 0;
680 }
681
682 _public_ int sd_netlink_get_timeout(sd_netlink *nl, uint64_t *timeout_usec) {
683 struct reply_callback *c;
684
685 assert_return(nl, -EINVAL);
686 assert_return(timeout_usec, -EINVAL);
687 assert_return(!netlink_pid_changed(nl), -ECHILD);
688
689 if (nl->rqueue_size > 0) {
690 *timeout_usec = 0;
691 return 1;
692 }
693
694 c = prioq_peek(nl->reply_callbacks_prioq);
695 if (!c) {
696 *timeout_usec = UINT64_MAX;
697 return 0;
698 }
699
700 *timeout_usec = c->timeout;
701
702 return 1;
703 }
704
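/* Usage sketch (illustrative, not part of this file): driving the connection without
 * sd-event. sd_netlink_wait() internally combines sd_netlink_get_events() and
 * sd_netlink_get_timeout() (see netlink_poll() above), so a minimal loop only needs two
 * calls. Error handling abbreviated.
 *
 *     for (;;) {
 *             int r = sd_netlink_process(rtnl, NULL);
 *             if (r < 0)
 *                     break;
 *             if (r > 0)
 *                     continue;   // something was dispatched, look for more work
 *             if (sd_netlink_wait(rtnl, UINT64_MAX) < 0)
 *                     break;      // sleep until I/O or the next reply timeout
 *     }
 */
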
705 static int io_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
706 sd_netlink *nl = userdata;
707 int r;
708
709 assert(nl);
710
711 r = sd_netlink_process(nl, NULL);
712 if (r < 0)
713 return r;
714
715 return 1;
716 }
717
718 static int time_callback(sd_event_source *s, uint64_t usec, void *userdata) {
719 sd_netlink *nl = userdata;
720 int r;
721
722 assert(nl);
723
724 r = sd_netlink_process(nl, NULL);
725 if (r < 0)
726 return r;
727
728 return 1;
729 }
730
731 static int prepare_callback(sd_event_source *s, void *userdata) {
732 sd_netlink *nl = userdata;
733 int r, e;
734 usec_t until;
735
736 assert(s);
737 assert(nl);
738
739 e = sd_netlink_get_events(nl);
740 if (e < 0)
741 return e;
742
743 r = sd_event_source_set_io_events(nl->io_event_source, e);
744 if (r < 0)
745 return r;
746
747 r = sd_netlink_get_timeout(nl, &until);
748 if (r < 0)
749 return r;
750 if (r > 0) {
751 int j;
752
753 j = sd_event_source_set_time(nl->time_event_source, until);
754 if (j < 0)
755 return j;
756 }
757
758 r = sd_event_source_set_enabled(nl->time_event_source, r > 0);
759 if (r < 0)
760 return r;
761
762 return 1;
763 }
764
765 _public_ int sd_netlink_attach_event(sd_netlink *nl, sd_event *event, int64_t priority) {
766 int r;
767
768 assert_return(nl, -EINVAL);
769 assert_return(!nl->event, -EBUSY);
770
771 assert(!nl->io_event_source);
772 assert(!nl->time_event_source);
773
774 if (event)
775 nl->event = sd_event_ref(event);
776 else {
777 r = sd_event_default(&nl->event);
778 if (r < 0)
779 return r;
780 }
781
782 r = sd_event_add_io(nl->event, &nl->io_event_source, nl->fd, 0, io_callback, nl);
783 if (r < 0)
784 goto fail;
785
786 r = sd_event_source_set_priority(nl->io_event_source, priority);
787 if (r < 0)
788 goto fail;
789
790 r = sd_event_source_set_description(nl->io_event_source, "netlink-receive-message");
791 if (r < 0)
792 goto fail;
793
794 r = sd_event_source_set_prepare(nl->io_event_source, prepare_callback);
795 if (r < 0)
796 goto fail;
797
798 r = sd_event_add_time(nl->event, &nl->time_event_source, CLOCK_MONOTONIC, 0, 0, time_callback, nl);
799 if (r < 0)
800 goto fail;
801
802 r = sd_event_source_set_priority(nl->time_event_source, priority);
803 if (r < 0)
804 goto fail;
805
806 r = sd_event_source_set_description(nl->time_event_source, "netlink-timer");
807 if (r < 0)
808 goto fail;
809
810 return 0;
811
812 fail:
813 sd_netlink_detach_event(nl);
814 return r;
815 }
816
817 _public_ int sd_netlink_detach_event(sd_netlink *nl) {
818 assert_return(nl, -EINVAL);
819 assert_return(nl->event, -ENXIO);
820
821 nl->io_event_source = sd_event_source_unref(nl->io_event_source);
822
823 nl->time_event_source = sd_event_source_unref(nl->time_event_source);
824
825 nl->event = sd_event_unref(nl->event);
826
827 return 0;
828 }
829
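/* Usage sketch (illustrative, not part of this file): letting an sd-event loop drive the
 * connection instead of calling sd_netlink_process() manually. Error handling omitted.
 *
 *     sd_event *e = NULL;
 *
 *     (void) sd_event_default(&e);
 *     (void) sd_netlink_attach_event(rtnl, e, SD_EVENT_PRIORITY_NORMAL);
 *     (void) sd_event_loop(e);      // reply and match callbacks now fire from the loop
 *     (void) sd_netlink_detach_event(rtnl);
 */
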
830 int netlink_add_match_internal(
831 sd_netlink *nl,
832 sd_netlink_slot **ret_slot,
833 const uint32_t *groups,
834 size_t n_groups,
835 uint16_t type,
836 uint8_t cmd,
837 sd_netlink_message_handler_t callback,
838 sd_netlink_destroy_t destroy_callback,
839 void *userdata,
840 const char *description) {
841
842 _cleanup_free_ sd_netlink_slot *slot = NULL;
843 int r;
844
845 assert(groups);
846 assert(n_groups > 0);
847
848 for (size_t i = 0; i < n_groups; i++) {
849 r = socket_broadcast_group_ref(nl, groups[i]);
850 if (r < 0)
851 return r;
852 }
853
854 r = netlink_slot_allocate(nl, !ret_slot, NETLINK_MATCH_CALLBACK, sizeof(struct match_callback),
855 userdata, description, &slot);
856 if (r < 0)
857 return r;
858
859 slot->match_callback.groups = newdup(uint32_t, groups, n_groups);
860 if (!slot->match_callback.groups)
861 return -ENOMEM;
862
863 slot->match_callback.n_groups = n_groups;
864 slot->match_callback.callback = callback;
865 slot->match_callback.type = type;
866 slot->match_callback.cmd = cmd;
867
868 LIST_PREPEND(match_callbacks, nl->match_callbacks, &slot->match_callback);
869
870 /* Set this last. Otherwise, some of the failure paths above would invoke the destroy callback while others would not. */
871 slot->destroy_callback = destroy_callback;
872
873 if (ret_slot)
874 *ret_slot = slot;
875
876 TAKE_PTR(slot);
877 return 0;
878 }
879
880 _public_ int sd_netlink_add_match(
881 sd_netlink *rtnl,
882 sd_netlink_slot **ret_slot,
883 uint16_t type,
884 sd_netlink_message_handler_t callback,
885 sd_netlink_destroy_t destroy_callback,
886 void *userdata,
887 const char *description) {
888
889 static const uint32_t
890 address_groups[] = { RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR, },
891 link_groups[] = { RTNLGRP_LINK, },
892 neighbor_groups[] = { RTNLGRP_NEIGH, },
893 nexthop_groups[] = { RTNLGRP_NEXTHOP, },
894 route_groups[] = { RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV6_ROUTE, },
895 rule_groups[] = { RTNLGRP_IPV4_RULE, RTNLGRP_IPV6_RULE, },
896 tc_groups[] = { RTNLGRP_TC };
897 const uint32_t *groups;
898 size_t n_groups;
899
900 assert_return(rtnl, -EINVAL);
901 assert_return(callback, -EINVAL);
902 assert_return(!netlink_pid_changed(rtnl), -ECHILD);
903
904 switch (type) {
905 case RTM_NEWLINK:
906 case RTM_DELLINK:
907 groups = link_groups;
908 n_groups = ELEMENTSOF(link_groups);
909 break;
910 case RTM_NEWADDR:
911 case RTM_DELADDR:
912 groups = address_groups;
913 n_groups = ELEMENTSOF(address_groups);
914 break;
915 case RTM_NEWNEIGH:
916 case RTM_DELNEIGH:
917 groups = neighbor_groups;
918 n_groups = ELEMENTSOF(neighbor_groups);
919 break;
920 case RTM_NEWROUTE:
921 case RTM_DELROUTE:
922 groups = route_groups;
923 n_groups = ELEMENTSOF(route_groups);
924 break;
925 case RTM_NEWRULE:
926 case RTM_DELRULE:
927 groups = rule_groups;
928 n_groups = ELEMENTSOF(rule_groups);
929 break;
930 case RTM_NEWNEXTHOP:
931 case RTM_DELNEXTHOP:
932 groups = nexthop_groups;
933 n_groups = ELEMENTSOF(nexthop_groups);
934 break;
935 case RTM_NEWQDISC:
936 case RTM_DELQDISC:
937 case RTM_NEWTCLASS:
938 case RTM_DELTCLASS:
939 groups = tc_groups;
940 n_groups = ELEMENTSOF(tc_groups);
941 break;
942 default:
943 return -EOPNOTSUPP;
944 }
945
946 return netlink_add_match_internal(rtnl, ret_slot, groups, n_groups, type, 0, callback,
947 destroy_callback, userdata, description);
948 }
949
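/* Usage sketch (illustrative, not part of this file): subscribing to link change
 * notifications. The match callback fires from sd_netlink_process() (or from an attached
 * sd-event loop) for every RTM_NEWLINK broadcast. on_link is a hypothetical name; error
 * handling omitted.
 *
 *     static int on_link(sd_netlink *nl, sd_netlink_message *m, void *userdata) {
 *             const char *name;
 *             if (sd_netlink_message_read_string(m, IFLA_IFNAME, &name) >= 0)
 *                     log_debug("link %s changed", name);
 *             return 0;
 *     }
 *
 *     (void) sd_netlink_add_match(rtnl, NULL, RTM_NEWLINK, on_link, NULL, NULL, "link-watch");
 */
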
950 _public_ int sd_netlink_attach_filter(sd_netlink *nl, size_t len, struct sock_filter *filter) {
951 assert_return(nl, -EINVAL);
952 assert_return(len == 0 || filter, -EINVAL);
953
954 if (setsockopt(nl->fd, SOL_SOCKET,
955 len == 0 ? SO_DETACH_FILTER : SO_ATTACH_FILTER,
956 &(struct sock_fprog) {
957 .len = len,
958 .filter = filter,
959 }, sizeof(struct sock_fprog)) < 0)
960 return -errno;
961
962 return 0;
963 }
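/* Usage sketch (illustrative, not part of this file): attaching a trivial classic-BPF
 * program that accepts every message, then detaching it again by passing a zero length
 * (which maps to SO_DETACH_FILTER above). Assumes <linux/filter.h>; a real filter would
 * inspect the nlmsghdr fields instead of accepting everything.
 *
 *     struct sock_filter accept_all[] = {
 *             BPF_STMT(BPF_RET | BPF_K, UINT32_MAX),   // "accept the whole packet"
 *     };
 *
 *     (void) sd_netlink_attach_filter(nl, ELEMENTSOF(accept_all), accept_all);
 *     (void) sd_netlink_attach_filter(nl, 0, NULL);
 */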