/* src/libsystemd/sd-netlink/sd-netlink.c (systemd) */
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <poll.h>
4
5 #include "sd-netlink.h"
6
7 #include "alloc-util.h"
8 #include "fd-util.h"
9 #include "hashmap.h"
10 #include "io-util.h"
11 #include "macro.h"
12 #include "netlink-genl.h"
13 #include "netlink-internal.h"
14 #include "netlink-slot.h"
15 #include "netlink-util.h"
16 #include "process-util.h"
17 #include "socket-util.h"
18 #include "string-util.h"
19
20 /* Some really high limit, to catch programming errors */
21 #define REPLY_CALLBACKS_MAX UINT16_MAX
22
/* Allocate and pre-initialize an sd_netlink object. No socket is created here: the fd is
 * left at -EBADF and the protocol unset (-1) until the caller supplies/opens one. */
static int netlink_new(sd_netlink **ret) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;

        assert_return(ret, -EINVAL);

        nl = new(sd_netlink, 1);
        if (!nl)
                return -ENOMEM;

        *nl = (sd_netlink) {
                .n_ref = 1,
                .fd = -EBADF,
                .sockaddr.nl.nl_family = AF_NETLINK,
                .original_pid = getpid_cached(),
                .protocol = -1,

                /* Kernel change notification messages have sequence number 0. We want to avoid that with our
                 * own serials, in order not to get confused when matching up kernel replies to our earlier
                 * requests.
                 *
                 * Moreover, when using netlink socket activation (i.e. where PID 1 binds an AF_NETLINK
                 * socket for us and passes it to us across execve()) and we get restarted multiple times
                 * while the socket sticks around we might get confused by replies from earlier runs coming
                 * in late — which is pretty likely if we'd start our sequence numbers always from 1. Hence,
                 * let's start with a value based on the system clock. This should make collisions much less
                 * likely (though still theoretically possible). We use a 32 bit μs counter starting at boot
                 * for this (and explicitly exclude the zero, see above). This counter will wrap around after
                 * a bit more than 1h, but that's hopefully OK as the kernel shouldn't take that long to
                 * reply to our requests.
                 *
                 * We only pick the initial start value this way. For each message we simply increase the
                 * sequence number by 1. This means we could enqueue 1 netlink message per μs without risking
                 * collisions, which should be OK.
                 *
                 * Note this means the serials will be in the range 1…UINT32_MAX here.
                 *
                 * (In an ideal world we'd attach the current serial counter to the netlink socket itself
                 * somehow, to avoid all this, but I couldn't come up with a nice way to do this) */
                .serial = (uint32_t) (now(CLOCK_MONOTONIC) % UINT32_MAX) + 1,
        };

        *ret = TAKE_PTR(nl);
        return 0;
}
67
/* Create an sd_netlink object on top of an existing AF_NETLINK socket fd. The netlink
 * protocol is queried from the socket itself via SO_PROTOCOL. On success the object takes
 * ownership of the fd; on failure the caller keeps ownership. */
int sd_netlink_open_fd(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
        int r, protocol;

        assert_return(ret, -EINVAL);
        assert_return(fd >= 0, -EBADF);

        r = netlink_new(&nl);
        if (r < 0)
                return r;

        r = getsockopt_int(fd, SOL_SOCKET, SO_PROTOCOL, &protocol);
        if (r < 0)
                return r;

        nl->fd = fd;
        nl->protocol = protocol;

        /* Best-effort socket options: failure to enable them is only logged, not fatal. */
        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_EXT_ACK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_EXT_ACK option, ignoring: %m");

        r = setsockopt_int(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, true);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: Failed to enable NETLINK_GET_STRICT_CHK option, ignoring: %m");

        r = socket_bind(nl);
        if (r < 0) {
                nl->fd = -EBADF; /* on failure, the caller remains owner of the fd, hence don't close it here */
                nl->protocol = -1;
                return r;
        }

        *ret = TAKE_PTR(nl);

        return 0;
}
105
/* Open a new netlink connection speaking the NETLINK_ROUTE (rtnetlink) protocol. */
int sd_netlink_open(sd_netlink **ret) {
        return netlink_open_family(ret, NETLINK_ROUTE);
}
109
/* Request a larger kernel receive buffer for the netlink socket; delegates to
 * fd_increase_rxbuf(). */
int sd_netlink_increase_rxbuf(sd_netlink *nl, size_t size) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return fd_increase_rxbuf(nl->fd, size);
}
116
/* Destructor run when the last reference is dropped (via the unref function generated
 * below): releases receive queues, remaining slots, reply callbacks, event sources, the
 * generic-netlink family cache and finally the socket. Always returns NULL. */
static sd_netlink *netlink_free(sd_netlink *nl) {
        sd_netlink_slot *s;

        assert(nl);

        /* Drop queued incoming messages, their serial indexes, and the receive buffer. */
        ordered_set_free(nl->rqueue);
        hashmap_free(nl->rqueue_by_serial);
        hashmap_free(nl->rqueue_partial_by_serial);
        free(nl->rbuffer);

        /* Only floating slots are expected to remain at this point (asserted). */
        while ((s = nl->slots)) {
                assert(s->floating);
                netlink_slot_disconnect(s, true);
        }
        hashmap_free(nl->reply_callbacks);
        prioq_free(nl->reply_callbacks_prioq);

        sd_event_source_unref(nl->io_event_source);
        sd_event_source_unref(nl->time_event_source);
        sd_event_unref(nl->event);

        hashmap_free(nl->broadcast_group_refs);

        genl_clear_family(nl);

        safe_close(nl->fd);
        return mfree(nl);
}

/* Generates sd_netlink_ref()/sd_netlink_unref() on top of netlink_free(). */
DEFINE_TRIVIAL_REF_UNREF_FUNC(sd_netlink, sd_netlink, netlink_free);
147
148 int sd_netlink_send(
149 sd_netlink *nl,
150 sd_netlink_message *message,
151 uint32_t *serial) {
152
153 int r;
154
155 assert_return(nl, -EINVAL);
156 assert_return(!netlink_pid_changed(nl), -ECHILD);
157 assert_return(message, -EINVAL);
158 assert_return(!message->sealed, -EPERM);
159
160 netlink_seal_message(nl, message);
161
162 r = socket_write_message(nl, message);
163 if (r < 0)
164 return r;
165
166 if (serial)
167 *serial = message_get_serial(message);
168
169 return 1;
170 }
171
/* Pop one message off the receive queue, first reading from the socket if the queue is
 * empty. Returns 1 with a message in *ret, or 0 with *ret == NULL if nothing is available. */
static int dispatch_rqueue(sd_netlink *nl, sd_netlink_message **ret) {
        sd_netlink_message *m;
        int r;

        assert(nl);
        assert(ret);

        if (ordered_set_isempty(nl->rqueue)) {
                /* Try to read a new message */
                r = socket_read_message(nl);
                if (r == -ENOBUFS) /* FIXME: ignore buffer overruns for now */
                        log_debug_errno(r, "sd-netlink: Got ENOBUFS from netlink socket, ignoring.");
                else if (r < 0)
                        return r;
        }

        /* Dispatch a queued message */
        m = ordered_set_steal_first(nl->rqueue);
        if (m)
                /* The by-serial index holds its own reference to the message; drop it along
                 * with the index entry. */
                sd_netlink_message_unref(hashmap_remove_value(nl->rqueue_by_serial, UINT32_TO_PTR(message_get_serial(m)), m));
        *ret = m;
        return !!m;
}
195
/* Fire the earliest expired reply timeout, if any: a synthetic ETIMEDOUT error message is
 * handed to the registered callback, which is then deregistered. Returns 1 if a timeout
 * was dispatched, 0 if none is pending/expired, negative errno on failure. */
static int process_timeout(sd_netlink *nl) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        struct reply_callback *c;
        sd_netlink_slot *slot;
        usec_t n;
        int r;

        assert(nl);

        /* The prioq is ordered by deadline, so peeking suffices to find the earliest one. */
        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c)
                return 0;

        n = now(CLOCK_MONOTONIC);
        if (c->timeout > n)
                return 0;

        r = message_new_synthetic_error(nl, -ETIMEDOUT, c->serial, &m);
        if (r < 0)
                return r;

        /* Deregister the callback before invoking it. */
        assert_se(prioq_pop(nl->reply_callbacks_prioq) == c);
        hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(c->serial));

        /* The reply_callback structure is embedded in its slot; recover the slot. */
        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: timedout callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        /* Floating (one-shot) slots are released once their callback has run. */
        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}
234
/* Dispatch an incoming reply to the callback registered for its serial, if any. Returns 0
 * if no callback is registered (the caller may then hand the message to the user), 1 if
 * the message was consumed by a callback. NLMSG_DONE replies are delivered to the callback
 * as a NULL message. */
static int process_reply(sd_netlink *nl, sd_netlink_message *m) {
        struct reply_callback *c;
        sd_netlink_slot *slot;
        uint32_t serial;
        uint16_t type;
        int r;

        assert(nl);
        assert(m);

        serial = message_get_serial(m);
        c = hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(serial));
        if (!c)
                return 0;

        /* A timeout of USEC_INFINITY means the callback was never queued in the prioq. */
        if (c->timeout != USEC_INFINITY)
                prioq_remove(nl->reply_callbacks_prioq, c, &c->prioq_idx);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        /* End-of-dump markers carry no payload; signal them to the callback as NULL. */
        if (type == NLMSG_DONE)
                m = NULL;

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(nl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: reply callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}
274
/* Dispatch a broadcast (multicast) message to all registered match callbacks whose type,
 * generic-netlink command and multicast group match. A callback returning non-zero stops
 * further dispatch for this message. Always returns 1 (the message is considered handled
 * with respect to match processing). */
static int process_match(sd_netlink *nl, sd_netlink_message *m) {
        uint16_t type;
        uint8_t cmd;
        int r;

        assert(nl);
        assert(m);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        /* Only generic-netlink messages carry a command; 0 means "no command filter". */
        if (m->protocol == NETLINK_GENERIC) {
                r = sd_genl_message_get_command(nl, m, &cmd);
                if (r < 0)
                        return r;
        } else
                cmd = 0;

        LIST_FOREACH(match_callbacks, c, nl->match_callbacks) {
                sd_netlink_slot *slot;
                bool found = false;

                if (c->type != type)
                        continue;
                if (c->cmd != 0 && c->cmd != cmd)
                        continue;

                /* The callback only fires for groups it subscribed to. */
                for (size_t i = 0; i < c->n_groups; i++)
                        if (c->groups[i] == m->multicast_group) {
                                found = true;
                                break;
                        }

                if (!found)
                        continue;

                slot = container_of(c, sd_netlink_slot, match_callback);

                r = c->callback(nl, m, slot->userdata);
                if (r < 0)
                        log_debug_errno(r, "sd-netlink: match callback %s%s%sfailed: %m",
                                        slot->description ? "'" : "",
                                        strempty(slot->description),
                                        slot->description ? "' " : "");
                if (r != 0)
                        break;
        }

        return 1;
}
326
/* One iteration of the processing loop: first dispatch an expired reply timeout; otherwise
 * pull one message off the queue and route it through match (broadcast) or reply handling.
 * A message that no handler consumed is passed to the caller via *ret (if non-NULL).
 * Returns > 0 if something was processed, 0 if there was nothing to do, negative errno on
 * failure. */
static int process_running(sd_netlink *nl, sd_netlink_message **ret) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        int r;

        assert(nl);

        r = process_timeout(nl);
        if (r != 0)
                goto null_message;

        r = dispatch_rqueue(nl, &m);
        if (r < 0)
                return r;
        if (!m)
                goto null_message;

        if (sd_netlink_message_is_broadcast(m))
                r = process_match(nl, m);
        else
                r = process_reply(nl, m);
        if (r != 0)
                goto null_message;

        /* Nothing consumed the message; hand it to the caller (or drop it via cleanup). */
        if (ret) {
                *ret = TAKE_PTR(m);

                return 1;
        }

        return 1;

null_message:
        /* On success paths *ret is cleared so the caller never sees a stale pointer. */
        if (r >= 0 && ret)
                *ret = NULL;

        return r;
}
364
/* Public entry point for one processing iteration (see process_running()). Non-reentrant:
 * calling it from within a callback fails with -EBUSY, guarded by the 'processing' flag.
 * NOTE(review): NETLINK_DONT_DESTROY presumably pins a reference so callbacks cannot
 * destroy 'nl' mid-dispatch — confirm against netlink-internal.h. */
int sd_netlink_process(sd_netlink *nl, sd_netlink_message **ret) {
        NETLINK_DONT_DESTROY(nl);
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(!nl->processing, -EBUSY);

        nl->processing = true;
        r = process_running(nl, ret);
        nl->processing = false;

        return r;
}
379
380 static usec_t timespan_to_timestamp(usec_t usec) {
381 static bool default_timeout_set = false;
382 static usec_t default_timeout;
383 int r;
384
385 if (usec == 0) {
386 if (!default_timeout_set) {
387 const char *e;
388
389 default_timeout_set = true;
390 default_timeout = NETLINK_DEFAULT_TIMEOUT_USEC;
391
392 e = secure_getenv("SYSTEMD_NETLINK_DEFAULT_TIMEOUT");
393 if (e) {
394 r = parse_sec(e, &default_timeout);
395 if (r < 0)
396 log_debug_errno(r, "sd-netlink: Failed to parse $SYSTEMD_NETLINK_DEFAULT_TIMEOUT environment variable, ignoring: %m");
397 }
398 }
399
400 usec = default_timeout;
401 }
402
403 return usec_add(now(CLOCK_MONOTONIC), usec);
404 }
405
/* Wait for the netlink fd to become ready. If 'need_more' is set the caller explicitly
 * wants more data, so we poll for POLLIN unconditionally; otherwise the wait is also
 * capped by the next reply-callback deadline from sd_netlink_get_timeout(). Returns 1 if
 * an event occurred, 0 on timeout, negative errno on failure. */
static int netlink_poll(sd_netlink *nl, bool need_more, usec_t timeout_usec) {
        usec_t m = USEC_INFINITY;
        int r, e;

        assert(nl);

        e = sd_netlink_get_events(nl);
        if (e < 0)
                return e;

        if (need_more)
                /* Caller wants more data, and doesn't care about
                 * what's been read or any other timeouts. */
                e |= POLLIN;
        else {
                usec_t until;

                /* Caller wants to process if there is something to
                 * process, but doesn't care otherwise */

                r = sd_netlink_get_timeout(nl, &until);
                if (r < 0)
                        return r;

                /* Convert the absolute deadline into a relative wait (clamped at 0). */
                m = usec_sub_unsigned(until, now(CLOCK_MONOTONIC));
        }

        r = fd_wait_for_event(nl->fd, e, MIN(m, timeout_usec));
        if (r <= 0)
                return r;

        return 1;
}
439
/* Wait up to 'timeout_usec' for the connection to have something to process. Returns
 * immediately with 0 if messages are already queued; otherwise polls the socket. */
int sd_netlink_wait(sd_netlink *nl, uint64_t timeout_usec) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (!ordered_set_isempty(nl->rqueue))
                return 0;

        r = netlink_poll(nl, false, timeout_usec);
        if (ERRNO_IS_NEG_TRANSIENT(r)) /* Convert EINTR to "something happened" and give user a chance to run some code before calling back into us */
                return 1;
        return r;
}
454
455 static int timeout_compare(const void *a, const void *b) {
456 const struct reply_callback *x = a, *y = b;
457
458 return CMP(x->timeout, y->timeout);
459 }
460
/* Number of reply callbacks currently registered (internal helper). */
size_t netlink_get_reply_callback_count(sd_netlink *nl) {
        assert(nl);

        return hashmap_size(nl->reply_callbacks);
}
466
/* Send 'm' and register 'callback' to be invoked when the reply with the matching serial
 * arrives, or when the timeout expires (the callback then receives a synthetic ETIMEDOUT
 * error message, see process_timeout()). 'usec' is relative: 0 selects the default
 * timeout, UINT64_MAX disables the timeout entirely. If 'ret_slot' is NULL the slot is
 * floating, i.e. owned and eventually released by the netlink object. */
int sd_netlink_call_async(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                sd_netlink_message *m,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                uint64_t usec,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r, k;

        assert_return(nl, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        /* Really high cap to catch programming errors, see REPLY_CALLBACKS_MAX. */
        if (hashmap_size(nl->reply_callbacks) >= REPLY_CALLBACKS_MAX)
                return -EXFULL;

        r = hashmap_ensure_allocated(&nl->reply_callbacks, &trivial_hash_ops);
        if (r < 0)
                return r;

        /* Only timed callbacks need a slot in the deadline prioq. */
        if (usec != UINT64_MAX) {
                r = prioq_ensure_allocated(&nl->reply_callbacks_prioq, timeout_compare);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_REPLY_CALLBACK, sizeof(struct reply_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->reply_callback.callback = callback;
        slot->reply_callback.timeout = timespan_to_timestamp(usec);

        k = sd_netlink_send(nl, m, &slot->reply_callback.serial);
        if (k < 0)
                return k;

        r = hashmap_put(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial), &slot->reply_callback);
        if (r < 0)
                return r;

        if (slot->reply_callback.timeout != USEC_INFINITY) {
                r = prioq_put(nl->reply_callbacks_prioq, &slot->reply_callback, &slot->reply_callback.prioq_idx);
                if (r < 0) {
                        /* Roll back the hashmap registration so the slot cleanup stays consistent. */
                        (void) hashmap_remove(nl->reply_callbacks, UINT32_TO_PTR(slot->reply_callback.serial));
                        return r;
                }
        }

        /* Set this at last. Otherwise, some failures in above would call destroy_callback but some would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        /* Ownership of the slot has been transferred (to the caller or to 'nl'). */
        TAKE_PTR(slot);

        return k;
}
531
/* Synchronously wait for the reply with the given serial, reading from the socket and
 * polling as needed until 'usec' (relative; 0 = default timeout, UINT64_MAX = no timeout)
 * elapses. Returns 1 with the reply in *ret, 0 with *ret == NULL for an NLMSG_DONE reply,
 * the (negative) errno carried by an error reply, or -ETIMEDOUT. */
int sd_netlink_read(
                sd_netlink *nl,
                uint32_t serial,
                uint64_t usec,
                sd_netlink_message **ret) {

        usec_t timeout;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        timeout = timespan_to_timestamp(usec);

        for (;;) {
                _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
                usec_t left;

                /* Look up the wanted serial among already-queued messages first. */
                m = hashmap_remove(nl->rqueue_by_serial, UINT32_TO_PTR(serial));
                if (m) {
                        uint16_t type;

                        /* found a match, remove from rqueue and return it */
                        sd_netlink_message_unref(ordered_set_remove(nl->rqueue, m));

                        /* Error replies are converted into a negative return value. */
                        r = sd_netlink_message_get_errno(m);
                        if (r < 0)
                                return r;

                        r = sd_netlink_message_get_type(m, &type);
                        if (r < 0)
                                return r;

                        /* End-of-dump: report success but no message. */
                        if (type == NLMSG_DONE) {
                                if (ret)
                                        *ret = NULL;
                                return 0;
                        }

                        if (ret)
                                *ret = TAKE_PTR(m);
                        return 1;
                }

                r = socket_read_message(nl);
                if (r < 0)
                        return r;
                if (r > 0)
                        /* received message, so try to process straight away */
                        continue;

                /* Nothing read; compute how long we may still block. */
                if (timeout != USEC_INFINITY) {
                        usec_t n;

                        n = now(CLOCK_MONOTONIC);
                        if (n >= timeout)
                                return -ETIMEDOUT;

                        left = usec_sub_unsigned(timeout, n);
                } else
                        left = USEC_INFINITY;

                r = netlink_poll(nl, true, left);
                if (r < 0)
                        return r;
                if (r == 0)
                        return -ETIMEDOUT;
        }
}
601
/* Send 'message' and synchronously wait for its reply; convenience wrapper combining
 * sd_netlink_send() + sd_netlink_read(). Return semantics are those of sd_netlink_read(). */
int sd_netlink_call(
                sd_netlink *nl,
                sd_netlink_message *message,
                uint64_t usec,
                sd_netlink_message **ret) {

        uint32_t serial;
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);

        r = sd_netlink_send(nl, message, &serial);
        if (r < 0)
                return r;

        return sd_netlink_read(nl, serial, usec, ret);
}
621
/* Poll events the caller should wait for on the fd: POLLIN while the receive queue is
 * empty (fresh data is needed), 0 when messages are already queued and can be processed
 * without polling. */
int sd_netlink_get_events(sd_netlink *nl) {
        assert_return(nl, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        return ordered_set_isempty(nl->rqueue) ? POLLIN : 0;
}
628
/* Absolute CLOCK_MONOTONIC time at which processing should next run: 0 (i.e. immediately)
 * if messages are already queued, the earliest reply-callback deadline otherwise, or
 * UINT64_MAX if nothing is pending. Returns 1 if a finite/immediate timeout was set,
 * 0 if there is none. */
int sd_netlink_get_timeout(sd_netlink *nl, uint64_t *timeout_usec) {
        struct reply_callback *c;

        assert_return(nl, -EINVAL);
        assert_return(timeout_usec, -EINVAL);
        assert_return(!netlink_pid_changed(nl), -ECHILD);

        if (!ordered_set_isempty(nl->rqueue)) {
                *timeout_usec = 0;
                return 1;
        }

        c = prioq_peek(nl->reply_callbacks_prioq);
        if (!c) {
                *timeout_usec = UINT64_MAX;
                return 0;
        }

        *timeout_usec = c->timeout;
        return 1;
}
650
651 static int io_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
652 sd_netlink *nl = ASSERT_PTR(userdata);
653 int r;
654
655 r = sd_netlink_process(nl, NULL);
656 if (r < 0)
657 return r;
658
659 return 1;
660 }
661
662 static int time_callback(sd_event_source *s, uint64_t usec, void *userdata) {
663 sd_netlink *nl = ASSERT_PTR(userdata);
664 int r;
665
666 r = sd_netlink_process(nl, NULL);
667 if (r < 0)
668 return r;
669
670 return 1;
671 }
672
/* sd-event prepare hook: before each poll, sync the I/O event mask with what
 * sd_netlink_get_events() requests and arm (or disarm) the one-shot timer event source
 * according to sd_netlink_get_timeout(). */
static int prepare_callback(sd_event_source *s, void *userdata) {
        sd_netlink *nl = ASSERT_PTR(userdata);
        int r, enabled;
        usec_t until;

        assert(s);

        r = sd_netlink_get_events(nl);
        if (r < 0)
                return r;

        r = sd_event_source_set_io_events(nl->io_event_source, r);
        if (r < 0)
                return r;

        enabled = sd_netlink_get_timeout(nl, &until);
        if (enabled < 0)
                return enabled;
        if (enabled > 0) {
                r = sd_event_source_set_time(nl->time_event_source, until);
                if (r < 0)
                        return r;
        }

        r = sd_event_source_set_enabled(nl->time_event_source,
                                        enabled > 0 ? SD_EVENT_ONESHOT : SD_EVENT_OFF);
        if (r < 0)
                return r;

        return 1;
}
704
/* Hook this netlink connection into an sd-event loop ('event', or the thread's default
 * loop if NULL): installs an I/O source for the socket, a prepare callback keeping event
 * mask and timer in sync, and a timer source for reply timeouts. Fails with -EBUSY if
 * already attached; on any error everything is rolled back via sd_netlink_detach_event(). */
int sd_netlink_attach_event(sd_netlink *nl, sd_event *event, int64_t priority) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!nl->event, -EBUSY);

        assert(!nl->io_event_source);
        assert(!nl->time_event_source);

        if (event)
                nl->event = sd_event_ref(event);
        else {
                r = sd_event_default(&nl->event);
                if (r < 0)
                        return r;
        }

        r = sd_event_add_io(nl->event, &nl->io_event_source, nl->fd, 0, io_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->io_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->io_event_source, "netlink-receive-message");
        if (r < 0)
                goto fail;

        r = sd_event_source_set_prepare(nl->io_event_source, prepare_callback);
        if (r < 0)
                goto fail;

        r = sd_event_add_time(nl->event, &nl->time_event_source, CLOCK_MONOTONIC, 0, 0, time_callback, nl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(nl->time_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(nl->time_event_source, "netlink-timer");
        if (r < 0)
                goto fail;

        return 0;

fail:
        sd_netlink_detach_event(nl);
        return r;
}
756
757 int sd_netlink_detach_event(sd_netlink *nl) {
758 assert_return(nl, -EINVAL);
759 assert_return(nl->event, -ENXIO);
760
761 nl->io_event_source = sd_event_source_unref(nl->io_event_source);
762
763 nl->time_event_source = sd_event_source_unref(nl->time_event_source);
764
765 nl->event = sd_event_unref(nl->event);
766
767 return 0;
768 }
769
/* Returns the event loop attached via sd_netlink_attach_event(), or NULL if none. */
sd_event* sd_netlink_get_event(sd_netlink *nl) {
        assert_return(nl, NULL);

        return nl->event;
}
775
/* Register a match callback for broadcast messages of the given type (and, for generic
 * netlink, command; 0 means "any"), subscribing the socket to each listed multicast group.
 * If 'ret_slot' is NULL the slot is floating, i.e. owned by the netlink object.
 * NOTE(review): if a later step here fails, the broadcast group refs taken below are not
 * explicitly released on this path — verify cleanup semantics against
 * socket_broadcast_group_ref()/netlink_slot_disconnect(). */
int netlink_add_match_internal(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                const uint32_t *groups,
                size_t n_groups,
                uint16_t type,
                uint8_t cmd,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r;

        assert(groups);
        assert(n_groups > 0);

        /* Subscribe the socket to every requested multicast group (refcounted). */
        for (size_t i = 0; i < n_groups; i++) {
                r = socket_broadcast_group_ref(nl, groups[i]);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_MATCH_CALLBACK, sizeof(struct match_callback),
                                  userdata, description, &slot);
        if (r < 0)
                return r;

        /* The slot keeps its own copy of the group list. */
        slot->match_callback.groups = newdup(uint32_t, groups, n_groups);
        if (!slot->match_callback.groups)
                return -ENOMEM;

        slot->match_callback.n_groups = n_groups;
        slot->match_callback.callback = callback;
        slot->match_callback.type = type;
        slot->match_callback.cmd = cmd;

        LIST_PREPEND(match_callbacks, nl->match_callbacks, &slot->match_callback);

        /* Set this at last. Otherwise, some failures in above call the destroy callback but some do not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        /* Ownership of the slot has been transferred (to the caller or to 'nl'). */
        TAKE_PTR(slot);
        return 0;
}
825
/* Register a match callback for rtnetlink broadcast messages of the given RTM_* type,
 * deriving the RTNLGRP_* multicast groups to subscribe to from the type. Types without a
 * mapping below are rejected with -EOPNOTSUPP. */
int sd_netlink_add_match(
                sd_netlink *rtnl,
                sd_netlink_slot **ret_slot,
                uint16_t type,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {

        /* Static type → multicast-group tables; address/route/rule cover both IPv4 and IPv6. */
        static const uint32_t
                address_groups[]  = { RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR, },
                link_groups[]     = { RTNLGRP_LINK, },
                neighbor_groups[] = { RTNLGRP_NEIGH, },
                nexthop_groups[]  = { RTNLGRP_NEXTHOP, },
                route_groups[]    = { RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV6_ROUTE, },
                rule_groups[]     = { RTNLGRP_IPV4_RULE, RTNLGRP_IPV6_RULE, },
                tc_groups[]       = { RTNLGRP_TC };
        const uint32_t *groups;
        size_t n_groups;

        assert_return(rtnl, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!netlink_pid_changed(rtnl), -ECHILD);

        switch (type) {
                case RTM_NEWLINK:
                case RTM_DELLINK:
                        groups = link_groups;
                        n_groups = ELEMENTSOF(link_groups);
                        break;
                case RTM_NEWADDR:
                case RTM_DELADDR:
                        groups = address_groups;
                        n_groups = ELEMENTSOF(address_groups);
                        break;
                case RTM_NEWNEIGH:
                case RTM_DELNEIGH:
                        groups = neighbor_groups;
                        n_groups = ELEMENTSOF(neighbor_groups);
                        break;
                case RTM_NEWROUTE:
                case RTM_DELROUTE:
                        groups = route_groups;
                        n_groups = ELEMENTSOF(route_groups);
                        break;
                case RTM_NEWRULE:
                case RTM_DELRULE:
                        groups = rule_groups;
                        n_groups = ELEMENTSOF(rule_groups);
                        break;
                case RTM_NEWNEXTHOP:
                case RTM_DELNEXTHOP:
                        groups = nexthop_groups;
                        n_groups = ELEMENTSOF(nexthop_groups);
                        break;
                case RTM_NEWQDISC:
                case RTM_DELQDISC:
                case RTM_NEWTCLASS:
                case RTM_DELTCLASS:
                        groups = tc_groups;
                        n_groups = ELEMENTSOF(tc_groups);
                        break;
                default:
                        return -EOPNOTSUPP;
        }

        /* cmd == 0: no generic-netlink command filtering for rtnetlink. */
        return netlink_add_match_internal(rtnl, ret_slot, groups, n_groups, type, 0, callback,
                                          destroy_callback, userdata, description);
}
895
896 int sd_netlink_attach_filter(sd_netlink *nl, size_t len, const struct sock_filter *filter) {
897 assert_return(nl, -EINVAL);
898 assert_return(len == 0 || filter, -EINVAL);
899
900 if (setsockopt(nl->fd, SOL_SOCKET,
901 len == 0 ? SO_DETACH_FILTER : SO_ATTACH_FILTER,
902 &(struct sock_fprog) {
903 .len = len,
904 .filter = (struct sock_filter*) filter,
905 }, sizeof(struct sock_fprog)) < 0)
906 return -errno;
907
908 return 0;
909 }