/* SPDX-License-Identifier: LGPL-2.1+ */

#include <poll.h>
#include <sys/socket.h>

#include "sd-netlink.h"

#include "alloc-util.h"
#include "fd-util.h"
#include "hashmap.h"
#include "macro.h"
#include "missing.h"
#include "netlink-internal.h"
#include "netlink-slot.h"
#include "netlink-util.h"
#include "process-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "util.h"

static int sd_netlink_new(sd_netlink **ret) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;

        assert_return(ret, -EINVAL);

        rtnl = new(sd_netlink, 1);
        if (!rtnl)
                return -ENOMEM;

        *rtnl = (sd_netlink) {
                .n_ref = REFCNT_INIT,
                .fd = -1,
                .sockaddr.nl.nl_family = AF_NETLINK,
                .original_pid = getpid_cached(),
                .protocol = -1,

                /* Change notification responses have sequence 0, so we must
                 * start our request sequence numbers at 1, or we may confuse our
                 * responses with notifications from the kernel */
                .serial = 1,
        };

        /* We guarantee that the read buffer has at least space for
         * a message header */
        if (!greedy_realloc((void**)&rtnl->rbuffer, &rtnl->rbuffer_allocated,
                            sizeof(struct nlmsghdr), sizeof(uint8_t)))
                return -ENOMEM;

        *ret = TAKE_PTR(rtnl);

        return 0;
}

int sd_netlink_new_from_netlink(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
        socklen_t addrlen;
        int r;

        assert_return(ret, -EINVAL);

        r = sd_netlink_new(&rtnl);
        if (r < 0)
                return r;

        addrlen = sizeof(rtnl->sockaddr);

        r = getsockname(fd, &rtnl->sockaddr.sa, &addrlen);
        if (r < 0)
                return -errno;

        if (rtnl->sockaddr.nl.nl_family != AF_NETLINK)
                return -EINVAL;

        rtnl->fd = fd;

        *ret = TAKE_PTR(rtnl);

        return 0;
}

static bool rtnl_pid_changed(sd_netlink *rtnl) {
        assert(rtnl);

        /* We don't support people creating an rtnl connection and
         * keeping it around over a fork(). Let's complain. */

        return rtnl->original_pid != getpid_cached();
}

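/* Creates an sd_netlink object on top of an already connected netlink socket. On success the object
 * takes possession of the fd; if binding fails, the caller remains the owner of the fd (see below). */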
int sd_netlink_open_fd(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
        int r;
        int protocol;
        socklen_t l;

        assert_return(ret, -EINVAL);
        assert_return(fd >= 0, -EBADF);

        r = sd_netlink_new(&rtnl);
        if (r < 0)
                return r;

        l = sizeof(protocol);
        r = getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &l);
        if (r < 0)
                return -errno;

        rtnl->fd = fd;
        rtnl->protocol = protocol;

        r = socket_bind(rtnl);
        if (r < 0) {
                rtnl->fd = -1; /* on failure, the caller remains owner of the fd, hence don't close it here */
                rtnl->protocol = -1;
                return r;
        }

        *ret = TAKE_PTR(rtnl);

        return 0;
}

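/* Opens and binds a new netlink socket for the given kernel protocol family (e.g. NETLINK_ROUTE). */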
int netlink_open_family(sd_netlink **ret, int family) {
        _cleanup_close_ int fd = -1;
        int r;

        fd = socket_open(family);
        if (fd < 0)
                return fd;

        r = sd_netlink_open_fd(ret, fd);
        if (r < 0)
                return r;

        fd = -1;

        return 0;
}

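/* sd_netlink_open() is the usual entry point for rtnetlink clients. A minimal synchronous round trip
 * might look like the sketch below (illustrative only; error handling is abbreviated and the interface
 * index 1 is a placeholder):
 *
 *         _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
 *         _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL, *reply = NULL;
 *         int r;
 *
 *         r = sd_netlink_open(&rtnl);
 *         if (r < 0)
 *                 return r;
 *
 *         r = sd_rtnl_message_new_link(rtnl, &req, RTM_GETLINK, 1);
 *         if (r < 0)
 *                 return r;
 *
 *         r = sd_netlink_call(rtnl, req, 0, &reply);
 *         if (r < 0)
 *                 return r;
 */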
int sd_netlink_open(sd_netlink **ret) {
        return netlink_open_family(ret, NETLINK_ROUTE);
}

int sd_netlink_inc_rcvbuf(sd_netlink *rtnl, size_t size) {
        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);

        return fd_inc_rcvbuf(rtnl->fd, size);
}

static sd_netlink *netlink_free(sd_netlink *rtnl) {
        sd_netlink_slot *s;
        unsigned i;

        assert(rtnl);

        for (i = 0; i < rtnl->rqueue_size; i++)
                sd_netlink_message_unref(rtnl->rqueue[i]);
        free(rtnl->rqueue);

        for (i = 0; i < rtnl->rqueue_partial_size; i++)
                sd_netlink_message_unref(rtnl->rqueue_partial[i]);
        free(rtnl->rqueue_partial);

        free(rtnl->rbuffer);

        while ((s = rtnl->slots)) {
                assert(s->floating);
                netlink_slot_disconnect(s, true);
        }
        hashmap_free(rtnl->reply_callbacks);
        prioq_free(rtnl->reply_callbacks_prioq);

        sd_event_source_unref(rtnl->io_event_source);
        sd_event_source_unref(rtnl->time_event_source);
        sd_event_unref(rtnl->event);

        hashmap_free(rtnl->broadcast_group_refs);

        safe_close(rtnl->fd);
        return mfree(rtnl);
}

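/* Generates the public sd_netlink_ref() and sd_netlink_unref() functions, with netlink_free() run once
 * the last reference is dropped. */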
DEFINE_ATOMIC_REF_UNREF_FUNC(sd_netlink, sd_netlink, netlink_free);

static void rtnl_seal_message(sd_netlink *rtnl, sd_netlink_message *m) {
        assert(rtnl);
        assert(!rtnl_pid_changed(rtnl));
        assert(m);
        assert(m->hdr);

        /* don't use seq == 0, as that is used for broadcasts, so we
           would get confused by replies to such messages */
        m->hdr->nlmsg_seq = rtnl->serial++ ? : rtnl->serial++;

        rtnl_message_seal(m);

        return;
}

int sd_netlink_send(sd_netlink *nl,
                    sd_netlink_message *message,
                    uint32_t *serial) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!rtnl_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);
        assert_return(!message->sealed, -EPERM);

        rtnl_seal_message(nl, message);

        r = socket_write_message(nl, message);
        if (r < 0)
                return r;

        if (serial)
                *serial = rtnl_message_get_serial(message);

        return 1;
}

int rtnl_rqueue_make_room(sd_netlink *rtnl) {
        assert(rtnl);

        if (rtnl->rqueue_size >= RTNL_RQUEUE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
                                       "rtnl: exhausted the read queue size (%d)",
                                       RTNL_RQUEUE_MAX);

        if (!GREEDY_REALLOC(rtnl->rqueue, rtnl->rqueue_allocated, rtnl->rqueue_size + 1))
                return -ENOMEM;

        return 0;
}

int rtnl_rqueue_partial_make_room(sd_netlink *rtnl) {
        assert(rtnl);

        if (rtnl->rqueue_partial_size >= RTNL_RQUEUE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
                                       "rtnl: exhausted the partial read queue size (%d)",
                                       RTNL_RQUEUE_MAX);

        if (!GREEDY_REALLOC(rtnl->rqueue_partial, rtnl->rqueue_partial_allocated,
                            rtnl->rqueue_partial_size + 1))
                return -ENOMEM;

        return 0;
}

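/* Dispatches the oldest message from the read queue into *message, reading from the socket first if the
 * queue is empty. Returns 0 if there is nothing to dispatch, and a positive value otherwise (in the
 * ENOBUFS overrun case *message is left untouched). */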
static int dispatch_rqueue(sd_netlink *rtnl, sd_netlink_message **message) {
        int r;

        assert(rtnl);
        assert(message);

        if (rtnl->rqueue_size <= 0) {
                /* Try to read a new message */
                r = socket_read_message(rtnl);
                if (r == -ENOBUFS) { /* FIXME: ignore buffer overruns for now */
                        log_debug_errno(r, "Got ENOBUFS from netlink socket, ignoring.");
                        return 1;
                }
                if (r <= 0)
                        return r;
        }

        /* Dispatch a queued message */
        *message = rtnl->rqueue[0];
        rtnl->rqueue_size--;
        memmove(rtnl->rqueue, rtnl->rqueue + 1, sizeof(sd_netlink_message*) * rtnl->rqueue_size);

        return 1;
}

static int process_timeout(sd_netlink *rtnl) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        struct reply_callback *c;
        sd_netlink_slot *slot;
        usec_t n;
        int r;

        assert(rtnl);

        c = prioq_peek(rtnl->reply_callbacks_prioq);
        if (!c)
                return 0;

        n = now(CLOCK_MONOTONIC);
        if (c->timeout > n)
                return 0;

        r = rtnl_message_new_synthetic_error(rtnl, -ETIMEDOUT, c->serial, &m);
        if (r < 0)
                return r;

        assert_se(prioq_pop(rtnl->reply_callbacks_prioq) == c);
        c->timeout = 0;
        hashmap_remove(rtnl->reply_callbacks, &c->serial);

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(rtnl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: timed-out callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_reply(sd_netlink *rtnl, sd_netlink_message *m) {
        struct reply_callback *c;
        sd_netlink_slot *slot;
        uint64_t serial;
        uint16_t type;
        int r;

        assert(rtnl);
        assert(m);

        serial = rtnl_message_get_serial(m);
        c = hashmap_remove(rtnl->reply_callbacks, &serial);
        if (!c)
                return 0;

        if (c->timeout != 0) {
                prioq_remove(rtnl->reply_callbacks_prioq, c, &c->prioq_idx);
                c->timeout = 0;
        }

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (type == NLMSG_DONE)
                m = NULL;

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(rtnl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: reply callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_match(sd_netlink *rtnl, sd_netlink_message *m) {
        struct match_callback *c;
        sd_netlink_slot *slot;
        uint16_t type;
        int r;

        assert(rtnl);
        assert(m);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        LIST_FOREACH(match_callbacks, c, rtnl->match_callbacks) {
                if (type == c->type) {
                        slot = container_of(c, sd_netlink_slot, match_callback);

                        r = c->callback(rtnl, m, slot->userdata);
                        if (r != 0) {
                                if (r < 0)
                                        log_debug_errno(r, "sd-netlink: match callback %s%s%sfailed: %m",
                                                        slot->description ? "'" : "",
                                                        strempty(slot->description),
                                                        slot->description ? "' " : "");

                                break;
                        }
                }
        }

        return 1;
}

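/* One iteration of the processing loop: expired reply timeouts are handled first, then a single queued
 * message is dispatched, to the match callbacks if it is a multicast notification, otherwise to the
 * reply callback registered for its sequence number. */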
static int process_running(sd_netlink *rtnl, sd_netlink_message **ret) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        int r;

        assert(rtnl);

        r = process_timeout(rtnl);
        if (r != 0)
                goto null_message;

        r = dispatch_rqueue(rtnl, &m);
        if (r < 0)
                return r;
        if (!m)
                goto null_message;

        if (sd_netlink_message_is_broadcast(m)) {
                r = process_match(rtnl, m);
                if (r != 0)
                        goto null_message;
        } else {
                r = process_reply(rtnl, m);
                if (r != 0)
                        goto null_message;
        }

        if (ret)
                *ret = TAKE_PTR(m);

        return 1;

null_message:
        if (r >= 0 && ret)
                *ret = NULL;

        return r;
}

int sd_netlink_process(sd_netlink *rtnl, sd_netlink_message **ret) {
        NETLINK_DONT_DESTROY(rtnl);
        int r;

        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);
        assert_return(!rtnl->processing, -EBUSY);

        rtnl->processing = true;
        r = process_running(rtnl, ret);
        rtnl->processing = false;

        return r;
}

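/* Converts a relative timeout into an absolute CLOCK_MONOTONIC deadline: (uint64_t) -1 means "no
 * timeout", and 0 selects the default RTNL_DEFAULT_TIMEOUT. */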
static usec_t calc_elapse(uint64_t usec) {
        if (usec == (uint64_t) -1)
                return 0;

        if (usec == 0)
                usec = RTNL_DEFAULT_TIMEOUT;

        return now(CLOCK_MONOTONIC) + usec;
}

static int rtnl_poll(sd_netlink *rtnl, bool need_more, uint64_t timeout_usec) {
        struct pollfd p[1] = {};
        struct timespec ts;
        usec_t m = USEC_INFINITY;
        int r, e;

        assert(rtnl);

        e = sd_netlink_get_events(rtnl);
        if (e < 0)
                return e;

        if (need_more)
                /* Caller wants more data, and doesn't care about
                 * what's been read or any other timeouts. */
                e |= POLLIN;
        else {
                usec_t until;
                /* Caller wants to process if there is something to
                 * process, but doesn't care otherwise */

                r = sd_netlink_get_timeout(rtnl, &until);
                if (r < 0)
                        return r;
                if (r > 0) {
                        usec_t nw;
                        nw = now(CLOCK_MONOTONIC);
                        m = until > nw ? until - nw : 0;
                }
        }

        if (timeout_usec != (uint64_t) -1 && (m == (uint64_t) -1 || timeout_usec < m))
                m = timeout_usec;

        p[0].fd = rtnl->fd;
        p[0].events = e;

        r = ppoll(p, 1, m == (uint64_t) -1 ? NULL : timespec_store(&ts, m), NULL);
        if (r < 0)
                return -errno;

        return r > 0 ? 1 : 0;
}

int sd_netlink_wait(sd_netlink *nl, uint64_t timeout_usec) {
        assert_return(nl, -EINVAL);
        assert_return(!rtnl_pid_changed(nl), -ECHILD);

        if (nl->rqueue_size > 0)
                return 0;

        return rtnl_poll(nl, false, timeout_usec);
}

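/* Priority queue ordering for reply callbacks: entries with a timeout sort before entries without one,
 * otherwise earlier deadlines come first. */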
static int timeout_compare(const void *a, const void *b) {
        const struct reply_callback *x = a, *y = b;

        if (x->timeout != 0 && y->timeout == 0)
                return -1;

        if (x->timeout == 0 && y->timeout != 0)
                return 1;

        return CMP(x->timeout, y->timeout);
}

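/* Sends a request and registers a reply callback for its sequence number. A minimal asynchronous use
 * might look like the sketch below (illustrative only; the handler name and the description string are
 * placeholders, and passing NULL for ret_slot makes the slot floating, i.e. it is released automatically
 * after the callback ran):
 *
 *         static int on_reply(sd_netlink *nl, sd_netlink_message *reply, void *userdata) {
 *                 return sd_netlink_message_get_errno(reply);
 *         }
 *
 *         r = sd_netlink_call_async(rtnl, NULL, req, on_reply, NULL, NULL, 0, "my-request");
 *
 * The callback is invoked from sd_netlink_process(), typically driven by the event loop after
 * sd_netlink_attach_event(). */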
int sd_netlink_call_async(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                sd_netlink_message *m,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                uint64_t usec,
                const char *description) {
        _cleanup_free_ sd_netlink_slot *slot = NULL;
        uint32_t s;
        int r, k;

        assert_return(nl, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!rtnl_pid_changed(nl), -ECHILD);

        r = hashmap_ensure_allocated(&nl->reply_callbacks, &uint64_hash_ops);
        if (r < 0)
                return r;

        if (usec != (uint64_t) -1) {
                r = prioq_ensure_allocated(&nl->reply_callbacks_prioq, timeout_compare);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_REPLY_CALLBACK, sizeof(struct reply_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->reply_callback.callback = callback;
        slot->reply_callback.timeout = calc_elapse(usec);

        k = sd_netlink_send(nl, m, &s);
        if (k < 0)
                return k;

        slot->reply_callback.serial = s;

        r = hashmap_put(nl->reply_callbacks, &slot->reply_callback.serial, &slot->reply_callback);
        if (r < 0)
                return r;

        if (slot->reply_callback.timeout != 0) {
                r = prioq_put(nl->reply_callbacks_prioq, &slot->reply_callback, &slot->reply_callback.prioq_idx);
                if (r < 0) {
                        (void) hashmap_remove(nl->reply_callbacks, &slot->reply_callback.serial);
                        return r;
                }
        }

        /* Set this only now. Otherwise, some failure paths above would run the destroy callback while others would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);

        return k;
}

int sd_netlink_call(sd_netlink *rtnl,
                    sd_netlink_message *message,
                    uint64_t usec,
                    sd_netlink_message **ret) {
        usec_t timeout;
        uint32_t serial;
        int r;

        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);
        assert_return(message, -EINVAL);

        r = sd_netlink_send(rtnl, message, &serial);
        if (r < 0)
                return r;

        timeout = calc_elapse(usec);

        for (;;) {
                usec_t left;
                unsigned i;

                for (i = 0; i < rtnl->rqueue_size; i++) {
                        uint32_t received_serial;

                        received_serial = rtnl_message_get_serial(rtnl->rqueue[i]);

                        if (received_serial == serial) {
                                _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *incoming = NULL;
                                uint16_t type;

                                incoming = rtnl->rqueue[i];

                                /* found a match, remove from rqueue and return it */
                                memmove(rtnl->rqueue + i, rtnl->rqueue + i + 1,
                                        sizeof(sd_netlink_message*) * (rtnl->rqueue_size - i - 1));
                                rtnl->rqueue_size--;

                                r = sd_netlink_message_get_errno(incoming);
                                if (r < 0)
                                        return r;

                                r = sd_netlink_message_get_type(incoming, &type);
                                if (r < 0)
                                        return r;

                                if (type == NLMSG_DONE) {
                                        if (ret)
                                                *ret = NULL;
                                        return 0;
                                }

                                if (ret)
                                        *ret = TAKE_PTR(incoming);

                                return 1;
                        }
                }

                r = socket_read_message(rtnl);
                if (r < 0)
                        return r;
                if (r > 0)
                        /* received message, so try to process straight away */
                        continue;

                if (timeout > 0) {
                        usec_t n;

                        n = now(CLOCK_MONOTONIC);
                        if (n >= timeout)
                                return -ETIMEDOUT;

                        left = timeout - n;
                } else
                        left = (uint64_t) -1;

                r = rtnl_poll(rtnl, true, left);
                if (r < 0)
                        return r;
                else if (r == 0)
                        return -ETIMEDOUT;
        }
}

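/* Returns the poll events to wait for on the fd: POLLIN while the read queue is empty, 0 once messages
 * are already queued and can be processed without reading. */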
int sd_netlink_get_events(sd_netlink *rtnl) {
        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);

        if (rtnl->rqueue_size == 0)
                return POLLIN;
        else
                return 0;
}

int sd_netlink_get_timeout(sd_netlink *rtnl, uint64_t *timeout_usec) {
        struct reply_callback *c;

        assert_return(rtnl, -EINVAL);
        assert_return(timeout_usec, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);

        if (rtnl->rqueue_size > 0) {
                *timeout_usec = 0;
                return 1;
        }

        c = prioq_peek(rtnl->reply_callbacks_prioq);
        if (!c) {
                *timeout_usec = (uint64_t) -1;
                return 0;
        }

        *timeout_usec = c->timeout;

        return 1;
}

static int io_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        sd_netlink *rtnl = userdata;
        int r;

        assert(rtnl);

        r = sd_netlink_process(rtnl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int time_callback(sd_event_source *s, uint64_t usec, void *userdata) {
        sd_netlink *rtnl = userdata;
        int r;

        assert(rtnl);

        r = sd_netlink_process(rtnl, NULL);
        if (r < 0)
                return r;

        return 1;
}

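/* Event loop prepare hook: refreshes the I/O event mask and arms (or disables) the timer source
 * according to the earliest pending reply timeout. */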
static int prepare_callback(sd_event_source *s, void *userdata) {
        sd_netlink *rtnl = userdata;
        int r, e;
        usec_t until;

        assert(s);
        assert(rtnl);

        e = sd_netlink_get_events(rtnl);
        if (e < 0)
                return e;

        r = sd_event_source_set_io_events(rtnl->io_event_source, e);
        if (r < 0)
                return r;

        r = sd_netlink_get_timeout(rtnl, &until);
        if (r < 0)
                return r;
        if (r > 0) {
                int j;

                j = sd_event_source_set_time(rtnl->time_event_source, until);
                if (j < 0)
                        return j;
        }

        r = sd_event_source_set_enabled(rtnl->time_event_source, r > 0);
        if (r < 0)
                return r;

        return 1;
}

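/* Hooks the connection up to an sd-event loop. A minimal sketch of typical use (illustrative only):
 *
 *         _cleanup_(sd_event_unrefp) sd_event *e = NULL;
 *
 *         r = sd_event_default(&e);
 *         if (r < 0)
 *                 return r;
 *
 *         r = sd_netlink_attach_event(rtnl, e, SD_EVENT_PRIORITY_NORMAL);
 *         if (r < 0)
 *                 return r;
 *
 *         r = sd_event_loop(e);
 *
 * Incoming messages and reply timeouts are then handled via io_callback()/time_callback() above. */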
int sd_netlink_attach_event(sd_netlink *rtnl, sd_event *event, int64_t priority) {
        int r;

        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl->event, -EBUSY);

        assert(!rtnl->io_event_source);
        assert(!rtnl->time_event_source);

        if (event)
                rtnl->event = sd_event_ref(event);
        else {
                r = sd_event_default(&rtnl->event);
                if (r < 0)
                        return r;
        }

        r = sd_event_add_io(rtnl->event, &rtnl->io_event_source, rtnl->fd, 0, io_callback, rtnl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(rtnl->io_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(rtnl->io_event_source, "rtnl-receive-message");
        if (r < 0)
                goto fail;

        r = sd_event_source_set_prepare(rtnl->io_event_source, prepare_callback);
        if (r < 0)
                goto fail;

        r = sd_event_add_time(rtnl->event, &rtnl->time_event_source, CLOCK_MONOTONIC, 0, 0, time_callback, rtnl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(rtnl->time_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(rtnl->time_event_source, "rtnl-timer");
        if (r < 0)
                goto fail;

        return 0;

fail:
        sd_netlink_detach_event(rtnl);
        return r;
}

int sd_netlink_detach_event(sd_netlink *rtnl) {
        assert_return(rtnl, -EINVAL);
        assert_return(rtnl->event, -ENXIO);

        rtnl->io_event_source = sd_event_source_unref(rtnl->io_event_source);

        rtnl->time_event_source = sd_event_source_unref(rtnl->time_event_source);

        rtnl->event = sd_event_unref(rtnl->event);

        return 0;
}

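/* Subscribes to kernel notifications of the given message type by joining the corresponding rtnetlink
 * multicast groups. Only link, address, route and routing rule message types are supported here. */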
int sd_netlink_add_match(
                sd_netlink *rtnl,
                sd_netlink_slot **ret_slot,
                uint16_t type,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {
        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r;

        assert_return(rtnl, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);

        r = netlink_slot_allocate(rtnl, !ret_slot, NETLINK_MATCH_CALLBACK, sizeof(struct match_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->match_callback.callback = callback;
        slot->match_callback.type = type;

        switch (type) {
        case RTM_NEWLINK:
        case RTM_DELLINK:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_LINK);
                if (r < 0)
                        return r;

                break;
        case RTM_NEWADDR:
        case RTM_DELADDR:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV4_IFADDR);
                if (r < 0)
                        return r;

                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV6_IFADDR);
                if (r < 0)
                        return r;

                break;
        case RTM_NEWROUTE:
        case RTM_DELROUTE:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV4_ROUTE);
                if (r < 0)
                        return r;

                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV6_ROUTE);
                if (r < 0)
                        return r;
                break;
        case RTM_NEWRULE:
        case RTM_DELRULE:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV4_RULE);
                if (r < 0)
                        return r;

                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV6_RULE);
                if (r < 0)
                        return r;
                break;
        default:
                return -EOPNOTSUPP;
        }

        LIST_PREPEND(match_callbacks, rtnl->match_callbacks, &slot->match_callback);

        /* Set this only now. Otherwise, some failure paths above would run the destroy callback while others would not. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);

        return 0;
}