/* SPDX-License-Identifier: LGPL-2.1+ */

#include <poll.h>

#include "sd-netlink.h"

#include "alloc-util.h"
#include "fd-util.h"
#include "hashmap.h"
#include "macro.h"
#include "netlink-internal.h"
#include "netlink-slot.h"
#include "netlink-util.h"
#include "process-util.h"
#include "socket-util.h"
#include "string-util.h"
#include "util.h"

static int sd_netlink_new(sd_netlink **ret) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;

        assert_return(ret, -EINVAL);

        rtnl = new(sd_netlink, 1);
        if (!rtnl)
                return -ENOMEM;

        *rtnl = (sd_netlink) {
                .n_ref = 1,
                .fd = -1,
                .sockaddr.nl.nl_family = AF_NETLINK,
                .original_pid = getpid_cached(),
                .protocol = -1,

                /* Change notification responses have sequence 0, so we must
                 * start our request sequence numbers at 1, or we may confuse our
                 * responses with notifications from the kernel */
                .serial = 1,

        };

        /* We guarantee that the read buffer has at least space for
         * a message header */
        if (!greedy_realloc((void**)&rtnl->rbuffer, &rtnl->rbuffer_allocated,
                            sizeof(struct nlmsghdr), sizeof(uint8_t)))
                return -ENOMEM;

        *ret = TAKE_PTR(rtnl);

        return 0;
}

int sd_netlink_new_from_netlink(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
        socklen_t addrlen;
        int r;

        assert_return(ret, -EINVAL);

        r = sd_netlink_new(&rtnl);
        if (r < 0)
                return r;

        addrlen = sizeof(rtnl->sockaddr);

        r = getsockname(fd, &rtnl->sockaddr.sa, &addrlen);
        if (r < 0)
                return -errno;

        if (rtnl->sockaddr.nl.nl_family != AF_NETLINK)
                return -EINVAL;

        rtnl->fd = fd;

        *ret = TAKE_PTR(rtnl);

        return 0;
}

static bool rtnl_pid_changed(const sd_netlink *rtnl) {
        assert(rtnl);

        /* We don't support people creating an rtnl connection and
         * keeping it around over a fork(). Let's complain. */

        return rtnl->original_pid != getpid_cached();
}

int sd_netlink_open_fd(sd_netlink **ret, int fd) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *rtnl = NULL;
        int r;
        int protocol;
        socklen_t l;

        assert_return(ret, -EINVAL);
        assert_return(fd >= 0, -EBADF);

        r = sd_netlink_new(&rtnl);
        if (r < 0)
                return r;

        l = sizeof(protocol);
        r = getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &l);
        if (r < 0)
                return -errno;

        rtnl->fd = fd;
        rtnl->protocol = protocol;

        r = socket_bind(rtnl);
        if (r < 0) {
                rtnl->fd = -1; /* on failure, the caller remains owner of the fd, hence don't close it here */
                rtnl->protocol = -1;
                return r;
        }

        *ret = TAKE_PTR(rtnl);

        return 0;
}
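
/* Illustrative sketch, not part of the upstream source: handing an already-created
 * AF_NETLINK socket to sd_netlink_open_fd(). As noted in the error path above, the
 * caller keeps ownership of the fd if the call fails. The example_* name is made up. */
#if 0
static int example_open_from_fd(sd_netlink **ret) {
        _cleanup_close_ int fd = -1;
        int r;

        fd = socket(AF_NETLINK, SOCK_RAW|SOCK_CLOEXEC|SOCK_NONBLOCK, NETLINK_ROUTE);
        if (fd < 0)
                return -errno;

        r = sd_netlink_open_fd(ret, fd);
        if (r < 0)
                return r; /* fd is still ours, _cleanup_close_ closes it */

        fd = -1; /* ownership passed to the sd_netlink object */
        return 0;
}
#endif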

int netlink_open_family(sd_netlink **ret, int family) {
        _cleanup_close_ int fd = -1;
        int r;

        fd = socket_open(family);
        if (fd < 0)
                return fd;

        r = sd_netlink_open_fd(ret, fd);
        if (r < 0)
                return r;

        fd = -1;

        return 0;
}

int sd_netlink_open(sd_netlink **ret) {
        return netlink_open_family(ret, NETLINK_ROUTE);
}

int sd_netlink_inc_rcvbuf(sd_netlink *rtnl, size_t size) {
        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);

        return fd_inc_rcvbuf(rtnl->fd, size);
}

static sd_netlink *netlink_free(sd_netlink *rtnl) {
        sd_netlink_slot *s;
        unsigned i;

        assert(rtnl);

        for (i = 0; i < rtnl->rqueue_size; i++)
                sd_netlink_message_unref(rtnl->rqueue[i]);
        free(rtnl->rqueue);

        for (i = 0; i < rtnl->rqueue_partial_size; i++)
                sd_netlink_message_unref(rtnl->rqueue_partial[i]);
        free(rtnl->rqueue_partial);

        free(rtnl->rbuffer);

        while ((s = rtnl->slots)) {
                assert(s->floating);
                netlink_slot_disconnect(s, true);
        }
        hashmap_free(rtnl->reply_callbacks);
        prioq_free(rtnl->reply_callbacks_prioq);

        sd_event_source_unref(rtnl->io_event_source);
        sd_event_source_unref(rtnl->time_event_source);
        sd_event_unref(rtnl->event);

        hashmap_free(rtnl->broadcast_group_refs);

        hashmap_free(rtnl->genl_family_to_nlmsg_type);
        hashmap_free(rtnl->nlmsg_type_to_genl_family);

        safe_close(rtnl->fd);
        return mfree(rtnl);
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(sd_netlink, sd_netlink, netlink_free);

static void rtnl_seal_message(sd_netlink *rtnl, sd_netlink_message *m) {
        assert(rtnl);
        assert(!rtnl_pid_changed(rtnl));
        assert(m);
        assert(m->hdr);

        /* don't use seq == 0, as that is used for broadcasts, so we
           would get confused by replies to such messages */
        m->hdr->nlmsg_seq = rtnl->serial++ ? : rtnl->serial++;

        rtnl_message_seal(m);

        return;
}

int sd_netlink_send(sd_netlink *nl,
                    sd_netlink_message *message,
                    uint32_t *serial) {
        int r;

        assert_return(nl, -EINVAL);
        assert_return(!rtnl_pid_changed(nl), -ECHILD);
        assert_return(message, -EINVAL);
        assert_return(!message->sealed, -EPERM);

        rtnl_seal_message(nl, message);

        r = socket_write_message(nl, message);
        if (r < 0)
                return r;

        if (serial)
                *serial = rtnl_message_get_serial(message);

        return 1;
}
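
/* Illustrative sketch, not part of the upstream source: sending a request on its own with
 * sd_netlink_send(). The returned serial identifies the request; sd_netlink_call() and
 * sd_netlink_call_async() below build on this and are usually more convenient. The
 * example_* name and the RTM_GETLINK request are illustrative assumptions. */
#if 0
static int example_send_getlink(sd_netlink *nl, int ifindex, uint32_t *ret_serial) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL;
        int r;

        r = sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, ifindex);
        if (r < 0)
                return r;

        /* Seals the message, assigns the next serial and writes it to the socket. */
        return sd_netlink_send(nl, req, ret_serial);
}
#endif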

int rtnl_rqueue_make_room(sd_netlink *rtnl) {
        assert(rtnl);

        if (rtnl->rqueue_size >= RTNL_RQUEUE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
                                       "rtnl: exhausted the read queue size (%d)",
                                       RTNL_RQUEUE_MAX);

        if (!GREEDY_REALLOC(rtnl->rqueue, rtnl->rqueue_allocated, rtnl->rqueue_size + 1))
                return -ENOMEM;

        return 0;
}

int rtnl_rqueue_partial_make_room(sd_netlink *rtnl) {
        assert(rtnl);

        if (rtnl->rqueue_partial_size >= RTNL_RQUEUE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS),
                                       "rtnl: exhausted the partial read queue size (%d)",
                                       RTNL_RQUEUE_MAX);

        if (!GREEDY_REALLOC(rtnl->rqueue_partial, rtnl->rqueue_partial_allocated,
                            rtnl->rqueue_partial_size + 1))
                return -ENOMEM;

        return 0;
}

static int dispatch_rqueue(sd_netlink *rtnl, sd_netlink_message **message) {
        int r;

        assert(rtnl);
        assert(message);

        if (rtnl->rqueue_size <= 0) {
                /* Try to read a new message */
                r = socket_read_message(rtnl);
                if (r == -ENOBUFS) { /* FIXME: ignore buffer overruns for now */
                        log_debug_errno(r, "Got ENOBUFS from netlink socket, ignoring.");
                        return 1;
                }
                if (r <= 0)
                        return r;
        }

        /* Dispatch a queued message */
        *message = rtnl->rqueue[0];
        rtnl->rqueue_size--;
        memmove(rtnl->rqueue, rtnl->rqueue + 1, sizeof(sd_netlink_message*) * rtnl->rqueue_size);

        return 1;
}

static int process_timeout(sd_netlink *rtnl) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        struct reply_callback *c;
        sd_netlink_slot *slot;
        usec_t n;
        int r;

        assert(rtnl);

        c = prioq_peek(rtnl->reply_callbacks_prioq);
        if (!c)
                return 0;

        n = now(CLOCK_MONOTONIC);
        if (c->timeout > n)
                return 0;

        r = rtnl_message_new_synthetic_error(rtnl, -ETIMEDOUT, c->serial, &m);
        if (r < 0)
                return r;

        assert_se(prioq_pop(rtnl->reply_callbacks_prioq) == c);
        c->timeout = 0;
        hashmap_remove(rtnl->reply_callbacks, &c->serial);

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(rtnl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: timed-out callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_reply(sd_netlink *rtnl, sd_netlink_message *m) {
        struct reply_callback *c;
        sd_netlink_slot *slot;
        uint64_t serial;
        uint16_t type;
        int r;

        assert(rtnl);
        assert(m);

        serial = rtnl_message_get_serial(m);
        c = hashmap_remove(rtnl->reply_callbacks, &serial);
        if (!c)
                return 0;

        if (c->timeout != 0) {
                prioq_remove(rtnl->reply_callbacks_prioq, c, &c->prioq_idx);
                c->timeout = 0;
        }

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        if (type == NLMSG_DONE)
                m = NULL;

        slot = container_of(c, sd_netlink_slot, reply_callback);

        r = c->callback(rtnl, m, slot->userdata);
        if (r < 0)
                log_debug_errno(r, "sd-netlink: reply callback %s%s%sfailed: %m",
                                slot->description ? "'" : "",
                                strempty(slot->description),
                                slot->description ? "' " : "");

        if (slot->floating)
                netlink_slot_disconnect(slot, true);

        return 1;
}

static int process_match(sd_netlink *rtnl, sd_netlink_message *m) {
        struct match_callback *c;
        sd_netlink_slot *slot;
        uint16_t type;
        int r;

        assert(rtnl);
        assert(m);

        r = sd_netlink_message_get_type(m, &type);
        if (r < 0)
                return r;

        LIST_FOREACH(match_callbacks, c, rtnl->match_callbacks) {
                if (type == c->type) {
                        slot = container_of(c, sd_netlink_slot, match_callback);

                        r = c->callback(rtnl, m, slot->userdata);
                        if (r != 0) {
                                if (r < 0)
                                        log_debug_errno(r, "sd-netlink: match callback %s%s%sfailed: %m",
                                                        slot->description ? "'" : "",
                                                        strempty(slot->description),
                                                        slot->description ? "' " : "");

                                break;
                        }
                }
        }

        return 1;
}

static int process_running(sd_netlink *rtnl, sd_netlink_message **ret) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *m = NULL;
        int r;

        assert(rtnl);

        r = process_timeout(rtnl);
        if (r != 0)
                goto null_message;

        r = dispatch_rqueue(rtnl, &m);
        if (r < 0)
                return r;
        if (!m)
                goto null_message;

        if (sd_netlink_message_is_broadcast(m)) {
                r = process_match(rtnl, m);
                if (r != 0)
                        goto null_message;
        } else {
                r = process_reply(rtnl, m);
                if (r != 0)
                        goto null_message;
        }

        if (ret) {
                *ret = TAKE_PTR(m);

                return 1;
        }

        return 1;

null_message:
        if (r >= 0 && ret)
                *ret = NULL;

        return r;
}

int sd_netlink_process(sd_netlink *rtnl, sd_netlink_message **ret) {
        NETLINK_DONT_DESTROY(rtnl);
        int r;

        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);
        assert_return(!rtnl->processing, -EBUSY);

        rtnl->processing = true;
        r = process_running(rtnl, ret);
        rtnl->processing = false;

        return r;
}

static usec_t calc_elapse(uint64_t usec) {
        if (usec == (uint64_t) -1)
                return 0;

        if (usec == 0)
                usec = RTNL_DEFAULT_TIMEOUT;

        return now(CLOCK_MONOTONIC) + usec;
}

static int rtnl_poll(sd_netlink *rtnl, bool need_more, uint64_t timeout_usec) {
        struct pollfd p[1] = {};
        struct timespec ts;
        usec_t m = USEC_INFINITY;
        int r, e;

        assert(rtnl);

        e = sd_netlink_get_events(rtnl);
        if (e < 0)
                return e;

        if (need_more)
                /* Caller wants more data, and doesn't care about
                 * what's been read or any other timeouts. */
                e |= POLLIN;
        else {
                usec_t until;
                /* Caller wants to process if there is something to
                 * process, but doesn't care otherwise */

                r = sd_netlink_get_timeout(rtnl, &until);
                if (r < 0)
                        return r;
                if (r > 0) {
                        usec_t nw;
                        nw = now(CLOCK_MONOTONIC);
                        m = until > nw ? until - nw : 0;
                }
        }

        if (timeout_usec != (uint64_t) -1 && (m == (uint64_t) -1 || timeout_usec < m))
                m = timeout_usec;

        p[0].fd = rtnl->fd;
        p[0].events = e;

        r = ppoll(p, 1, m == (uint64_t) -1 ? NULL : timespec_store(&ts, m), NULL);
        if (r < 0)
                return -errno;

        return r > 0 ? 1 : 0;
}

int sd_netlink_wait(sd_netlink *nl, uint64_t timeout_usec) {
        assert_return(nl, -EINVAL);
        assert_return(!rtnl_pid_changed(nl), -ECHILD);

        if (nl->rqueue_size > 0)
                return 0;

        return rtnl_poll(nl, false, timeout_usec);
}
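
/* Illustrative sketch, not part of the upstream source: driving the connection without an
 * sd-event loop by alternating sd_netlink_wait() and sd_netlink_process(). The example_*
 * name is made up. */
#if 0
static int example_manual_loop(sd_netlink *nl) {
        int r;

        for (;;) {
                /* Block until there is something to dispatch or a reply timeout expires. */
                r = sd_netlink_wait(nl, (uint64_t) -1);
                if (r < 0)
                        return r;

                /* sd_netlink_process() returns > 0 while it made progress and 0 once there
                 * is nothing left to do. */
                do {
                        r = sd_netlink_process(nl, NULL);
                        if (r < 0)
                                return r;
                } while (r > 0);
        }
}
#endif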

static int timeout_compare(const void *a, const void *b) {
        const struct reply_callback *x = a, *y = b;

        if (x->timeout != 0 && y->timeout == 0)
                return -1;

        if (x->timeout == 0 && y->timeout != 0)
                return 1;

        return CMP(x->timeout, y->timeout);
}

int sd_netlink_call_async(
                sd_netlink *nl,
                sd_netlink_slot **ret_slot,
                sd_netlink_message *m,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                uint64_t usec,
                const char *description) {
        _cleanup_free_ sd_netlink_slot *slot = NULL;
        uint32_t s;
        int r, k;

        assert_return(nl, -EINVAL);
        assert_return(m, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!rtnl_pid_changed(nl), -ECHILD);

        r = hashmap_ensure_allocated(&nl->reply_callbacks, &uint64_hash_ops);
        if (r < 0)
                return r;

        if (usec != (uint64_t) -1) {
                r = prioq_ensure_allocated(&nl->reply_callbacks_prioq, timeout_compare);
                if (r < 0)
                        return r;
        }

        r = netlink_slot_allocate(nl, !ret_slot, NETLINK_REPLY_CALLBACK, sizeof(struct reply_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->reply_callback.callback = callback;
        slot->reply_callback.timeout = calc_elapse(usec);

        k = sd_netlink_send(nl, m, &s);
        if (k < 0)
                return k;

        slot->reply_callback.serial = s;

        r = hashmap_put(nl->reply_callbacks, &slot->reply_callback.serial, &slot->reply_callback);
        if (r < 0)
                return r;

        if (slot->reply_callback.timeout != 0) {
                r = prioq_put(nl->reply_callbacks_prioq, &slot->reply_callback, &slot->reply_callback.prioq_idx);
                if (r < 0) {
                        (void) hashmap_remove(nl->reply_callbacks, &slot->reply_callback.serial);
                        return r;
                }
        }

        /* Set this only at the end. Otherwise, the destroy callback would be invoked on some of
         * the failure paths above, but not on others. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);

        return k;
}
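
/* Illustrative sketch, not part of the upstream source: issuing a request asynchronously.
 * The reply (or a synthetic -ETIMEDOUT error message, see process_timeout() above) is
 * delivered to the callback from within sd_netlink_process(). The example_* names are
 * made up. */
#if 0
static int example_getlink_handler(sd_netlink *nl, sd_netlink_message *reply, void *userdata) {
        const char *name = NULL;
        int r;

        r = sd_netlink_message_get_errno(reply);
        if (r < 0)
                return 0; /* the request failed or timed out */

        (void) sd_netlink_message_read_string(reply, IFLA_IFNAME, &name);
        return 0;
}

static int example_call_async(sd_netlink *nl, int ifindex) {
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL;
        int r;

        r = sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, ifindex);
        if (r < 0)
                return r;

        /* usec == 0 selects RTNL_DEFAULT_TIMEOUT, see calc_elapse() above. Passing NULL for
         * ret_slot makes the slot "floating", so it is cleaned up after the callback ran. */
        return sd_netlink_call_async(nl, NULL, req, example_getlink_handler,
                                     NULL, NULL, 0, "example-getlink");
}
#endif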

int sd_netlink_call(sd_netlink *rtnl,
                    sd_netlink_message *message,
                    uint64_t usec,
                    sd_netlink_message **ret) {
        usec_t timeout;
        uint32_t serial;
        int r;

        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);
        assert_return(message, -EINVAL);

        r = sd_netlink_send(rtnl, message, &serial);
        if (r < 0)
                return r;

        timeout = calc_elapse(usec);

        for (;;) {
                usec_t left;
                unsigned i;

                for (i = 0; i < rtnl->rqueue_size; i++) {
                        uint32_t received_serial;

                        received_serial = rtnl_message_get_serial(rtnl->rqueue[i]);

                        if (received_serial == serial) {
                                _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *incoming = NULL;
                                uint16_t type;

                                incoming = rtnl->rqueue[i];

                                /* found a match, remove from rqueue and return it */
                                memmove(rtnl->rqueue + i, rtnl->rqueue + i + 1,
                                        sizeof(sd_netlink_message*) * (rtnl->rqueue_size - i - 1));
                                rtnl->rqueue_size--;

                                r = sd_netlink_message_get_errno(incoming);
                                if (r < 0)
                                        return r;

                                r = sd_netlink_message_get_type(incoming, &type);
                                if (r < 0)
                                        return r;

                                if (type == NLMSG_DONE) {
                                        if (ret)
                                                *ret = NULL;
                                        return 0;
                                }

                                if (ret)
                                        *ret = TAKE_PTR(incoming);

                                return 1;
                        }
                }

                r = socket_read_message(rtnl);
                if (r < 0)
                        return r;
                if (r > 0)
                        /* received message, so try to process straight away */
                        continue;

                if (timeout > 0) {
                        usec_t n;

                        n = now(CLOCK_MONOTONIC);
                        if (n >= timeout)
                                return -ETIMEDOUT;

                        left = timeout - n;
                } else
                        left = (uint64_t) -1;

                r = rtnl_poll(rtnl, true, left);
                if (r < 0)
                        return r;
                else if (r == 0)
                        return -ETIMEDOUT;
        }
}
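
/* Illustrative sketch, not part of the upstream source: a synchronous round trip that opens
 * a NETLINK_ROUTE connection, queries one link and copies out its name. The example_* name
 * is made up. */
#if 0
static int example_get_link_name(int ifindex, char **ret_name) {
        _cleanup_(sd_netlink_unrefp) sd_netlink *nl = NULL;
        _cleanup_(sd_netlink_message_unrefp) sd_netlink_message *req = NULL, *reply = NULL;
        const char *name;
        int r;

        r = sd_netlink_open(&nl);
        if (r < 0)
                return r;

        r = sd_rtnl_message_new_link(nl, &req, RTM_GETLINK, ifindex);
        if (r < 0)
                return r;

        /* usec == 0 selects RTNL_DEFAULT_TIMEOUT, see calc_elapse() above. */
        r = sd_netlink_call(nl, req, 0, &reply);
        if (r < 0)
                return r;

        r = sd_netlink_message_read_string(reply, IFLA_IFNAME, &name);
        if (r < 0)
                return r;

        return free_and_strdup(ret_name, name);
}
#endif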

int sd_netlink_get_events(const sd_netlink *rtnl) {
        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);

        if (rtnl->rqueue_size == 0)
                return POLLIN;
        else
                return 0;
}

int sd_netlink_get_timeout(const sd_netlink *rtnl, uint64_t *timeout_usec) {
        struct reply_callback *c;

        assert_return(rtnl, -EINVAL);
        assert_return(timeout_usec, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);

        if (rtnl->rqueue_size > 0) {
                *timeout_usec = 0;
                return 1;
        }

        c = prioq_peek(rtnl->reply_callbacks_prioq);
        if (!c) {
                *timeout_usec = (uint64_t) -1;
                return 0;
        }

        *timeout_usec = c->timeout;

        return 1;
}

static int io_callback(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        sd_netlink *rtnl = userdata;
        int r;

        assert(rtnl);

        r = sd_netlink_process(rtnl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int time_callback(sd_event_source *s, uint64_t usec, void *userdata) {
        sd_netlink *rtnl = userdata;
        int r;

        assert(rtnl);

        r = sd_netlink_process(rtnl, NULL);
        if (r < 0)
                return r;

        return 1;
}

static int prepare_callback(sd_event_source *s, void *userdata) {
        sd_netlink *rtnl = userdata;
        int r, e;
        usec_t until;

        assert(s);
        assert(rtnl);

        e = sd_netlink_get_events(rtnl);
        if (e < 0)
                return e;

        r = sd_event_source_set_io_events(rtnl->io_event_source, e);
        if (r < 0)
                return r;

        r = sd_netlink_get_timeout(rtnl, &until);
        if (r < 0)
                return r;
        if (r > 0) {
                int j;

                j = sd_event_source_set_time(rtnl->time_event_source, until);
                if (j < 0)
                        return j;
        }

        r = sd_event_source_set_enabled(rtnl->time_event_source, r > 0);
        if (r < 0)
                return r;

        return 1;
}

int sd_netlink_attach_event(sd_netlink *rtnl, sd_event *event, int64_t priority) {
        int r;

        assert_return(rtnl, -EINVAL);
        assert_return(!rtnl->event, -EBUSY);

        assert(!rtnl->io_event_source);
        assert(!rtnl->time_event_source);

        if (event)
                rtnl->event = sd_event_ref(event);
        else {
                r = sd_event_default(&rtnl->event);
                if (r < 0)
                        return r;
        }

        r = sd_event_add_io(rtnl->event, &rtnl->io_event_source, rtnl->fd, 0, io_callback, rtnl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(rtnl->io_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(rtnl->io_event_source, "rtnl-receive-message");
        if (r < 0)
                goto fail;

        r = sd_event_source_set_prepare(rtnl->io_event_source, prepare_callback);
        if (r < 0)
                goto fail;

        r = sd_event_add_time(rtnl->event, &rtnl->time_event_source, CLOCK_MONOTONIC, 0, 0, time_callback, rtnl);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_priority(rtnl->time_event_source, priority);
        if (r < 0)
                goto fail;

        r = sd_event_source_set_description(rtnl->time_event_source, "rtnl-timer");
        if (r < 0)
                goto fail;

        return 0;

fail:
        sd_netlink_detach_event(rtnl);
        return r;
}
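
/* Illustrative sketch, not part of the upstream source: letting an sd-event loop drive the
 * connection; io_callback() and time_callback() above then invoke sd_netlink_process(). The
 * example_* name is made up. */
#if 0
static int example_run_with_event_loop(sd_netlink *nl) {
        _cleanup_(sd_event_unrefp) sd_event *e = NULL;
        int r;

        r = sd_event_default(&e);
        if (r < 0)
                return r;

        r = sd_netlink_attach_event(nl, e, SD_EVENT_PRIORITY_NORMAL);
        if (r < 0)
                return r;

        r = sd_event_loop(e);

        (void) sd_netlink_detach_event(nl);
        return r;
}
#endif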

int sd_netlink_detach_event(sd_netlink *rtnl) {
        assert_return(rtnl, -EINVAL);
        assert_return(rtnl->event, -ENXIO);

        rtnl->io_event_source = sd_event_source_unref(rtnl->io_event_source);

        rtnl->time_event_source = sd_event_source_unref(rtnl->time_event_source);

        rtnl->event = sd_event_unref(rtnl->event);

        return 0;
}

int sd_netlink_add_match(
                sd_netlink *rtnl,
                sd_netlink_slot **ret_slot,
                uint16_t type,
                sd_netlink_message_handler_t callback,
                sd_netlink_destroy_t destroy_callback,
                void *userdata,
                const char *description) {
        _cleanup_free_ sd_netlink_slot *slot = NULL;
        int r;

        assert_return(rtnl, -EINVAL);
        assert_return(callback, -EINVAL);
        assert_return(!rtnl_pid_changed(rtnl), -ECHILD);

        r = netlink_slot_allocate(rtnl, !ret_slot, NETLINK_MATCH_CALLBACK, sizeof(struct match_callback), userdata, description, &slot);
        if (r < 0)
                return r;

        slot->match_callback.callback = callback;
        slot->match_callback.type = type;

        switch (type) {
        case RTM_NEWLINK:
        case RTM_DELLINK:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_LINK);
                if (r < 0)
                        return r;

                break;
        case RTM_NEWADDR:
        case RTM_DELADDR:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV4_IFADDR);
                if (r < 0)
                        return r;

                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV6_IFADDR);
                if (r < 0)
                        return r;

                break;
        case RTM_NEWNEIGH:
        case RTM_DELNEIGH:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_NEIGH);
                if (r < 0)
                        return r;

                break;
        case RTM_NEWROUTE:
        case RTM_DELROUTE:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV4_ROUTE);
                if (r < 0)
                        return r;

                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV6_ROUTE);
                if (r < 0)
                        return r;
                break;
        case RTM_NEWRULE:
        case RTM_DELRULE:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV4_RULE);
                if (r < 0)
                        return r;

                r = socket_broadcast_group_ref(rtnl, RTNLGRP_IPV6_RULE);
                if (r < 0)
                        return r;
                break;
        case RTM_NEWNEXTHOP:
        case RTM_DELNEXTHOP:
                r = socket_broadcast_group_ref(rtnl, RTNLGRP_NEXTHOP);
                if (r < 0)
                        return r;
                break;

        default:
                return -EOPNOTSUPP;
        }

        LIST_PREPEND(match_callbacks, rtnl->match_callbacks, &slot->match_callback);

        /* Set this only at the end. Otherwise, the destroy callback would be invoked on some of
         * the failure paths above, but not on others. */
        slot->destroy_callback = destroy_callback;

        if (ret_slot)
                *ret_slot = slot;

        TAKE_PTR(slot);

        return 0;
}
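
/* Illustrative sketch, not part of the upstream source: subscribing to link change
 * notifications. Only the message types handled by the switch above are accepted; each
 * match only sees the type it was registered for. The example_* names are made up. */
#if 0
static int example_link_event_handler(sd_netlink *nl, sd_netlink_message *m, void *userdata) {
        const char *name = NULL;
        uint16_t type;

        if (sd_netlink_message_get_type(m, &type) < 0)
                return 0;

        /* type is RTM_NEWLINK or RTM_DELLINK here, depending on which match fired. */
        (void) sd_netlink_message_read_string(m, IFLA_IFNAME, &name);
        return 0;
}

static int example_watch_links(sd_netlink *nl, sd_netlink_slot **ret_slot) {
        int r;

        r = sd_netlink_add_match(nl, ret_slot, RTM_NEWLINK, example_link_event_handler,
                                 NULL, NULL, "example-watch-newlink");
        if (r < 0)
                return r;

        /* Passing NULL for ret_slot lets this match float, i.e. it stays registered until
         * the sd_netlink object itself is freed. */
        return sd_netlink_add_match(nl, NULL, RTM_DELLINK, example_link_event_handler,
                                    NULL, NULL, "example-watch-dellink");
}
#endif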