1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
5 #include "sd-netlink.h"
7 #include "alloc-util.h"
12 #include "netlink-genl.h"
13 #include "netlink-internal.h"
14 #include "netlink-slot.h"
15 #include "process-util.h"
16 #include "socket-util.h"
17 #include "string-util.h"
19 /* Some really high limit, to catch programming errors */
20 #define REPLY_CALLBACKS_MAX UINT16_MAX
/* netlink_new(): allocate a fresh sd_netlink object and perform the
 * protocol-independent initialization: netlink address family, creating
 * PID (for fork detection), a clock-derived initial serial number, and a
 * read buffer big enough for at least one nlmsghdr.
 * On success *ret takes the new reference; negative errno-style error on
 * failure. NOTE(review): several lines (allocation check, remaining field
 * initializers, return) are elided from this excerpt. */
22 static int netlink_new(sd_netlink
**ret
) {
23 _cleanup_(sd_netlink_unrefp
) sd_netlink
*nl
= NULL
;
25 assert_return(ret
, -EINVAL
);
27 nl
= new(sd_netlink
, 1);
/* Designated initializers for the new object follow (the initializer
 * list opening and the allocation-failure check are elided here). */
34 .sockaddr
.nl
.nl_family
= AF_NETLINK
,
35 .original_pid
= getpid_cached(),
38 /* Kernel change notification messages have sequence number 0. We want to avoid that with our
39 * own serials, in order not to get confused when matching up kernel replies to our earlier
42 * Moreover, when using netlink socket activation (i.e. where PID 1 binds an AF_NETLINK
43 * socket for us and passes it to us across execve()) and we get restarted multiple times
44 * while the socket sticks around we might get confused by replies from earlier runs coming
45 * in late — which is pretty likely if we'd start our sequence numbers always from 1. Hence,
46 * let's start with a value based on the system clock. This should make collisions much less
47 * likely (though still theoretically possible). We use a 32 bit µs counter starting at boot
48 * for this (and explicitly exclude the zero, see above). This counter will wrap around after
49 * a bit more than 1h, but that's hopefully OK as the kernel shouldn't take that long to
50 * reply to our requests.
52 * We only pick the initial start value this way. For each message we simply increase the
53 * sequence number by 1. This means we could enqueue 1 netlink message per µs without risking
54 * collisions, which should be OK.
56 * Note this means the serials will be in the range 1…UINT32_MAX here.
58 * (In an ideal world we'd attach the current serial counter to the netlink socket itself
59 * somehow, to avoid all this, but I couldn't come up with a nice way to do this) */
60 .serial
= (uint32_t) (now(CLOCK_MONOTONIC
) % UINT32_MAX
) + 1,
63 /* We guarantee that the read buffer has at least space for a message header */
64 if (!greedy_realloc((void**) &nl
->rbuffer
, sizeof(struct nlmsghdr
), sizeof(uint8_t)))
/* sd_netlink_new_from_fd(): build an sd_netlink object around an already
 * existing socket 'fd'. Queries the socket's bound address via
 * getsockname() and rejects any socket whose family is not AF_NETLINK.
 * NOTE(review): the netlink_new() call, addrlen declaration, error returns
 * and the success path are elided from this excerpt. */
71 int sd_netlink_new_from_fd(sd_netlink
**ret
, int fd
) {
72 _cleanup_(sd_netlink_unrefp
) sd_netlink
*nl
= NULL
;
76 assert_return(ret
, -EINVAL
);
/* Query the local (bound) address of the passed-in socket. */
82 addrlen
= sizeof(nl
->sockaddr
);
84 if (getsockname(fd
, &nl
->sockaddr
.sa
, &addrlen
) < 0)
/* Only AF_NETLINK sockets are acceptable here. */
87 if (nl
->sockaddr
.nl
.nl_family
!= AF_NETLINK
)
/* sd_netlink_open_fd(): take ownership of an existing netlink socket 'fd'
 * and wrap it in a new sd_netlink object. The socket's protocol is read
 * back via SO_PROTOCOL; NETLINK_EXT_ACK and NETLINK_GET_STRICT_CHK are
 * enabled best-effort (failure is only logged at debug level, not fatal).
 * On failure the caller keeps ownership of the fd (see below).
 * NOTE(review): the fd assignment into nl, 'protocol' declaration, error
 * checks and the success return are elided from this excerpt. */
96 int sd_netlink_open_fd(sd_netlink
**ret
, int fd
) {
97 _cleanup_(sd_netlink_unrefp
) sd_netlink
*nl
= NULL
;
100 assert_return(ret
, -EINVAL
);
101 assert_return(fd
>= 0, -EBADF
);
103 r
= netlink_new(&nl
);
/* Read back which netlink protocol this socket was created for. */
107 r
= getsockopt_int(fd
, SOL_SOCKET
, SO_PROTOCOL
, &protocol
);
112 nl
->protocol
= protocol
;
/* Best-effort: richer error reporting from the kernel, if supported. */
114 r
= setsockopt_int(fd
, SOL_NETLINK
, NETLINK_EXT_ACK
, true);
116 log_debug_errno(r
, "sd-netlink: Failed to enable NETLINK_EXT_ACK option, ignoring: %m");
/* Best-effort: stricter kernel-side validation of dump requests. */
118 r
= setsockopt_int(fd
, SOL_NETLINK
, NETLINK_GET_STRICT_CHK
, true);
120 log_debug_errno(r
, "sd-netlink: Failed to enable NETLINK_GET_STRICT_CHK option, ignoring: %m");
124 nl
->fd
= -1; /* on failure, the caller remains owner of the fd, hence don't close it here */
/* netlink_open_family(): open a new netlink socket of the given protocol
 * 'family' and wrap it in an sd_netlink object via sd_netlink_open_fd().
 * The fd is _cleanup_close_, so it is closed automatically on the failure
 * paths; presumably ownership is released (TAKE_FD) on success — that line
 * is elided from this excerpt, TODO confirm. */
134 int netlink_open_family(sd_netlink
**ret
, int family
) {
135 _cleanup_close_
int fd
= -1;
138 fd
= socket_open(family
);
142 r
= sd_netlink_open_fd(ret
, fd
);
150 int sd_netlink_open(sd_netlink
**ret
) {
151 return netlink_open_family(ret
, NETLINK_ROUTE
);
/* netlink_pid_changed(): returns true if the calling process' PID differs
 * from the PID that created this sd_netlink object, i.e. the object was
 * carried across a fork(), which is unsupported. */
154 bool netlink_pid_changed(sd_netlink
*nl
) {
157 /* We don't support people creating an nl connection and
158 * keeping it around over a fork(). Let's complain. */
160 return nl
->original_pid
!= getpid_cached();
163 int sd_netlink_inc_rcvbuf(sd_netlink
*nl
, size_t size
) {
164 assert_return(nl
, -EINVAL
);
165 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
167 return fd_inc_rcvbuf(nl
->fd
, size
);
/* netlink_free(): destructor invoked when the last reference is dropped
 * (see DEFINE_TRIVIAL_REF_UNREF_FUNC below). Releases everything the
 * object owns: queued incoming messages (full and partial read queues),
 * all registered slots, the reply-callback bookkeeping, attached event
 * sources, broadcast-group refcounts and the cached generic-netlink
 * family info. NOTE(review): loop variable declaration, free(nl->rqueue),
 * fd close and the final return are elided from this excerpt. */
170 static sd_netlink
*netlink_free(sd_netlink
*nl
) {
/* Drop all fully-received but not yet dispatched messages. */
176 for (i
= 0; i
< nl
->rqueue_size
; i
++)
177 sd_netlink_message_unref(nl
->rqueue
[i
]);
/* Drop all partially-assembled (multi-part) messages as well. */
180 for (i
= 0; i
< nl
->rqueue_partial_size
; i
++)
181 sd_netlink_message_unref(nl
->rqueue_partial
[i
]);
182 free(nl
->rqueue_partial
);
/* Disconnect every remaining slot; 'true' presumably means the slot's
 * destroy callback is invoked — TODO confirm against netlink-slot.c. */
186 while ((s
= nl
->slots
)) {
188 netlink_slot_disconnect(s
, true);
190 hashmap_free(nl
->reply_callbacks
);
191 prioq_free(nl
->reply_callbacks_prioq
);
193 sd_event_source_unref(nl
->io_event_source
);
194 sd_event_source_unref(nl
->time_event_source
);
195 sd_event_unref(nl
->event
);
197 hashmap_free(nl
->broadcast_group_refs
);
199 genl_clear_family(nl
);
/* Generates the public sd_netlink_ref()/sd_netlink_unref() reference
 * counting functions, with netlink_free() as the destructor. */
205 DEFINE_TRIVIAL_REF_UNREF_FUNC(sd_netlink
, sd_netlink
, netlink_free
);
/* netlink_seal_message(): assign the next free serial number to message
 * 'm' and store it in the nlmsghdr. The serial counter wraps from
 * UINT32_MAX back to 1 (never 0, which the kernel uses for broadcasts),
 * and serials that still have an outstanding reply callback registered
 * are skipped. NOTE(review): the asserts on m, the 'picked' declaration,
 * the do-loop opening and the sealed-flag assignment are elided. */
207 static void netlink_seal_message(sd_netlink
*nl
, sd_netlink_message
*m
) {
211 assert(!netlink_pid_changed(nl
));
215 /* Avoid collisions with outstanding requests */
219 /* Don't use seq == 0, as that is used for broadcasts, so we would get confused by replies to
/* Advance the counter, wrapping UINT32_MAX -> 1 to skip zero. */
221 nl
->serial
= nl
->serial
== UINT32_MAX
? 1 : nl
->serial
+ 1;
223 } while (hashmap_contains(nl
->reply_callbacks
, UINT32_TO_PTR(picked
)));
225 m
->hdr
->nlmsg_seq
= picked
;
/* sd_netlink_send() (signature line elided in this excerpt): seal the
 * message with a fresh serial and write it to the netlink socket.
 * Refuses already-sealed messages (-EPERM). If the caller passed a
 * 'serial' out-pointer, the assigned serial is reported back (the NULL
 * check around that is elided). Returns negative errno-style on error. */
231 sd_netlink_message
*message
,
236 assert_return(nl
, -EINVAL
);
237 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
238 assert_return(message
, -EINVAL
);
239 assert_return(!message
->sealed
, -EPERM
);
241 netlink_seal_message(nl
, message
);
243 r
= socket_write_message(nl
, message
);
/* Report the serial that was assigned during sealing, if requested. */
248 *serial
= message_get_serial(message
);
/* sd_netlink_sendv(): batch variant of sd_netlink_send(). Seals each of
 * the 'msgcount' messages (refusing any already-sealed one with -EPERM),
 * records their serials, writes them all with one writev-style call, and
 * on success hands the newly-allocated serial array to the caller via
 * *ret_serial (ownership transferred with TAKE_PTR; caller must free).
 * NOTE(review): the nl/msgcount parameters, OOM check and returns are
 * elided from this excerpt. */
253 int sd_netlink_sendv(
255 sd_netlink_message
**messages
,
257 uint32_t **ret_serial
) {
259 _cleanup_free_
uint32_t *serials
= NULL
;
262 assert_return(nl
, -EINVAL
);
263 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
264 assert_return(messages
, -EINVAL
);
265 assert_return(msgcount
> 0, -EINVAL
);
/* One serial per message; freed automatically unless handed out below. */
268 serials
= new(uint32_t, msgcount
);
273 for (unsigned i
= 0; i
< msgcount
; i
++) {
274 assert_return(!messages
[i
]->sealed
, -EPERM
);
276 netlink_seal_message(nl
, messages
[i
]);
278 serials
[i
] = message_get_serial(messages
[i
]);
281 r
= socket_writev_message(nl
, messages
, msgcount
);
286 *ret_serial
= TAKE_PTR(serials
);
/* netlink_rqueue_make_room(): ensure the read queue can take one more
 * message. Fails with ENOBUFS once NETLINK_RQUEUE_MAX entries are queued
 * (guards against unbounded memory use when the consumer stalls); grows
 * the array by one slot otherwise. Returns are elided in this excerpt. */
291 int netlink_rqueue_make_room(sd_netlink
*nl
) {
294 if (nl
->rqueue_size
>= NETLINK_RQUEUE_MAX
)
295 return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS
),
296 "sd-netlink: exhausted the read queue size (%d)",
299 if (!GREEDY_REALLOC(nl
->rqueue
, nl
->rqueue_size
+ 1))
/* netlink_rqueue_partial_make_room(): same as netlink_rqueue_make_room()
 * but for the queue of partially-received (multi-part) messages; capped
 * at the same NETLINK_RQUEUE_MAX limit. */
305 int netlink_rqueue_partial_make_room(sd_netlink
*nl
) {
308 if (nl
->rqueue_partial_size
>= NETLINK_RQUEUE_MAX
)
309 return log_debug_errno(SYNTHETIC_ERRNO(ENOBUFS
),
310 "sd-netlink: exhausted the partial read queue size (%d)",
313 if (!GREEDY_REALLOC(nl
->rqueue_partial
, nl
->rqueue_partial_size
+ 1))
/* dispatch_rqueue(): hand out the oldest queued incoming message via
 * *message. If the queue is empty, attempt a single socket read first;
 * ENOBUFS from the socket (kernel-side overrun) is deliberately logged
 * and swallowed. NOTE(review): several lines are elided, including the
 * decrement of nl->rqueue_size that must precede the memmove below —
 * the memmove as shown relies on that already having happened. */
319 static int dispatch_rqueue(sd_netlink
*nl
, sd_netlink_message
**message
) {
325 if (nl
->rqueue_size
<= 0) {
326 /* Try to read a new message */
327 r
= socket_read_message(nl
);
328 if (r
== -ENOBUFS
) { /* FIXME: ignore buffer overruns for now */
329 log_debug_errno(r
, "sd-netlink: Got ENOBUFS from netlink socket, ignoring.");
336 /* Dispatch a queued message */
337 *message
= nl
->rqueue
[0];
/* Shift the remaining entries down one position (queue head removal). */
339 memmove(nl
->rqueue
, nl
->rqueue
+ 1, sizeof(sd_netlink_message
*) * nl
->rqueue_size
);
/* process_timeout(): check whether the earliest-expiring registered reply
 * callback has passed its deadline. If so: synthesize an ETIMEDOUT error
 * message for its serial, remove the callback from both the priority
 * queue and the serial hashmap, invoke the user callback with the
 * synthetic message, log (but otherwise ignore) callback failure, and
 * disconnect the slot. NOTE(review): the early-exit checks (empty prioq,
 * deadline not yet reached) and returns are elided in this excerpt. */
344 static int process_timeout(sd_netlink
*nl
) {
345 _cleanup_(sd_netlink_message_unrefp
) sd_netlink_message
*m
= NULL
;
346 struct reply_callback
*c
;
347 sd_netlink_slot
*slot
;
/* The prioq is ordered by deadline, so peeking yields the next expiry. */
353 c
= prioq_peek(nl
->reply_callbacks_prioq
);
357 n
= now(CLOCK_MONOTONIC
);
/* Build a synthetic -ETIMEDOUT reply carrying the callback's serial. */
361 r
= message_new_synthetic_error(nl
, -ETIMEDOUT
, c
->serial
, &m
);
365 assert_se(prioq_pop(nl
->reply_callbacks_prioq
) == c
);
367 hashmap_remove(nl
->reply_callbacks
, UINT32_TO_PTR(c
->serial
));
/* Recover the owning slot from the embedded reply_callback member. */
369 slot
= container_of(c
, sd_netlink_slot
, reply_callback
);
371 r
= c
->callback(nl
, m
, slot
->userdata
);
373 log_debug_errno(r
, "sd-netlink: timedout callback %s%s%sfailed: %m",
374 slot
->description
? "'" : "",
375 strempty(slot
->description
),
376 slot
->description
? "' " : "");
379 netlink_slot_disconnect(slot
, true);
/* process_reply(): route an incoming unicast message 'm' to the reply
 * callback registered for its serial, if any. The callback is removed
 * from the hashmap (and from the timeout prioq, when it had a deadline),
 * then invoked; failures are logged at debug level only, and the slot is
 * disconnected afterwards. NOTE(review): the no-callback early return,
 * the NLMSG_DONE branch body and the 'type'/'serial' declarations are
 * elided in this excerpt. */
384 static int process_reply(sd_netlink
*nl
, sd_netlink_message
*m
) {
385 struct reply_callback
*c
;
386 sd_netlink_slot
*slot
;
394 serial
= message_get_serial(m
);
395 c
= hashmap_remove(nl
->reply_callbacks
, UINT32_TO_PTR(serial
));
/* Only callbacks with a deadline were ever added to the prioq. */
399 if (c
->timeout
!= 0) {
400 prioq_remove(nl
->reply_callbacks_prioq
, c
, &c
->prioq_idx
);
404 r
= sd_netlink_message_get_type(m
, &type
);
/* NLMSG_DONE terminates a multi-part reply; its handling is elided
 * here (presumably the message is not passed to the callback). */
408 if (type
== NLMSG_DONE
)
411 slot
= container_of(c
, sd_netlink_slot
, reply_callback
);
413 r
= c
->callback(nl
, m
, slot
->userdata
);
415 log_debug_errno(r
, "sd-netlink: reply callback %s%s%sfailed: %m",
416 slot
->description
? "'" : "",
417 strempty(slot
->description
),
418 slot
->description
? "' " : "");
421 netlink_slot_disconnect(slot
, true);
/* process_match(): dispatch an incoming broadcast/multicast message 'm'
 * to every registered match callback whose filters accept it. Filters
 * visible here: generic-netlink command (when the message is
 * NETLINK_GENERIC) and multicast group membership; the message-type
 * filter and the group-mismatch 'continue' path are elided. Callback
 * failures are logged at debug level and iteration continues. */
426 static int process_match(sd_netlink
*nl
, sd_netlink_message
*m
) {
434 r
= sd_netlink_message_get_type(m
, &type
);
/* For generic netlink, matching is additionally done per command. */
438 if (m
->protocol
== NETLINK_GENERIC
) {
439 r
= sd_genl_message_get_command(nl
, m
, &cmd
);
445 LIST_FOREACH(match_callbacks
, c
, nl
->match_callbacks
) {
446 sd_netlink_slot
*slot
;
/* cmd == 0 in the filter means "any command". */
451 if (c
->cmd
!= 0 && c
->cmd
!= cmd
)
/* Accept only if the message's multicast group is one the
 * callback subscribed to. */
454 for (size_t i
= 0; i
< c
->n_groups
; i
++)
455 if (c
->groups
[i
] == m
->multicast_group
) {
463 slot
= container_of(c
, sd_netlink_slot
, match_callback
);
465 r
= c
->callback(nl
, m
, slot
->userdata
);
467 log_debug_errno(r
, "sd-netlink: match callback %s%s%sfailed: %m",
468 slot
->description
? "'" : "",
469 strempty(slot
->description
),
470 slot
->description
? "' " : "");
/* process_running(): one iteration of the processing state machine.
 * First fire any expired reply-callback timeouts, then pull one message
 * off the read queue and route it: broadcasts go to the match callbacks,
 * everything else to the per-serial reply callbacks. NOTE(review): the
 * error/empty early-returns and the *ret hand-off are elided here. */
478 static int process_running(sd_netlink
*nl
, sd_netlink_message
**ret
) {
479 _cleanup_(sd_netlink_message_unrefp
) sd_netlink_message
*m
= NULL
;
484 r
= process_timeout(nl
);
488 r
= dispatch_rqueue(nl
, &m
);
494 if (sd_netlink_message_is_broadcast(m
))
495 r
= process_match(nl
, m
);
497 r
= process_reply(nl
, m
);
/* sd_netlink_process(): public entry point for pumping the connection.
 * Non-reentrant: a nested call from inside a callback is rejected with
 * -EBUSY via the nl->processing guard. NETLINK_DONT_DESTROY keeps the
 * object alive even if a callback drops the last external reference
 * while we are dispatching. Returns process_running()'s result (the
 * return statement itself is elided in this excerpt). */
516 int sd_netlink_process(sd_netlink
*nl
, sd_netlink_message
**ret
) {
517 NETLINK_DONT_DESTROY(nl
);
520 assert_return(nl
, -EINVAL
);
521 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
522 assert_return(!nl
->processing
, -EBUSY
);
524 nl
->processing
= true;
525 r
= process_running(nl
, ret
);
526 nl
->processing
= false;
/* calc_elapse(): convert a caller-supplied relative timeout into an
 * absolute CLOCK_MONOTONIC deadline. UINT64_MAX means "no timeout"
 * (its return value is elided here — presumably 0, TODO confirm);
 * 0 selects the library default NETLINK_DEFAULT_TIMEOUT_USEC. */
531 static usec_t
calc_elapse(uint64_t usec
) {
532 if (usec
== UINT64_MAX
)
536 usec
= NETLINK_DEFAULT_TIMEOUT_USEC
;
538 return usec_add(now(CLOCK_MONOTONIC
), usec
);
/* netlink_poll(): wait for the netlink fd to become ready. When
 * 'need_more' is set the caller wants more data regardless of queued
 * messages; otherwise the wait is additionally bounded by the earliest
 * reply-callback deadline obtained from sd_netlink_get_timeout().
 * The effective timeout is the minimum of that deadline-derived value
 * and the caller's 'timeout_usec'. Branch structure between the two
 * modes is partially elided in this excerpt. */
541 static int netlink_poll(sd_netlink
*nl
, bool need_more
, usec_t timeout_usec
) {
542 usec_t m
= USEC_INFINITY
;
547 e
= sd_netlink_get_events(nl
);
552 /* Caller wants more data, and doesn't care about
553 * what's been read or any other timeouts. */
558 /* Caller wants to process if there is something to
559 * process, but doesn't care otherwise */
561 r
= sd_netlink_get_timeout(nl
, &until
);
/* Convert the absolute deadline into a remaining-time value. */
565 m
= usec_sub_unsigned(until
, now(CLOCK_MONOTONIC
));
568 r
= fd_wait_for_event(nl
->fd
, e
, MIN(m
, timeout_usec
));
/* sd_netlink_wait(): block until a message can be processed or
 * 'timeout_usec' elapses. If messages are already queued it returns
 * immediately (that return statement is elided here), otherwise it
 * polls the socket without requesting more data than available. */
575 int sd_netlink_wait(sd_netlink
*nl
, uint64_t timeout_usec
) {
576 assert_return(nl
, -EINVAL
);
577 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
579 if (nl
->rqueue_size
> 0)
582 return netlink_poll(nl
, false, timeout_usec
);
/* timeout_compare(): prioq comparator for reply callbacks. Entries with
 * a real deadline sort ahead of entries with timeout == 0 ("no
 * deadline"); the return values of the two asymmetric branches are
 * elided here (presumably -1 and 1 respectively — TODO confirm). Ties
 * fall through to numeric comparison of the deadlines. */
585 static int timeout_compare(const void *a
, const void *b
) {
586 const struct reply_callback
*x
= a
, *y
= b
;
588 if (x
->timeout
!= 0 && y
->timeout
== 0)
591 if (x
->timeout
== 0 && y
->timeout
!= 0)
594 return CMP(x
->timeout
, y
->timeout
);
/* sd_netlink_call_async(): send 'm' and register 'callback' to be invoked
 * with the kernel's reply. A slot is allocated (returned via ret_slot if
 * the caller wants to manage its lifetime; floating otherwise), the
 * message is sent, and the callback is indexed by the assigned serial in
 * nl->reply_callbacks; callbacks with a finite 'usec' timeout are also
 * entered into the deadline prioq. Capped at REPLY_CALLBACKS_MAX
 * outstanding requests. The destroy callback is attached only at the very
 * end, so earlier failure paths never invoke it inconsistently.
 * NOTE(review): the nl/usec/userdata parameters, 'k' declaration, several
 * error checks and the final TAKE_PTR/return are elided. */
597 int sd_netlink_call_async(
599 sd_netlink_slot
**ret_slot
,
600 sd_netlink_message
*m
,
601 sd_netlink_message_handler_t callback
,
602 sd_netlink_destroy_t destroy_callback
,
605 const char *description
) {
607 _cleanup_free_ sd_netlink_slot
*slot
= NULL
;
610 assert_return(nl
, -EINVAL
);
611 assert_return(m
, -EINVAL
);
612 assert_return(callback
, -EINVAL
);
613 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
/* Guard against programming errors piling up unanswered requests. */
615 if (hashmap_size(nl
->reply_callbacks
) >= REPLY_CALLBACKS_MAX
)
618 r
= hashmap_ensure_allocated(&nl
->reply_callbacks
, &trivial_hash_ops
);
/* The deadline prioq is only needed for requests with a timeout. */
622 if (usec
!= UINT64_MAX
) {
623 r
= prioq_ensure_allocated(&nl
->reply_callbacks_prioq
, timeout_compare
);
628 r
= netlink_slot_allocate(nl
, !ret_slot
, NETLINK_REPLY_CALLBACK
, sizeof(struct reply_callback
), userdata
, description
, &slot
);
632 slot
->reply_callback
.callback
= callback
;
633 slot
->reply_callback
.timeout
= calc_elapse(usec
);
/* Sending assigns the serial we key the callback on. */
635 k
= sd_netlink_send(nl
, m
, &slot
->reply_callback
.serial
);
639 r
= hashmap_put(nl
->reply_callbacks
, UINT32_TO_PTR(slot
->reply_callback
.serial
), &slot
->reply_callback
);
643 if (slot
->reply_callback
.timeout
!= 0) {
644 r
= prioq_put(nl
->reply_callbacks_prioq
, &slot
->reply_callback
, &slot
->reply_callback
.prioq_idx
);
/* Roll back the hashmap entry if prioq insertion failed. */
646 (void) hashmap_remove(nl
->reply_callbacks
, UINT32_TO_PTR(slot
->reply_callback
.serial
));
651 /* Set this at last. Otherwise, some failures in above call the destroy callback but some do not. */
652 slot
->destroy_callback
= destroy_callback
;
/* sd_netlink_read() (signature mostly elided in this excerpt): wait for
 * and return the reply whose sequence number matches 'serial', up to the
 * deadline derived from 'usec' via calc_elapse(). Loops: scan the read
 * queue for the matching serial, and if found, detach it, translate an
 * NLMSG_ERROR payload into a return code and hand the message out via
 * *ret; otherwise read from the socket and, when nothing is pending,
 * poll until the deadline. NOTE(review): the outer for(;;), several
 * declarations and all error returns are elided. */
666 sd_netlink_message
**ret
) {
671 assert_return(nl
, -EINVAL
);
672 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
674 timeout
= calc_elapse(usec
);
/* Scan the queue of already-received messages for our serial. */
679 for (unsigned i
= 0; i
< nl
->rqueue_size
; i
++) {
680 _cleanup_(sd_netlink_message_unrefp
) sd_netlink_message
*incoming
= NULL
;
681 uint32_t received_serial
;
684 received_serial
= message_get_serial(nl
->rqueue
[i
]);
685 if (received_serial
!= serial
)
688 incoming
= nl
->rqueue
[i
];
690 /* found a match, remove from rqueue and return it */
691 memmove(nl
->rqueue
+ i
, nl
->rqueue
+ i
+ 1,
692 sizeof(sd_netlink_message
*) * (nl
->rqueue_size
- i
- 1));
/* An NLMSG_ERROR reply is surfaced as a negative return code. */
695 r
= sd_netlink_message_get_errno(incoming
);
699 r
= sd_netlink_message_get_type(incoming
, &type
);
703 if (type
== NLMSG_DONE
) {
709 *ret
= TAKE_PTR(incoming
);
/* Nothing matching queued yet: try reading more from the socket. */
713 r
= socket_read_message(nl
);
717 /* received message, so try to process straight away */
723 n
= now(CLOCK_MONOTONIC
);
727 left
= usec_sub_unsigned(timeout
, n
);
729 left
= USEC_INFINITY
;
/* Block until readable or the remaining deadline budget runs out. */
731 r
= netlink_poll(nl
, true, left
);
/* sd_netlink_call() (signature line elided): synchronous request/reply —
 * send 'message', then block in sd_netlink_read() for the reply carrying
 * the serial that sending assigned. 'serial' and 'r' declarations and
 * the error check after sending are elided in this excerpt. */
741 sd_netlink_message
*message
,
743 sd_netlink_message
**ret
) {
748 assert_return(nl
, -EINVAL
);
749 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
750 assert_return(message
, -EINVAL
);
752 r
= sd_netlink_send(nl
, message
, &serial
);
756 return sd_netlink_read(nl
, serial
, usec
, ret
);
759 int sd_netlink_get_events(sd_netlink
*nl
) {
760 assert_return(nl
, -EINVAL
);
761 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
763 return nl
->rqueue_size
== 0 ? POLLIN
: 0;
/* sd_netlink_get_timeout(): report, via *timeout_usec, the absolute
 * CLOCK_MONOTONIC time by which sd_netlink_process() should next be
 * called. Messages already queued demand immediate processing (that
 * branch's body is elided — presumably it stores 0); with no pending
 * reply callbacks the timeout is infinite (UINT64_MAX); otherwise the
 * earliest callback deadline from the prioq is used. */
766 int sd_netlink_get_timeout(sd_netlink
*nl
, uint64_t *timeout_usec
) {
767 struct reply_callback
*c
;
769 assert_return(nl
, -EINVAL
);
770 assert_return(timeout_usec
, -EINVAL
);
771 assert_return(!netlink_pid_changed(nl
), -ECHILD
);
773 if (nl
->rqueue_size
> 0) {
778 c
= prioq_peek(nl
->reply_callbacks_prioq
);
780 *timeout_usec
= UINT64_MAX
;
784 *timeout_usec
= c
->timeout
;
/* io_callback(): sd-event I/O handler for the netlink fd — simply pumps
 * the connection once via sd_netlink_process(). Error handling and
 * return are elided in this excerpt. */
789 static int io_callback(sd_event_source
*s
, int fd
, uint32_t revents
, void *userdata
) {
790 sd_netlink
*nl
= userdata
;
795 r
= sd_netlink_process(nl
, NULL
);
/* time_callback(): sd-event timer handler — fires when the earliest
 * reply-callback deadline is reached and pumps the connection so that
 * process_timeout() can deliver the synthetic ETIMEDOUT replies. */
802 static int time_callback(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
803 sd_netlink
*nl
= userdata
;
808 r
= sd_netlink_process(nl
, NULL
);
/* prepare_callback(): sd-event prepare hook run before each poll —
 * synchronizes the I/O event source's event mask with
 * sd_netlink_get_events() and (re)arms the timer event source from
 * sd_netlink_get_timeout(); the timer is enabled only when a finite
 * deadline exists (r > 0 presumably from an elided check — TODO
 * confirm). Error returns are elided in this excerpt. */
815 static int prepare_callback(sd_event_source
*s
, void *userdata
) {
816 sd_netlink
*nl
= userdata
;
823 e
= sd_netlink_get_events(nl
);
827 r
= sd_event_source_set_io_events(nl
->io_event_source
, e
);
831 r
= sd_netlink_get_timeout(nl
, &until
);
837 j
= sd_event_source_set_time(nl
->time_event_source
, until
);
842 r
= sd_event_source_set_enabled(nl
->time_event_source
, r
> 0);
/* sd_netlink_attach_event(): hook the connection into an sd-event loop so
 * it is pumped automatically. Uses the given 'event' (referenced) or the
 * thread's default loop when NULL; creates an I/O source on the fd with
 * io_callback + prepare_callback, and a CLOCK_MONOTONIC timer source with
 * time_callback, both at 'priority'. May be attached only once (-EBUSY).
 * NOTE(review): error checks, the branch selecting given-vs-default loop,
 * the success return and part of the failure path (which detaches again
 * via sd_netlink_detach_event(), visible at the end) are elided. */
849 int sd_netlink_attach_event(sd_netlink
*nl
, sd_event
*event
, int64_t priority
) {
852 assert_return(nl
, -EINVAL
);
853 assert_return(!nl
->event
, -EBUSY
);
855 assert(!nl
->io_event_source
);
856 assert(!nl
->time_event_source
);
859 nl
->event
= sd_event_ref(event
);
/* No loop supplied: fall back to the thread's default event loop. */
861 r
= sd_event_default(&nl
->event
);
866 r
= sd_event_add_io(nl
->event
, &nl
->io_event_source
, nl
->fd
, 0, io_callback
, nl
);
870 r
= sd_event_source_set_priority(nl
->io_event_source
, priority
);
874 r
= sd_event_source_set_description(nl
->io_event_source
, "netlink-receive-message");
878 r
= sd_event_source_set_prepare(nl
->io_event_source
, prepare_callback
);
882 r
= sd_event_add_time(nl
->event
, &nl
->time_event_source
, CLOCK_MONOTONIC
, 0, 0, time_callback
, nl
);
886 r
= sd_event_source_set_priority(nl
->time_event_source
, priority
);
890 r
= sd_event_source_set_description(nl
->time_event_source
, "netlink-timer");
/* Failure path: undo any partial attachment. */
897 sd_netlink_detach_event(nl
);
/* sd_netlink_detach_event(): inverse of sd_netlink_attach_event() —
 * drop both event sources and the event-loop reference. Fails with
 * -ENXIO when not attached. The final return is elided here. */
901 int sd_netlink_detach_event(sd_netlink
*nl
) {
902 assert_return(nl
, -EINVAL
);
903 assert_return(nl
->event
, -ENXIO
);
905 nl
->io_event_source
= sd_event_source_unref(nl
->io_event_source
);
907 nl
->time_event_source
= sd_event_source_unref(nl
->time_event_source
);
909 nl
->event
= sd_event_unref(nl
->event
);
/* netlink_add_match_internal(): register 'callback' to be invoked for
 * incoming multicast messages matching 'type' (and, for genl, 'cmd').
 * Takes a kernel-side subscription reference on each multicast group in
 * 'groups', allocates a match slot, copies the group list into it, and
 * prepends it to nl->match_callbacks (so newest matches run first). As
 * in sd_netlink_call_async(), the destroy callback is assigned last so
 * earlier failures never invoke it inconsistently. NOTE(review): the
 * nl/n_groups/type/cmd/userdata parameters, error checks, the ret_slot
 * hand-off and the return are elided in this excerpt. */
914 int netlink_add_match_internal(
916 sd_netlink_slot
**ret_slot
,
917 const uint32_t *groups
,
921 sd_netlink_message_handler_t callback
,
922 sd_netlink_destroy_t destroy_callback
,
924 const char *description
) {
926 _cleanup_free_ sd_netlink_slot
*slot
= NULL
;
930 assert(n_groups
> 0);
/* Subscribe the socket to each requested multicast group. */
932 for (size_t i
= 0; i
< n_groups
; i
++) {
933 r
= socket_broadcast_group_ref(nl
, groups
[i
]);
938 r
= netlink_slot_allocate(nl
, !ret_slot
, NETLINK_MATCH_CALLBACK
, sizeof(struct match_callback
),
939 userdata
, description
, &slot
);
/* The slot keeps its own copy of the group list. */
943 slot
->match_callback
.groups
= newdup(uint32_t, groups
, n_groups
);
944 if (!slot
->match_callback
.groups
)
947 slot
->match_callback
.n_groups
= n_groups
;
948 slot
->match_callback
.callback
= callback
;
949 slot
->match_callback
.type
= type
;
950 slot
->match_callback
.cmd
= cmd
;
952 LIST_PREPEND(match_callbacks
, nl
->match_callbacks
, &slot
->match_callback
);
954 /* Set this at last. Otherwise, some failures in above call the destroy callback but some do not. */
955 slot
->destroy_callback
= destroy_callback
;
/* sd_netlink_add_match(): rtnetlink-specific convenience wrapper around
 * netlink_add_match_internal(). Maps the requested RTM_* message 'type'
 * to the rtnetlink multicast group set(s) that carry it (links,
 * addresses, neighbors, nexthops, routes, rules, traffic control) and
 * registers the match with cmd == 0 ("any command"). NOTE(review): the
 * rtnl/type/userdata parameters, the switch statement skeleton, most
 * case labels and the default branch are elided in this excerpt — only
 * the group-table assignments survive. */
964 int sd_netlink_add_match(
966 sd_netlink_slot
**ret_slot
,
968 sd_netlink_message_handler_t callback
,
969 sd_netlink_destroy_t destroy_callback
,
971 const char *description
) {
/* Static lookup tables: which multicast groups carry each object type.
 * IPv4 and IPv6 variants are subscribed together where both exist. */
973 static const uint32_t
974 address_groups
[] = { RTNLGRP_IPV4_IFADDR
, RTNLGRP_IPV6_IFADDR
, },
975 link_groups
[] = { RTNLGRP_LINK
, },
976 neighbor_groups
[] = { RTNLGRP_NEIGH
, },
977 nexthop_groups
[] = { RTNLGRP_NEXTHOP
, },
978 route_groups
[] = { RTNLGRP_IPV4_ROUTE
, RTNLGRP_IPV6_ROUTE
, },
979 rule_groups
[] = { RTNLGRP_IPV4_RULE
, RTNLGRP_IPV6_RULE
, },
980 tc_groups
[] = { RTNLGRP_TC
};
981 const uint32_t *groups
;
984 assert_return(rtnl
, -EINVAL
);
985 assert_return(callback
, -EINVAL
);
986 assert_return(!netlink_pid_changed(rtnl
), -ECHILD
);
/* Per-type group selection (switch-on-type skeleton elided). */
991 groups
= link_groups
;
992 n_groups
= ELEMENTSOF(link_groups
);
996 groups
= address_groups
;
997 n_groups
= ELEMENTSOF(address_groups
);
1001 groups
= neighbor_groups
;
1002 n_groups
= ELEMENTSOF(neighbor_groups
);
1006 groups
= route_groups
;
1007 n_groups
= ELEMENTSOF(route_groups
);
1011 groups
= rule_groups
;
1012 n_groups
= ELEMENTSOF(rule_groups
);
1014 case RTM_NEWNEXTHOP
:
1015 case RTM_DELNEXTHOP
:
1016 groups
= nexthop_groups
;
1017 n_groups
= ELEMENTSOF(nexthop_groups
);
1024 n_groups
= ELEMENTSOF(tc_groups
);
1030 return netlink_add_match_internal(rtnl
, ret_slot
, groups
, n_groups
, type
, 0, callback
,
1031 destroy_callback
, userdata
, description
);
/* sd_netlink_attach_filter(): attach a classic-BPF socket filter of
 * 'len' instructions to the netlink socket, or detach the current one
 * when len == 0 (SO_DETACH_FILTER vs SO_ATTACH_FILTER). The sock_fprog
 * is built as a compound literal; its field initializers and the
 * function's returns run past the end of this excerpt. */
1034 int sd_netlink_attach_filter(sd_netlink
*nl
, size_t len
, struct sock_filter
*filter
) {
1035 assert_return(nl
, -EINVAL
);
1036 assert_return(len
== 0 || filter
, -EINVAL
);
1038 if (setsockopt(nl
->fd
, SOL_SOCKET
,
1039 len
== 0 ? SO_DETACH_FILTER
: SO_ATTACH_FILTER
,
1040 &(struct sock_fprog
) {
1043 }, sizeof(struct sock_fprog
)) < 0)