1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2020 Cloudflare
4 #include <netinet/tcp.h>
7 #include "test_progs.h"
8 #include "test_skmsg_load_helpers.skel.h"
9 #include "test_sockmap_update.skel.h"
10 #include "test_sockmap_invalid_update.skel.h"
11 #include "test_sockmap_skb_verdict_attach.skel.h"
12 #include "test_sockmap_progs_query.skel.h"
13 #include "test_sockmap_pass_prog.skel.h"
14 #include "test_sockmap_drop_prog.skel.h"
15 #include "bpf_iter_sockmap.skel.h"
17 #include "sockmap_helpers.h"
19 #define TCP_REPAIR 19 /* TCP sock is under repair right now */
21 #define TCP_REPAIR_ON 1
22 #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
24 static int connected_socket_v4(void)
26 struct sockaddr_in addr
= {
27 .sin_family
= AF_INET
,
28 .sin_port
= htons(80),
29 .sin_addr
= { inet_addr("127.0.0.1") },
31 socklen_t len
= sizeof(addr
);
34 s
= socket(AF_INET
, SOCK_STREAM
, 0);
35 if (!ASSERT_GE(s
, 0, "socket"))
38 repair
= TCP_REPAIR_ON
;
39 err
= setsockopt(s
, SOL_TCP
, TCP_REPAIR
, &repair
, sizeof(repair
));
40 if (!ASSERT_OK(err
, "setsockopt(TCP_REPAIR)"))
43 err
= connect(s
, (struct sockaddr
*)&addr
, len
);
44 if (!ASSERT_OK(err
, "connect"))
47 repair
= TCP_REPAIR_OFF_NO_WP
;
48 err
= setsockopt(s
, SOL_TCP
, TCP_REPAIR
, &repair
, sizeof(repair
));
49 if (!ASSERT_OK(err
, "setsockopt(TCP_REPAIR)"))
59 static void compare_cookies(struct bpf_map
*src
, struct bpf_map
*dst
)
61 __u32 i
, max_entries
= bpf_map__max_entries(src
);
62 int err
, src_fd
, dst_fd
;
64 src_fd
= bpf_map__fd(src
);
65 dst_fd
= bpf_map__fd(dst
);
67 for (i
= 0; i
< max_entries
; i
++) {
68 __u64 src_cookie
, dst_cookie
;
70 err
= bpf_map_lookup_elem(src_fd
, &i
, &src_cookie
);
71 if (err
&& errno
== ENOENT
) {
72 err
= bpf_map_lookup_elem(dst_fd
, &i
, &dst_cookie
);
73 ASSERT_ERR(err
, "map_lookup_elem(dst)");
74 ASSERT_EQ(errno
, ENOENT
, "map_lookup_elem(dst)");
77 if (!ASSERT_OK(err
, "lookup_elem(src)"))
80 err
= bpf_map_lookup_elem(dst_fd
, &i
, &dst_cookie
);
81 if (!ASSERT_OK(err
, "lookup_elem(dst)"))
84 ASSERT_EQ(dst_cookie
, src_cookie
, "cookie mismatch");
88 /* Create a map, populate it with one socket, and free the map. */
89 static void test_sockmap_create_update_free(enum bpf_map_type map_type
)
94 s
= connected_socket_v4();
95 if (!ASSERT_GE(s
, 0, "connected_socket_v4"))
98 map
= bpf_map_create(map_type
, NULL
, sizeof(int), sizeof(int), 1, NULL
);
99 if (!ASSERT_GE(map
, 0, "bpf_map_create"))
102 err
= bpf_map_update_elem(map
, &zero
, &s
, BPF_NOEXIST
);
103 if (!ASSERT_OK(err
, "bpf_map_update"))
111 static void test_skmsg_helpers(enum bpf_map_type map_type
)
113 struct test_skmsg_load_helpers
*skel
;
114 int err
, map
, verdict
;
116 skel
= test_skmsg_load_helpers__open_and_load();
117 if (!ASSERT_OK_PTR(skel
, "test_skmsg_load_helpers__open_and_load"))
120 verdict
= bpf_program__fd(skel
->progs
.prog_msg_verdict
);
121 map
= bpf_map__fd(skel
->maps
.sock_map
);
123 err
= bpf_prog_attach(verdict
, map
, BPF_SK_MSG_VERDICT
, 0);
124 if (!ASSERT_OK(err
, "bpf_prog_attach"))
127 err
= bpf_prog_detach2(verdict
, map
, BPF_SK_MSG_VERDICT
);
128 if (!ASSERT_OK(err
, "bpf_prog_detach2"))
131 test_skmsg_load_helpers__destroy(skel
);
134 static void test_sockmap_update(enum bpf_map_type map_type
)
137 struct test_sockmap_update
*skel
;
138 struct bpf_map
*dst_map
;
139 const __u32 zero
= 0;
140 char dummy
[14] = {0};
141 LIBBPF_OPTS(bpf_test_run_opts
, topts
,
143 .data_size_in
= sizeof(dummy
),
148 sk
= connected_socket_v4();
149 if (!ASSERT_NEQ(sk
, -1, "connected_socket_v4"))
152 skel
= test_sockmap_update__open_and_load();
153 if (!ASSERT_OK_PTR(skel
, "open_and_load"))
156 prog
= bpf_program__fd(skel
->progs
.copy_sock_map
);
157 src
= bpf_map__fd(skel
->maps
.src
);
158 if (map_type
== BPF_MAP_TYPE_SOCKMAP
)
159 dst_map
= skel
->maps
.dst_sock_map
;
161 dst_map
= skel
->maps
.dst_sock_hash
;
163 err
= bpf_map_update_elem(src
, &zero
, &sk
, BPF_NOEXIST
);
164 if (!ASSERT_OK(err
, "update_elem(src)"))
167 err
= bpf_prog_test_run_opts(prog
, &topts
);
168 if (!ASSERT_OK(err
, "test_run"))
170 if (!ASSERT_NEQ(topts
.retval
, 0, "test_run retval"))
173 compare_cookies(skel
->maps
.src
, dst_map
);
176 test_sockmap_update__destroy(skel
);
/* The verifier must reject this object: it updates a sockmap from an
 * unsafe context. Loading is expected to FAIL, so a NULL skeleton is
 * the passing outcome.
 */
static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;

	skel = test_sockmap_invalid_update__open_and_load();
	if (!ASSERT_NULL(skel, "open_and_load"))
		test_sockmap_invalid_update__destroy(skel);
}
190 static void test_sockmap_copy(enum bpf_map_type map_type
)
192 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts
, opts
);
193 int err
, len
, src_fd
, iter_fd
;
194 union bpf_iter_link_info linfo
= {};
195 __u32 i
, num_sockets
, num_elems
;
196 struct bpf_iter_sockmap
*skel
;
197 __s64
*sock_fd
= NULL
;
198 struct bpf_link
*link
;
202 skel
= bpf_iter_sockmap__open_and_load();
203 if (!ASSERT_OK_PTR(skel
, "bpf_iter_sockmap__open_and_load"))
206 if (map_type
== BPF_MAP_TYPE_SOCKMAP
) {
207 src
= skel
->maps
.sockmap
;
208 num_elems
= bpf_map__max_entries(src
);
209 num_sockets
= num_elems
- 1;
211 src
= skel
->maps
.sockhash
;
212 num_elems
= bpf_map__max_entries(src
) - 1;
213 num_sockets
= num_elems
;
216 sock_fd
= calloc(num_sockets
, sizeof(*sock_fd
));
217 if (!ASSERT_OK_PTR(sock_fd
, "calloc(sock_fd)"))
220 for (i
= 0; i
< num_sockets
; i
++)
223 src_fd
= bpf_map__fd(src
);
225 for (i
= 0; i
< num_sockets
; i
++) {
226 sock_fd
[i
] = connected_socket_v4();
227 if (!ASSERT_NEQ(sock_fd
[i
], -1, "connected_socket_v4"))
230 err
= bpf_map_update_elem(src_fd
, &i
, &sock_fd
[i
], BPF_NOEXIST
);
231 if (!ASSERT_OK(err
, "map_update"))
235 linfo
.map
.map_fd
= src_fd
;
236 opts
.link_info
= &linfo
;
237 opts
.link_info_len
= sizeof(linfo
);
238 link
= bpf_program__attach_iter(skel
->progs
.copy
, &opts
);
239 if (!ASSERT_OK_PTR(link
, "attach_iter"))
242 iter_fd
= bpf_iter_create(bpf_link__fd(link
));
243 if (!ASSERT_GE(iter_fd
, 0, "create_iter"))
247 while ((len
= read(iter_fd
, buf
, sizeof(buf
))) > 0)
249 if (!ASSERT_GE(len
, 0, "read"))
253 if (!ASSERT_EQ(skel
->bss
->elems
, num_elems
, "elems"))
256 if (!ASSERT_EQ(skel
->bss
->socks
, num_sockets
, "socks"))
259 compare_cookies(src
, skel
->maps
.dst
);
264 bpf_link__destroy(link
);
266 for (i
= 0; sock_fd
&& i
< num_sockets
; i
++)
271 bpf_iter_sockmap__destroy(skel
);
274 static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first
,
275 enum bpf_attach_type second
)
277 struct test_sockmap_skb_verdict_attach
*skel
;
278 int err
, map
, verdict
;
280 skel
= test_sockmap_skb_verdict_attach__open_and_load();
281 if (!ASSERT_OK_PTR(skel
, "open_and_load"))
284 verdict
= bpf_program__fd(skel
->progs
.prog_skb_verdict
);
285 map
= bpf_map__fd(skel
->maps
.sock_map
);
287 err
= bpf_prog_attach(verdict
, map
, first
, 0);
288 if (!ASSERT_OK(err
, "bpf_prog_attach"))
291 err
= bpf_prog_attach(verdict
, map
, second
, 0);
292 ASSERT_EQ(err
, -EBUSY
, "prog_attach_fail");
294 err
= bpf_prog_detach2(verdict
, map
, first
);
295 if (!ASSERT_OK(err
, "bpf_prog_detach2"))
298 test_sockmap_skb_verdict_attach__destroy(skel
);
301 static __u32
query_prog_id(int prog_fd
)
303 struct bpf_prog_info info
= {};
304 __u32 info_len
= sizeof(info
);
307 err
= bpf_prog_get_info_by_fd(prog_fd
, &info
, &info_len
);
308 if (!ASSERT_OK(err
, "bpf_prog_get_info_by_fd") ||
309 !ASSERT_EQ(info_len
, sizeof(info
), "bpf_prog_get_info_by_fd"))
315 static void test_sockmap_progs_query(enum bpf_attach_type attach_type
)
317 struct test_sockmap_progs_query
*skel
;
318 int err
, map_fd
, verdict_fd
;
319 __u32 attach_flags
= 0;
320 __u32 prog_ids
[3] = {};
323 skel
= test_sockmap_progs_query__open_and_load();
324 if (!ASSERT_OK_PTR(skel
, "test_sockmap_progs_query__open_and_load"))
327 map_fd
= bpf_map__fd(skel
->maps
.sock_map
);
329 if (attach_type
== BPF_SK_MSG_VERDICT
)
330 verdict_fd
= bpf_program__fd(skel
->progs
.prog_skmsg_verdict
);
332 verdict_fd
= bpf_program__fd(skel
->progs
.prog_skb_verdict
);
334 err
= bpf_prog_query(map_fd
, attach_type
, 0 /* query flags */,
335 &attach_flags
, prog_ids
, &prog_cnt
);
336 ASSERT_OK(err
, "bpf_prog_query failed");
337 ASSERT_EQ(attach_flags
, 0, "wrong attach_flags on query");
338 ASSERT_EQ(prog_cnt
, 0, "wrong program count on query");
340 err
= bpf_prog_attach(verdict_fd
, map_fd
, attach_type
, 0);
341 if (!ASSERT_OK(err
, "bpf_prog_attach failed"))
345 err
= bpf_prog_query(map_fd
, attach_type
, 0 /* query flags */,
346 &attach_flags
, prog_ids
, &prog_cnt
);
347 ASSERT_OK(err
, "bpf_prog_query failed");
348 ASSERT_EQ(attach_flags
, 0, "wrong attach_flags on query");
349 ASSERT_EQ(prog_cnt
, 1, "wrong program count on query");
350 ASSERT_EQ(prog_ids
[0], query_prog_id(verdict_fd
),
351 "wrong prog_ids on query");
353 bpf_prog_detach2(verdict_fd
, map_fd
, attach_type
);
355 test_sockmap_progs_query__destroy(skel
);
358 #define MAX_EVENTS 10
359 static void test_sockmap_skb_verdict_shutdown(void)
361 struct epoll_event ev
, events
[MAX_EVENTS
];
362 int n
, err
, map
, verdict
, s
, c1
, p1
;
363 struct test_sockmap_pass_prog
*skel
;
368 skel
= test_sockmap_pass_prog__open_and_load();
369 if (!ASSERT_OK_PTR(skel
, "open_and_load"))
372 verdict
= bpf_program__fd(skel
->progs
.prog_skb_verdict
);
373 map
= bpf_map__fd(skel
->maps
.sock_map_rx
);
375 err
= bpf_prog_attach(verdict
, map
, BPF_SK_SKB_STREAM_VERDICT
, 0);
376 if (!ASSERT_OK(err
, "bpf_prog_attach"))
379 s
= socket_loopback(AF_INET
, SOCK_STREAM
);
382 err
= create_pair(s
, AF_INET
, SOCK_STREAM
, &c1
, &p1
);
386 err
= bpf_map_update_elem(map
, &zero
, &c1
, BPF_NOEXIST
);
390 shutdown(p1
, SHUT_WR
);
395 epollfd
= epoll_create1(0);
396 if (!ASSERT_GT(epollfd
, -1, "epoll_create(0)"))
398 err
= epoll_ctl(epollfd
, EPOLL_CTL_ADD
, c1
, &ev
);
399 if (!ASSERT_OK(err
, "epoll_ctl(EPOLL_CTL_ADD)"))
401 err
= epoll_wait(epollfd
, events
, MAX_EVENTS
, -1);
402 if (!ASSERT_EQ(err
, 1, "epoll_wait(fd)"))
405 n
= recv(c1
, &b
, 1, SOCK_NONBLOCK
);
406 ASSERT_EQ(n
, 0, "recv_timeout(fin)");
411 test_sockmap_pass_prog__destroy(skel
);
414 static void test_sockmap_skb_verdict_fionread(bool pass_prog
)
416 int expected
, zero
= 0, sent
, recvd
, avail
;
417 int err
, map
, verdict
, s
, c0
, c1
, p0
, p1
;
418 struct test_sockmap_pass_prog
*pass
;
419 struct test_sockmap_drop_prog
*drop
;
420 char buf
[256] = "0123456789";
423 pass
= test_sockmap_pass_prog__open_and_load();
424 if (!ASSERT_OK_PTR(pass
, "open_and_load"))
426 verdict
= bpf_program__fd(pass
->progs
.prog_skb_verdict
);
427 map
= bpf_map__fd(pass
->maps
.sock_map_rx
);
428 expected
= sizeof(buf
);
430 drop
= test_sockmap_drop_prog__open_and_load();
431 if (!ASSERT_OK_PTR(drop
, "open_and_load"))
433 verdict
= bpf_program__fd(drop
->progs
.prog_skb_verdict
);
434 map
= bpf_map__fd(drop
->maps
.sock_map_rx
);
435 /* On drop data is consumed immediately and copied_seq inc'd */
440 err
= bpf_prog_attach(verdict
, map
, BPF_SK_SKB_STREAM_VERDICT
, 0);
441 if (!ASSERT_OK(err
, "bpf_prog_attach"))
444 s
= socket_loopback(AF_INET
, SOCK_STREAM
);
445 if (!ASSERT_GT(s
, -1, "socket_loopback(s)"))
447 err
= create_socket_pairs(s
, AF_INET
, SOCK_STREAM
, &c0
, &c1
, &p0
, &p1
);
448 if (!ASSERT_OK(err
, "create_socket_pairs(s)"))
451 err
= bpf_map_update_elem(map
, &zero
, &c1
, BPF_NOEXIST
);
452 if (!ASSERT_OK(err
, "bpf_map_update_elem(c1)"))
455 sent
= xsend(p1
, &buf
, sizeof(buf
), 0);
456 ASSERT_EQ(sent
, sizeof(buf
), "xsend(p0)");
457 err
= ioctl(c1
, FIONREAD
, &avail
);
458 ASSERT_OK(err
, "ioctl(FIONREAD) error");
459 ASSERT_EQ(avail
, expected
, "ioctl(FIONREAD)");
460 /* On DROP test there will be no data to read */
462 recvd
= recv_timeout(c1
, &buf
, sizeof(buf
), SOCK_NONBLOCK
, IO_TIMEOUT_SEC
);
463 ASSERT_EQ(recvd
, sizeof(buf
), "recv_timeout(c0)");
473 test_sockmap_pass_prog__destroy(pass
);
475 test_sockmap_drop_prog__destroy(drop
);
478 static void test_sockmap_skb_verdict_peek(void)
480 int err
, map
, verdict
, s
, c1
, p1
, zero
= 0, sent
, recvd
, avail
;
481 struct test_sockmap_pass_prog
*pass
;
482 char snd
[256] = "0123456789";
485 pass
= test_sockmap_pass_prog__open_and_load();
486 if (!ASSERT_OK_PTR(pass
, "open_and_load"))
488 verdict
= bpf_program__fd(pass
->progs
.prog_skb_verdict
);
489 map
= bpf_map__fd(pass
->maps
.sock_map_rx
);
491 err
= bpf_prog_attach(verdict
, map
, BPF_SK_SKB_STREAM_VERDICT
, 0);
492 if (!ASSERT_OK(err
, "bpf_prog_attach"))
495 s
= socket_loopback(AF_INET
, SOCK_STREAM
);
496 if (!ASSERT_GT(s
, -1, "socket_loopback(s)"))
499 err
= create_pair(s
, AF_INET
, SOCK_STREAM
, &c1
, &p1
);
500 if (!ASSERT_OK(err
, "create_pairs(s)"))
503 err
= bpf_map_update_elem(map
, &zero
, &c1
, BPF_NOEXIST
);
504 if (!ASSERT_OK(err
, "bpf_map_update_elem(c1)"))
507 sent
= xsend(p1
, snd
, sizeof(snd
), 0);
508 ASSERT_EQ(sent
, sizeof(snd
), "xsend(p1)");
509 recvd
= recv(c1
, rcv
, sizeof(rcv
), MSG_PEEK
);
510 ASSERT_EQ(recvd
, sizeof(rcv
), "recv(c1)");
511 err
= ioctl(c1
, FIONREAD
, &avail
);
512 ASSERT_OK(err
, "ioctl(FIONREAD) error");
513 ASSERT_EQ(avail
, sizeof(snd
), "after peek ioctl(FIONREAD)");
514 recvd
= recv(c1
, rcv
, sizeof(rcv
), 0);
515 ASSERT_EQ(recvd
, sizeof(rcv
), "recv(p0)");
516 err
= ioctl(c1
, FIONREAD
, &avail
);
517 ASSERT_OK(err
, "ioctl(FIONREAD) error");
518 ASSERT_EQ(avail
, 0, "after read ioctl(FIONREAD)");
524 test_sockmap_pass_prog__destroy(pass
);
527 void test_sockmap_basic(void)
529 if (test__start_subtest("sockmap create_update_free"))
530 test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP
);
531 if (test__start_subtest("sockhash create_update_free"))
532 test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH
);
533 if (test__start_subtest("sockmap sk_msg load helpers"))
534 test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP
);
535 if (test__start_subtest("sockhash sk_msg load helpers"))
536 test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH
);
537 if (test__start_subtest("sockmap update"))
538 test_sockmap_update(BPF_MAP_TYPE_SOCKMAP
);
539 if (test__start_subtest("sockhash update"))
540 test_sockmap_update(BPF_MAP_TYPE_SOCKHASH
);
541 if (test__start_subtest("sockmap update in unsafe context"))
542 test_sockmap_invalid_update();
543 if (test__start_subtest("sockmap copy"))
544 test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP
);
545 if (test__start_subtest("sockhash copy"))
546 test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH
);
547 if (test__start_subtest("sockmap skb_verdict attach")) {
548 test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT
,
549 BPF_SK_SKB_STREAM_VERDICT
);
550 test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT
,
553 if (test__start_subtest("sockmap msg_verdict progs query"))
554 test_sockmap_progs_query(BPF_SK_MSG_VERDICT
);
555 if (test__start_subtest("sockmap stream_parser progs query"))
556 test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER
);
557 if (test__start_subtest("sockmap stream_verdict progs query"))
558 test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT
);
559 if (test__start_subtest("sockmap skb_verdict progs query"))
560 test_sockmap_progs_query(BPF_SK_SKB_VERDICT
);
561 if (test__start_subtest("sockmap skb_verdict shutdown"))
562 test_sockmap_skb_verdict_shutdown();
563 if (test__start_subtest("sockmap skb_verdict fionread"))
564 test_sockmap_skb_verdict_fionread(true);
565 if (test__start_subtest("sockmap skb_verdict fionread on drop"))
566 test_sockmap_skb_verdict_fionread(false);
567 if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
568 test_sockmap_skb_verdict_peek();