/* Source: tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
 * (from thirdparty/kernel/stable.git, kvm-x86-docs-6.7 merge point)
 */
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2020 Cloudflare
3 #include <error.h>
4 #include <netinet/tcp.h>
5 #include <sys/epoll.h>
6
7 #include "test_progs.h"
8 #include "test_skmsg_load_helpers.skel.h"
9 #include "test_sockmap_update.skel.h"
10 #include "test_sockmap_invalid_update.skel.h"
11 #include "test_sockmap_skb_verdict_attach.skel.h"
12 #include "test_sockmap_progs_query.skel.h"
13 #include "test_sockmap_pass_prog.skel.h"
14 #include "test_sockmap_drop_prog.skel.h"
15 #include "bpf_iter_sockmap.skel.h"
16
17 #include "sockmap_helpers.h"
18
19 #define TCP_REPAIR 19 /* TCP sock is under repair right now */
20
21 #define TCP_REPAIR_ON 1
22 #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
23
24 static int connected_socket_v4(void)
25 {
26 struct sockaddr_in addr = {
27 .sin_family = AF_INET,
28 .sin_port = htons(80),
29 .sin_addr = { inet_addr("127.0.0.1") },
30 };
31 socklen_t len = sizeof(addr);
32 int s, repair, err;
33
34 s = socket(AF_INET, SOCK_STREAM, 0);
35 if (!ASSERT_GE(s, 0, "socket"))
36 goto error;
37
38 repair = TCP_REPAIR_ON;
39 err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
40 if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
41 goto error;
42
43 err = connect(s, (struct sockaddr *)&addr, len);
44 if (!ASSERT_OK(err, "connect"))
45 goto error;
46
47 repair = TCP_REPAIR_OFF_NO_WP;
48 err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
49 if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
50 goto error;
51
52 return s;
53 error:
54 perror(__func__);
55 close(s);
56 return -1;
57 }
58
59 static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
60 {
61 __u32 i, max_entries = bpf_map__max_entries(src);
62 int err, src_fd, dst_fd;
63
64 src_fd = bpf_map__fd(src);
65 dst_fd = bpf_map__fd(dst);
66
67 for (i = 0; i < max_entries; i++) {
68 __u64 src_cookie, dst_cookie;
69
70 err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
71 if (err && errno == ENOENT) {
72 err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
73 ASSERT_ERR(err, "map_lookup_elem(dst)");
74 ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
75 continue;
76 }
77 if (!ASSERT_OK(err, "lookup_elem(src)"))
78 continue;
79
80 err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
81 if (!ASSERT_OK(err, "lookup_elem(dst)"))
82 continue;
83
84 ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
85 }
86 }
87
88 /* Create a map, populate it with one socket, and free the map. */
89 static void test_sockmap_create_update_free(enum bpf_map_type map_type)
90 {
91 const int zero = 0;
92 int s, map, err;
93
94 s = connected_socket_v4();
95 if (!ASSERT_GE(s, 0, "connected_socket_v4"))
96 return;
97
98 map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
99 if (!ASSERT_GE(map, 0, "bpf_map_create"))
100 goto out;
101
102 err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
103 if (!ASSERT_OK(err, "bpf_map_update"))
104 goto out;
105
106 out:
107 close(map);
108 close(s);
109 }
110
111 static void test_skmsg_helpers(enum bpf_map_type map_type)
112 {
113 struct test_skmsg_load_helpers *skel;
114 int err, map, verdict;
115
116 skel = test_skmsg_load_helpers__open_and_load();
117 if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
118 return;
119
120 verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
121 map = bpf_map__fd(skel->maps.sock_map);
122
123 err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
124 if (!ASSERT_OK(err, "bpf_prog_attach"))
125 goto out;
126
127 err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
128 if (!ASSERT_OK(err, "bpf_prog_detach2"))
129 goto out;
130 out:
131 test_skmsg_load_helpers__destroy(skel);
132 }
133
134 static void test_sockmap_update(enum bpf_map_type map_type)
135 {
136 int err, prog, src;
137 struct test_sockmap_update *skel;
138 struct bpf_map *dst_map;
139 const __u32 zero = 0;
140 char dummy[14] = {0};
141 LIBBPF_OPTS(bpf_test_run_opts, topts,
142 .data_in = dummy,
143 .data_size_in = sizeof(dummy),
144 .repeat = 1,
145 );
146 __s64 sk;
147
148 sk = connected_socket_v4();
149 if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
150 return;
151
152 skel = test_sockmap_update__open_and_load();
153 if (!ASSERT_OK_PTR(skel, "open_and_load"))
154 goto close_sk;
155
156 prog = bpf_program__fd(skel->progs.copy_sock_map);
157 src = bpf_map__fd(skel->maps.src);
158 if (map_type == BPF_MAP_TYPE_SOCKMAP)
159 dst_map = skel->maps.dst_sock_map;
160 else
161 dst_map = skel->maps.dst_sock_hash;
162
163 err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
164 if (!ASSERT_OK(err, "update_elem(src)"))
165 goto out;
166
167 err = bpf_prog_test_run_opts(prog, &topts);
168 if (!ASSERT_OK(err, "test_run"))
169 goto out;
170 if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
171 goto out;
172
173 compare_cookies(skel->maps.src, dst_map);
174
175 out:
176 test_sockmap_update__destroy(skel);
177 close_sk:
178 close(sk);
179 }
180
/* Loading must fail: the program updates a sockmap from an unsafe
 * context, which the verifier rejects.
 */
static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;

	skel = test_sockmap_invalid_update__open_and_load();
	if (ASSERT_NULL(skel, "open_and_load"))
		return;
	/* Unexpectedly loaded: clean up the skeleton. */
	test_sockmap_invalid_update__destroy(skel);
}
189
/* Iterate over a sockmap/sockhash with a bpf_iter program that copies
 * each entry into a destination map, then verify element/socket counts
 * and that both maps hold the same socket cookies.
 */
static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	/* A sockmap iterator visits every slot including the one left
	 * empty, while a sockhash iterator only visits present entries;
	 * adjust the expected counts accordingly.
	 */
	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
		goto out;

	/* Pre-mark all slots invalid so the cleanup loop below can tell
	 * which fds were actually opened.
	 */
	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Attach the iterator program to the source map. */
	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* Drain the iterator; reading it drives the copy program. */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	/* Close only the fds that were successfully opened. */
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	if (sock_fd)
		free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}
273
274 static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
275 enum bpf_attach_type second)
276 {
277 struct test_sockmap_skb_verdict_attach *skel;
278 int err, map, verdict;
279
280 skel = test_sockmap_skb_verdict_attach__open_and_load();
281 if (!ASSERT_OK_PTR(skel, "open_and_load"))
282 return;
283
284 verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
285 map = bpf_map__fd(skel->maps.sock_map);
286
287 err = bpf_prog_attach(verdict, map, first, 0);
288 if (!ASSERT_OK(err, "bpf_prog_attach"))
289 goto out;
290
291 err = bpf_prog_attach(verdict, map, second, 0);
292 ASSERT_EQ(err, -EBUSY, "prog_attach_fail");
293
294 err = bpf_prog_detach2(verdict, map, first);
295 if (!ASSERT_OK(err, "bpf_prog_detach2"))
296 goto out;
297 out:
298 test_sockmap_skb_verdict_attach__destroy(skel);
299 }
300
301 static __u32 query_prog_id(int prog_fd)
302 {
303 struct bpf_prog_info info = {};
304 __u32 info_len = sizeof(info);
305 int err;
306
307 err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
308 if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
309 !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
310 return 0;
311
312 return info.id;
313 }
314
315 static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
316 {
317 struct test_sockmap_progs_query *skel;
318 int err, map_fd, verdict_fd;
319 __u32 attach_flags = 0;
320 __u32 prog_ids[3] = {};
321 __u32 prog_cnt = 3;
322
323 skel = test_sockmap_progs_query__open_and_load();
324 if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
325 return;
326
327 map_fd = bpf_map__fd(skel->maps.sock_map);
328
329 if (attach_type == BPF_SK_MSG_VERDICT)
330 verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
331 else
332 verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);
333
334 err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
335 &attach_flags, prog_ids, &prog_cnt);
336 ASSERT_OK(err, "bpf_prog_query failed");
337 ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
338 ASSERT_EQ(prog_cnt, 0, "wrong program count on query");
339
340 err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
341 if (!ASSERT_OK(err, "bpf_prog_attach failed"))
342 goto out;
343
344 prog_cnt = 1;
345 err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
346 &attach_flags, prog_ids, &prog_cnt);
347 ASSERT_OK(err, "bpf_prog_query failed");
348 ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
349 ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
350 ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
351 "wrong prog_ids on query");
352
353 bpf_prog_detach2(verdict_fd, map_fd, attach_type);
354 out:
355 test_sockmap_progs_query__destroy(skel);
356 }
357
358 #define MAX_EVENTS 10
359 static void test_sockmap_skb_verdict_shutdown(void)
360 {
361 struct epoll_event ev, events[MAX_EVENTS];
362 int n, err, map, verdict, s, c1, p1;
363 struct test_sockmap_pass_prog *skel;
364 int epollfd;
365 int zero = 0;
366 char b;
367
368 skel = test_sockmap_pass_prog__open_and_load();
369 if (!ASSERT_OK_PTR(skel, "open_and_load"))
370 return;
371
372 verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
373 map = bpf_map__fd(skel->maps.sock_map_rx);
374
375 err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
376 if (!ASSERT_OK(err, "bpf_prog_attach"))
377 goto out;
378
379 s = socket_loopback(AF_INET, SOCK_STREAM);
380 if (s < 0)
381 goto out;
382 err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
383 if (err < 0)
384 goto out;
385
386 err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
387 if (err < 0)
388 goto out_close;
389
390 shutdown(p1, SHUT_WR);
391
392 ev.events = EPOLLIN;
393 ev.data.fd = c1;
394
395 epollfd = epoll_create1(0);
396 if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
397 goto out_close;
398 err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
399 if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
400 goto out_close;
401 err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
402 if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
403 goto out_close;
404
405 n = recv(c1, &b, 1, SOCK_NONBLOCK);
406 ASSERT_EQ(n, 0, "recv_timeout(fin)");
407 out_close:
408 close(c1);
409 close(p1);
410 out:
411 test_sockmap_pass_prog__destroy(skel);
412 }
413
414 static void test_sockmap_skb_verdict_fionread(bool pass_prog)
415 {
416 int expected, zero = 0, sent, recvd, avail;
417 int err, map, verdict, s, c0, c1, p0, p1;
418 struct test_sockmap_pass_prog *pass;
419 struct test_sockmap_drop_prog *drop;
420 char buf[256] = "0123456789";
421
422 if (pass_prog) {
423 pass = test_sockmap_pass_prog__open_and_load();
424 if (!ASSERT_OK_PTR(pass, "open_and_load"))
425 return;
426 verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
427 map = bpf_map__fd(pass->maps.sock_map_rx);
428 expected = sizeof(buf);
429 } else {
430 drop = test_sockmap_drop_prog__open_and_load();
431 if (!ASSERT_OK_PTR(drop, "open_and_load"))
432 return;
433 verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
434 map = bpf_map__fd(drop->maps.sock_map_rx);
435 /* On drop data is consumed immediately and copied_seq inc'd */
436 expected = 0;
437 }
438
439
440 err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
441 if (!ASSERT_OK(err, "bpf_prog_attach"))
442 goto out;
443
444 s = socket_loopback(AF_INET, SOCK_STREAM);
445 if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
446 goto out;
447 err = create_socket_pairs(s, AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
448 if (!ASSERT_OK(err, "create_socket_pairs(s)"))
449 goto out;
450
451 err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
452 if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
453 goto out_close;
454
455 sent = xsend(p1, &buf, sizeof(buf), 0);
456 ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
457 err = ioctl(c1, FIONREAD, &avail);
458 ASSERT_OK(err, "ioctl(FIONREAD) error");
459 ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
460 /* On DROP test there will be no data to read */
461 if (pass_prog) {
462 recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
463 ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
464 }
465
466 out_close:
467 close(c0);
468 close(p0);
469 close(c1);
470 close(p1);
471 out:
472 if (pass_prog)
473 test_sockmap_pass_prog__destroy(pass);
474 else
475 test_sockmap_drop_prog__destroy(drop);
476 }
477
478 static void test_sockmap_skb_verdict_peek(void)
479 {
480 int err, map, verdict, s, c1, p1, zero = 0, sent, recvd, avail;
481 struct test_sockmap_pass_prog *pass;
482 char snd[256] = "0123456789";
483 char rcv[256] = "0";
484
485 pass = test_sockmap_pass_prog__open_and_load();
486 if (!ASSERT_OK_PTR(pass, "open_and_load"))
487 return;
488 verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
489 map = bpf_map__fd(pass->maps.sock_map_rx);
490
491 err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
492 if (!ASSERT_OK(err, "bpf_prog_attach"))
493 goto out;
494
495 s = socket_loopback(AF_INET, SOCK_STREAM);
496 if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
497 goto out;
498
499 err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
500 if (!ASSERT_OK(err, "create_pairs(s)"))
501 goto out;
502
503 err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
504 if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
505 goto out_close;
506
507 sent = xsend(p1, snd, sizeof(snd), 0);
508 ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
509 recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
510 ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
511 err = ioctl(c1, FIONREAD, &avail);
512 ASSERT_OK(err, "ioctl(FIONREAD) error");
513 ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
514 recvd = recv(c1, rcv, sizeof(rcv), 0);
515 ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)");
516 err = ioctl(c1, FIONREAD, &avail);
517 ASSERT_OK(err, "ioctl(FIONREAD) error");
518 ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");
519
520 out_close:
521 close(c1);
522 close(p1);
523 out:
524 test_sockmap_pass_prog__destroy(pass);
525 }
526
/* Entry point: register every sockmap/sockhash subtest with the
 * test_progs framework. Each body runs only if its subtest is selected.
 */
void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict attach")) {
		/* Exercise the exclusivity check in both orders. */
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
	if (test__start_subtest("sockmap msg_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
	if (test__start_subtest("sockmap stream_parser progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
	if (test__start_subtest("sockmap stream_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
	if (test__start_subtest("sockmap skb_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
	if (test__start_subtest("sockmap skb_verdict shutdown"))
		test_sockmap_skb_verdict_shutdown();
	if (test__start_subtest("sockmap skb_verdict fionread"))
		test_sockmap_skb_verdict_fionread(true);
	if (test__start_subtest("sockmap skb_verdict fionread on drop"))
		test_sockmap_skb_verdict_fionread(false);
	if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
		test_sockmap_skb_verdict_peek();
}