// SPDX-License-Identifier: GPL-2.0
#include <bpf/bpf.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/if_link.h>
#include <linux/mman.h>
#include <linux/netdev.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

#include "network_helpers.h"
#include "test_xsk.h"
#include "xsk_xdp_common.h"
#include "xsk_xdp_progs.skel.h"

#define DEFAULT_BATCH_SIZE 64
#define MIN_PKT_SIZE 64
#define MAX_ETH_JUMBO_SIZE 9000
#define MAX_INTERFACES 2
#define MAX_TEARDOWN_ITER 10
#define MAX_TX_BUDGET_DEFAULT 32
#define PKT_DUMP_NB_TO_PRINT 16
/* The Ethernet header is 14 bytes; the extra 2 bytes align the u32 payload words */
#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2)
#define POLL_TMOUT 1000
#define THREAD_TMOUT 3
#define UMEM_HEADROOM_TEST_SIZE 128
#define XSK_DESC__INVALID_OPTION (0xffff)
#define XSK_UMEM__INVALID_FRAME_SIZE (MAX_ETH_JUMBO_SIZE + 1)
#define XSK_UMEM__LARGE_FRAME_SIZE (3 * 1024)
#define XSK_UMEM__MAX_FRAME_SIZE (4 * 1024)

static const u8 g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};

bool opt_verbose;
pthread_barrier_t barr;
pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;

int pkts_in_flight;

/* The payload is a word consisting of a packet sequence number in the upper
 * 16 bits and an intra-packet data sequence number in the lower 16 bits. So
 * the 3rd packet's 5th word of data will contain the number (2 << 16) | 4,
 * as both are numbered from 0.
 */
static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
{
	u32 *ptr = (u32 *)dest, i;

	start /= sizeof(*ptr);
	size /= sizeof(*ptr);
	for (i = 0; i < size; i++)
		ptr[i] = htonl(pkt_nb << 16 | (i + start));
}
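
/* For example, write_payload(data, 3, 0, 16) fills the first four payload
 * words of the 4th packet with htonl(3 << 16 | 0) .. htonl(3 << 16 | 3).
 */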

static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
{
	memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
	memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
	eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
}

static bool is_umem_valid(struct ifobject *ifobj)
{
	return !!ifobj->umem->umem;
}

static u32 mode_to_xdp_flags(enum test_mode mode)
{
	return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
}

static u64 umem_size(struct xsk_umem_info *umem)
{
	return umem->num_frames * umem->frame_size;
}

int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
		       u64 size)
{
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = umem->frame_size,
		.frame_headroom = umem->frame_headroom,
		.flags = XSK_UMEM__DEFAULT_FLAGS
	};
	int ret;

	if (umem->fill_size)
		cfg.fill_size = umem->fill_size;

	if (umem->comp_size)
		cfg.comp_size = umem->comp_size;

	if (umem->unaligned_mode)
		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;

	ret = xsk_umem__create(&umem->umem, buffer, size,
			       &umem->fq, &umem->cq, &cfg);
	if (ret)
		return ret;

	umem->buffer = buffer;
	if (ifobj->shared_umem && ifobj->rx_on) {
		umem->base_addr = umem_size(umem);
		umem->next_buffer = umem_size(umem);
	}

	return 0;
}

static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
{
	u64 addr;

	addr = umem->next_buffer;
	umem->next_buffer += umem->frame_size;
	if (umem->next_buffer >= umem->base_addr + umem_size(umem))
		umem->next_buffer = umem->base_addr;

	return addr;
}
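
/* Note: buffers are handed out one frame at a time in round-robin fashion,
 * wrapping back to base_addr once the end of the umem is reached.
 */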

static void umem_reset_alloc(struct xsk_umem_info *umem)
{
	umem->next_buffer = 0;
}

static int enable_busy_poll(struct xsk_socket_info *xsk)
{
	int sock_opt;

	sock_opt = 1;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	sock_opt = 20;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	sock_opt = xsk->batch_size;
	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
		return -errno;

	return 0;
}
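
/* Note: this prefers busy polling over interrupts, busy-polls for up to
 * 20 usecs per syscall, and lets each busy-poll pass process up to
 * batch_size descriptors (the SO_BUSY_POLL_BUDGET).
 */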

int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
			 struct ifobject *ifobject, bool shared)
{
	struct xsk_socket_config cfg = {};
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;

	xsk->umem = umem;
	cfg.rx_size = xsk->rxqsize;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.bind_flags = ifobject->bind_flags;
	if (shared)
		cfg.bind_flags |= XDP_SHARED_UMEM;
	if (ifobject->mtu > MAX_ETH_PKT_SIZE)
		cfg.bind_flags |= XDP_USE_SG;
	if (umem->comp_size)
		cfg.tx_size = umem->comp_size;
	if (umem->fill_size)
		cfg.rx_size = umem->fill_size;

	txr = ifobject->tx_on ? &xsk->tx : NULL;
	rxr = ifobject->rx_on ? &xsk->rx : NULL;
	return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
}
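
/* Note: the tx/rx ring sizes track any customized completion/fill ring
 * sizes, and XDP_USE_SG is requested whenever the MTU no longer fits in a
 * single buffer.
 */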

#define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags"
static unsigned int get_max_skb_frags(void)
{
	unsigned int max_skb_frags = 0;
	FILE *file;

	file = fopen(MAX_SKB_FRAGS_PATH, "r");
	if (!file) {
		ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH);
		return 0;
	}

	if (fscanf(file, "%u", &max_skb_frags) != 1)
		ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH);

	fclose(file);
	return max_skb_frags;
}

static int set_ring_size(struct ifobject *ifobj)
{
	int ret;
	u32 ctr = 0;

	while (ctr++ < SOCK_RECONF_CTR) {
		ret = set_hw_ring_size(ifobj->ifname, &ifobj->ring);
		if (!ret)
			break;

		/* Retry only as long as the device reports EBUSY */
		if (ctr >= SOCK_RECONF_CTR || errno != EBUSY)
			return -errno;

		usleep(USLEEP_MAX);
	}

	return ret;
}

int hw_ring_size_reset(struct ifobject *ifobj)
{
	ifobj->ring.tx_pending = ifobj->set_ring.default_tx;
	ifobj->ring.rx_pending = ifobj->set_ring.default_rx;
	return set_ring_size(ifobj);
}

static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			     struct ifobject *ifobj_rx)
{
	u32 i, j;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->xsk = &ifobj->xsk_arr[0];
		ifobj->use_poll = false;
		ifobj->use_fill_ring = true;
		ifobj->release_rx = true;
		ifobj->validation_func = NULL;
		ifobj->use_metadata = false;

		if (i == 0) {
			ifobj->rx_on = false;
			ifobj->tx_on = true;
		} else {
			ifobj->rx_on = true;
			ifobj->tx_on = false;
		}

		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;

		for (j = 0; j < MAX_SOCKETS; j++) {
			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
			ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
			if (i == 0)
				ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
			else
				ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;

			memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
			memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
			ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
			ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
		}
	}

	if (ifobj_tx->hw_ring_size_supp)
		hw_ring_size_reset(ifobj_tx);

	test->ifobj_tx = ifobj_tx;
	test->ifobj_rx = ifobj_rx;
	test->current_step = 0;
	test->total_steps = 1;
	test->nb_sockets = 1;
	test->fail = false;
	test->set_ring = false;
	test->adjust_tail = false;
	test->adjust_tail_support = false;
	test->mtu = MAX_ETH_PKT_SIZE;
	test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
	test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
	test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
	test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
}

void test_init(struct test_spec *test, struct ifobject *ifobj_tx,
	       struct ifobject *ifobj_rx, enum test_mode mode,
	       const struct test_spec *test_to_run)
{
	struct pkt_stream *tx_pkt_stream;
	struct pkt_stream *rx_pkt_stream;
	u32 i;

	tx_pkt_stream = test->tx_pkt_stream_default;
	rx_pkt_stream = test->rx_pkt_stream_default;
	memset(test, 0, sizeof(*test));
	test->tx_pkt_stream_default = tx_pkt_stream;
	test->rx_pkt_stream_default = rx_pkt_stream;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
		if (mode == TEST_MODE_ZC)
			ifobj->bind_flags |= XDP_ZEROCOPY;
		else
			ifobj->bind_flags |= XDP_COPY;
	}

	memcpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
	test->test_func = test_to_run->test_func;
	test->mode = mode;
	__test_spec_init(test, ifobj_tx, ifobj_rx);
}

static void test_spec_reset(struct test_spec *test)
{
	__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}

static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
				   struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
				   struct bpf_map *xskmap_tx)
{
	test->xdp_prog_rx = xdp_prog_rx;
	test->xdp_prog_tx = xdp_prog_tx;
	test->xskmap_rx = xskmap_rx;
	test->xskmap_tx = xskmap_tx;
}

static int test_spec_set_mtu(struct test_spec *test, int mtu)
{
	int err;

	if (test->ifobj_rx->mtu != mtu) {
		err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu);
		if (err)
			return err;
		test->ifobj_rx->mtu = mtu;
	}
	if (test->ifobj_tx->mtu != mtu) {
		err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu);
		if (err)
			return err;
		test->ifobj_tx->mtu = mtu;
	}

	return 0;
}

void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
	if (pkt_stream) {
		pkt_stream->current_pkt_nb = 0;
		pkt_stream->nb_rx_pkts = 0;
	}
}

static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
{
	if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
		return NULL;

	return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
}

static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
	while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
		(*pkts_sent)++;
		if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
			return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
		pkt_stream->current_pkt_nb++;
	}
	return NULL;
}

void pkt_stream_delete(struct pkt_stream *pkt_stream)
{
	free(pkt_stream->pkts);
	free(pkt_stream);
}

void pkt_stream_restore_default(struct test_spec *test)
{
	struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
	struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;

	if (tx_pkt_stream != test->tx_pkt_stream_default) {
		pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
		test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
	}

	if (rx_pkt_stream != test->rx_pkt_stream_default) {
		pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
		test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
	}
}

static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
{
	struct pkt_stream *pkt_stream;

	pkt_stream = calloc(1, sizeof(*pkt_stream));
	if (!pkt_stream)
		return NULL;

	pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
	if (!pkt_stream->pkts) {
		free(pkt_stream);
		return NULL;
	}

	pkt_stream->nb_pkts = nb_pkts;
	return pkt_stream;
}

static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt)
{
	u32 nb_frags = 1, next_frag;

	if (!pkt)
		return 1;

	if (!pkt_stream->verbatim) {
		if (!pkt->valid || !pkt->len)
			return 1;
		return ceil_u32(pkt->len, frame_size);
	}

	/* Search for the end of the packet in verbatim mode */
	if (!pkt_continues(pkt->options))
		return nb_frags;

	next_frag = pkt_stream->current_pkt_nb;
	pkt++;
	while (next_frag++ < pkt_stream->nb_pkts) {
		nb_frags++;
		if (!pkt_continues(pkt->options) || !pkt->valid)
			break;
		pkt++;
	}
	return nb_frags;
}
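
/* Note: in normal mode the frag count is simply len rounded up to whole
 * frames; in verbatim mode each descriptor is replayed as-is, so walk
 * forward until a frag without XDP_PKT_CONTD (or an invalid one) ends the
 * packet.
 */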

static bool set_pkt_valid(int offset, u32 len)
{
	return len <= MAX_ETH_JUMBO_SIZE;
}

static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
	pkt->offset = offset;
	pkt->len = len;
	pkt->valid = set_pkt_valid(offset, len);
}

static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
	bool prev_pkt_valid = pkt->valid;

	pkt_set(pkt_stream, pkt, offset, len);
	pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid;
}

static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
{
	return ceil_u32(len, umem->frame_size) * umem->frame_size;
}
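
/* For example, with the default 4096-byte frames a 5000-byte packet
 * occupies ceil(5000 / 4096) * 4096 = 8192 bytes of umem buffer space.
 */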

static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = __pkt_stream_alloc(nb_pkts);
	if (!pkt_stream)
		return NULL;

	pkt_stream->nb_pkts = nb_pkts;
	pkt_stream->max_pkt_len = pkt_len;
	for (i = 0; i < nb_pkts; i++) {
		struct pkt *pkt = &pkt_stream->pkts[i];

		pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len);
		pkt->pkt_nb = nb_start + i * nb_off;
	}

	return pkt_stream;
}

struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
{
	return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
}

static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
{
	return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}

static int pkt_stream_replace_ifobject(struct ifobject *ifobj, u32 nb_pkts, u32 pkt_len)
{
	ifobj->xsk->pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);

	if (!ifobj->xsk->pkt_stream)
		return -ENOMEM;

	return 0;
}

static int pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
	int ret;

	ret = pkt_stream_replace_ifobject(test->ifobj_tx, nb_pkts, pkt_len);
	if (ret)
		return ret;

	return pkt_stream_replace_ifobject(test->ifobj_rx, nb_pkts, pkt_len);
}

static int __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
				     int offset)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
	if (!pkt_stream)
		return -ENOMEM;

	for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
		pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);

	ifobj->xsk->pkt_stream = pkt_stream;

	return 0;
}

static int pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
	int ret = __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);

	if (ret)
		return ret;

	return __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
}

static int pkt_stream_receive_half(struct test_spec *test)
{
	struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
	u32 i;

	if (test->ifobj_rx->xsk->pkt_stream != test->rx_pkt_stream_default)
		/* The packet stream has already been replaced, so release this
		 * one. The newly created one will be freed by
		 * pkt_stream_restore_default() at the end of the test.
		 */
		pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);

	test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
							      pkt_stream->pkts[0].len);
	if (!test->ifobj_rx->xsk->pkt_stream)
		return -ENOMEM;

	pkt_stream = test->ifobj_rx->xsk->pkt_stream;
	for (i = 1; i < pkt_stream->nb_pkts; i += 2)
		pkt_stream->pkts[i].valid = false;

	pkt_stream->nb_valid_entries /= 2;

	return 0;
}

static int pkt_stream_even_odd_sequence(struct test_spec *test)
{
	struct pkt_stream *pkt_stream;
	u32 i;

	for (i = 0; i < test->nb_sockets; i++) {
		pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
		pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
						   pkt_stream->pkts[0].len, i, 2);
		if (!pkt_stream)
			return -ENOMEM;
		test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;

		pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
		pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
						   pkt_stream->pkts[0].len, i, 2);
		if (!pkt_stream)
			return -ENOMEM;
		test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
	}

	return 0;
}

static void release_even_odd_sequence(struct test_spec *test)
{
	struct pkt_stream *later_free_tx = test->ifobj_tx->xsk->pkt_stream;
	struct pkt_stream *later_free_rx = test->ifobj_rx->xsk->pkt_stream;
	int i;

	for (i = 0; i < test->nb_sockets; i++) {
		/* later_free_{rx/tx} will be freed by pkt_stream_restore_default() */
		if (test->ifobj_tx->xsk_arr[i].pkt_stream != later_free_tx)
			pkt_stream_delete(test->ifobj_tx->xsk_arr[i].pkt_stream);
		if (test->ifobj_rx->xsk_arr[i].pkt_stream != later_free_rx)
			pkt_stream_delete(test->ifobj_rx->xsk_arr[i].pkt_stream);
	}
}

static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
{
	if (!pkt->valid)
		return pkt->offset;
	return pkt->offset + umem_alloc_buffer(umem);
}

static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
{
	pkt_stream->current_pkt_nb--;
}

static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
			 u32 pkt_nb, u32 bytes_written)
{
	void *data = xsk_umem__get_data(umem->buffer, addr);

	if (len < MIN_PKT_SIZE)
		return;

	if (!bytes_written) {
		gen_eth_hdr(xsk, data);

		len -= PKT_HDR_SIZE;
		data += PKT_HDR_SIZE;
	} else {
		bytes_written -= PKT_HDR_SIZE;
	}

	write_payload(data, pkt_nb, bytes_written, len);
}
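
/* Note: only the first frag of a packet carries the Ethernet header; later
 * frags pass the running bytes_written count so the payload sequence
 * numbers continue where the previous frag stopped.
 */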

static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames,
						       u32 nb_frames, bool verbatim)
{
	u32 i, len = 0, pkt_nb = 0, payload = 0;
	struct pkt_stream *pkt_stream;

	pkt_stream = __pkt_stream_alloc(nb_frames);
	if (!pkt_stream)
		return NULL;

	for (i = 0; i < nb_frames; i++) {
		struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
		struct pkt *frame = &frames[i];

		pkt->offset = frame->offset;
		if (verbatim) {
			*pkt = *frame;
			pkt->pkt_nb = payload;
			if (!frame->valid || !pkt_continues(frame->options))
				payload++;
		} else {
			if (frame->valid)
				len += frame->len;
			if (frame->valid && pkt_continues(frame->options))
				continue;

			pkt->pkt_nb = pkt_nb;
			pkt->len = len;
			pkt->valid = frame->valid;
			pkt->options = 0;

			len = 0;
		}

		print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
			      pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);

		if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
			pkt_stream->max_pkt_len = pkt->len;

		if (pkt->valid)
			pkt_stream->nb_valid_entries++;

		pkt_nb++;
	}

	pkt_stream->nb_pkts = pkt_nb;
	pkt_stream->verbatim = verbatim;
	return pkt_stream;
}

static int pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
{
	struct pkt_stream *pkt_stream;

	pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
	if (!pkt_stream)
		return -ENOMEM;
	test->ifobj_tx->xsk->pkt_stream = pkt_stream;

	pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
	if (!pkt_stream)
		return -ENOMEM;
	test->ifobj_rx->xsk->pkt_stream = pkt_stream;

	return 0;
}

static void pkt_print_data(u32 *data, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		u32 seqnum, pkt_nb;

		seqnum = ntohl(*data) & 0xffff;
		pkt_nb = ntohl(*data) >> 16;
		ksft_print_msg("%u:%u ", pkt_nb, seqnum);
		data++;
	}
}

static void pkt_dump(void *pkt, u32 len, bool eth_header)
{
	struct ethhdr *ethhdr = pkt;
	u32 i, *data;

	if (eth_header) {
		/* Extract L2 frame */
		ksft_print_msg("DEBUG>> L2: dst mac: ");
		for (i = 0; i < ETH_ALEN; i++)
			ksft_print_msg("%02X", ethhdr->h_dest[i]);

		ksft_print_msg("\nDEBUG>> L2: src mac: ");
		for (i = 0; i < ETH_ALEN; i++)
			ksft_print_msg("%02X", ethhdr->h_source[i]);

		data = pkt + PKT_HDR_SIZE;
	} else {
		data = pkt;
	}

	/* Extract L5 frame */
	ksft_print_msg("\nDEBUG>> L5: seqnum: ");
	pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
	ksft_print_msg("....");
	if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
		ksft_print_msg("\n.... ");
		pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
			       PKT_DUMP_NB_TO_PRINT);
	}
	ksft_print_msg("\n---------------------------------------\n");
}

static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
{
	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
	u32 offset = addr % umem->frame_size, expected_offset;
	int pkt_offset = pkt->valid ? pkt->offset : 0;

	if (!umem->unaligned_mode)
		pkt_offset = 0;

	expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;

	if (offset == expected_offset)
		return true;

	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
	return false;
}
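
/* Note: in aligned mode a frame is expected to start frame_headroom +
 * XDP_PACKET_HEADROOM bytes into its chunk; in unaligned mode the packet's
 * own offset applies instead and no frame_headroom is reserved.
 */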

static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
{
	void *data = xsk_umem__get_data(buffer, addr);
	struct xdp_info *meta = data - sizeof(struct xdp_info);

	if (meta->count != pkt->pkt_nb) {
		ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
			       __func__, pkt->pkt_nb,
			       (unsigned long long)meta->count);
		return false;
	}

	return true;
}

static int is_adjust_tail_supported(struct xsk_xdp_progs *skel_rx, bool *supported)
{
	struct bpf_map *data_map;
	int adjust_value = 0;
	int key = 0;
	int ret;

	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
	if (!data_map || !bpf_map__is_internal(data_map)) {
		ksft_print_msg("Error: could not find bss section of XDP program\n");
		return -EINVAL;
	}

	ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &key, &adjust_value);
	if (ret) {
		ksft_print_msg("Error: bpf_map_lookup_elem failed with error %d\n", ret);
		return ret;
	}

	/* The XDP program sets the 'adjust_value' variable to -EOPNOTSUPP when
	 * the bpf_xdp_adjust_tail() helper is not supported. Skip the
	 * adjust_tail test case in that scenario.
	 */
	*supported = adjust_value != -EOPNOTSUPP;

	return 0;
}

static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
			  u32 bytes_processed)
{
	u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
	void *data = xsk_umem__get_data(umem->buffer, addr);

	addr -= umem->base_addr;

	if (addr >= umem->num_frames * umem->frame_size ||
	    addr + len > umem->num_frames * umem->frame_size) {
		ksft_print_msg("Frag invalid addr: %llx len: %u\n",
			       (unsigned long long)addr, len);
		return false;
	}
	if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
		ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
			       (unsigned long long)addr, len);
		return false;
	}

	pkt_data = data;
	if (!bytes_processed) {
		pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
		len -= PKT_HDR_SIZE;
	} else {
		bytes_processed -= PKT_HDR_SIZE;
	}

	expected_seqnum = bytes_processed / sizeof(*pkt_data);
	seqnum = ntohl(*pkt_data) & 0xffff;
	pkt_nb = ntohl(*pkt_data) >> 16;

	if (expected_pkt_nb != pkt_nb) {
		ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
			       __func__, expected_pkt_nb, pkt_nb);
		goto error;
	}
	if (expected_seqnum != seqnum) {
		ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
			       __func__, expected_seqnum, seqnum);
		goto error;
	}

	words_to_end = len / sizeof(*pkt_data) - 1;
	pkt_data += words_to_end;
	seqnum = ntohl(*pkt_data) & 0xffff;
	expected_seqnum += words_to_end;
	if (expected_seqnum != seqnum) {
		ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
			       __func__, expected_seqnum, seqnum);
		goto error;
	}

	return true;

error:
	pkt_dump(data, len, !bytes_processed);
	return false;
}
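
/* Note: a frag passes validation if it lies within the umem, does not
 * cross a frame boundary in aligned mode, and both its first and last
 * payload words carry the expected packet and sequence numbers.
 */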

static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
	if (pkt->len != len) {
		ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n",
			       __func__, pkt->len, len);
		pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
		return false;
	}

	return true;
}

static u32 load_value(u32 *counter)
{
	return __atomic_load_n(counter, __ATOMIC_ACQUIRE);
}
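
/* Note: the acquire load pairs with the kernel's release-style updates of
 * the shared producer/consumer ring pointers.
 */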

static bool kick_tx_with_check(struct xsk_socket_info *xsk, int *ret)
{
	u32 max_budget = MAX_TX_BUDGET_DEFAULT;
	u32 cons, ready_to_send;
	int delta;

	cons = load_value(xsk->tx.consumer);
	ready_to_send = load_value(xsk->tx.producer) - cons;
	*ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);

	delta = load_value(xsk->tx.consumer) - cons;
	/* By default, the xsk should consume exactly @max_budget descs in one
	 * send in this case, where hitting the max budget limit in the while
	 * loop of __xsk_generic_xmit() is triggered. Make sure that the number
	 * of descs to be sent is larger than @max_budget; otherwise
	 * tx.consumer is updated in time by xskq_cons_peek_desc(), which
	 * hides the issue we are trying to verify.
	 */
	if (ready_to_send > max_budget && delta != max_budget)
		return false;

	return true;
}

int kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	if (xsk->check_consumer) {
		if (!kick_tx_with_check(xsk, &ret))
			return TEST_FAILURE;
	} else {
		ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	}
	if (ret >= 0)
		return TEST_PASS;
	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
		usleep(100);
		return TEST_PASS;
	}
	return TEST_FAILURE;
}

int kick_rx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
	if (ret < 0)
		return TEST_FAILURE;

	return TEST_PASS;
}

static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
	unsigned int rcvd;
	u32 idx;
	int ret;

	if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
		ret = kick_tx(xsk);
		if (ret)
			return TEST_FAILURE;
	}

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd) {
		if (rcvd > xsk->outstanding_tx) {
			u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);

			ksft_print_msg("[%s] Too many packets completed\n", __func__);
			ksft_print_msg("Last completion address: %llx\n",
				       (unsigned long long)addr);
			return TEST_FAILURE;
		}

		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}

	return TEST_PASS;
}

static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
{
	u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
	u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
	struct pkt_stream *pkt_stream = xsk->pkt_stream;
	struct ifobject *ifobj = test->ifobj_rx;
	struct xsk_umem_info *umem = xsk->umem;
	struct pollfd fds = { };
	struct pkt *pkt;
	u64 first_addr = 0;
	int ret;

	fds.fd = xsk_socket__fd(xsk->xsk);
	fds.events = POLLIN;

	ret = kick_rx(xsk);
	if (ret)
		return TEST_FAILURE;

	if (ifobj->use_poll) {
		ret = poll(&fds, 1, POLL_TMOUT);
		if (ret < 0)
			return TEST_FAILURE;

		if (!ret) {
			if (!is_umem_valid(test->ifobj_tx))
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
			return TEST_CONTINUE;
		}

		if (!(fds.revents & POLLIN))
			return TEST_CONTINUE;
	}

	rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
	if (!rcvd)
		return TEST_CONTINUE;

	if (ifobj->use_fill_ring) {
		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
				ret = poll(&fds, 1, POLL_TMOUT);
				if (ret < 0)
					return TEST_FAILURE;
			}
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		}
	}

	while (frags_processed < rcvd) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
		u64 addr = desc->addr, orig;

		orig = xsk_umem__extract_addr(addr);
		addr = xsk_umem__add_offset_to_addr(addr);

		if (!nb_frags) {
			pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
			if (!pkt) {
				ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
					       __func__, addr, desc->len);
				return TEST_FAILURE;
			}
		}

		print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
			      addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);

		if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
		    !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
		    !is_metadata_correct(pkt, umem->buffer, addr)))
			return TEST_FAILURE;

		if (!nb_frags++)
			first_addr = addr;
		frags_processed++;
		pkt_len += desc->len;
		if (ifobj->use_fill_ring)
			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;

		if (pkt_continues(desc->options))
			continue;

		/* The complete packet has been received */
		if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
		    !is_offset_correct(umem, pkt, addr))
			return TEST_FAILURE;

		pkt_stream->nb_rx_pkts++;
		nb_frags = 0;
		pkt_len = 0;
	}

	if (nb_frags) {
		/* In the middle of a packet. Start over from beginning of packet. */
		idx_rx -= nb_frags;
		xsk_ring_cons__cancel(&xsk->rx, nb_frags);
		if (ifobj->use_fill_ring) {
			idx_fq -= nb_frags;
			xsk_ring_prod__cancel(&umem->fq, nb_frags);
		}
		frags_processed -= nb_frags;
	}

	if (ifobj->use_fill_ring)
		xsk_ring_prod__submit(&umem->fq, frags_processed);
	if (ifobj->release_rx)
		xsk_ring_cons__release(&xsk->rx, frags_processed);

	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight -= pkts_sent;
	pthread_mutex_unlock(&pacing_mutex);
	pkts_sent = 0;

	return TEST_CONTINUE;
}

bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
			  unsigned long *bitmap)
{
	struct pkt_stream *pkt_stream = xsk->pkt_stream;

	if (!pkt_stream) {
		__set_bit(sock_num, bitmap);
		return false;
	}

	if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
		__set_bit(sock_num, bitmap);
		if (bitmap_full(bitmap, test->nb_sockets))
			return true;
	}

	return false;
}

static int receive_pkts(struct test_spec *test)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	DECLARE_BITMAP(bitmap, test->nb_sockets);
	struct xsk_socket_info *xsk;
	u32 sock_num = 0;
	int res, ret;

	bitmap_zero(bitmap, test->nb_sockets);

	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		return TEST_FAILURE;

	timeradd(&tv_now, &tv_timeout, &tv_end);

	while (1) {
		xsk = &test->ifobj_rx->xsk_arr[sock_num];

		if ((all_packets_received(test, xsk, sock_num, bitmap)))
			break;

		res = __receive_pkts(test, xsk);
		if (!(res == TEST_PASS || res == TEST_CONTINUE))
			return res;

		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			return TEST_FAILURE;

		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
			return TEST_FAILURE;
		}
		sock_num = (sock_num + 1) % test->nb_sockets;
	}

	return TEST_PASS;
}

static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
{
	u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
	struct pkt_stream *pkt_stream = xsk->pkt_stream;
	struct xsk_umem_info *umem = ifobject->umem;
	bool use_poll = ifobject->use_poll;
	struct pollfd fds = { };
	int ret;

	buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
	/* pkts_in_flight might be negative if many invalid packets are sent */
	if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
	    buffer_len)) {
		ret = kick_tx(xsk);
		if (ret)
			return TEST_FAILURE;
		return TEST_CONTINUE;
	}

	fds.fd = xsk_socket__fd(xsk->xsk);
	fds.events = POLLOUT;

	while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
		if (use_poll) {
			ret = poll(&fds, 1, POLL_TMOUT);
			if (timeout) {
				if (ret < 0) {
					ksft_print_msg("ERROR: [%s] Poll error %d\n",
						       __func__, errno);
					return TEST_FAILURE;
				}
				if (ret == 0)
					return TEST_PASS;
				break;
			}
			if (ret <= 0) {
				ksft_print_msg("ERROR: [%s] Poll error %d\n",
					       __func__, errno);
				return TEST_FAILURE;
			}
		}

		complete_pkts(xsk, xsk->batch_size);
	}

	for (i = 0; i < xsk->batch_size; i++) {
		struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
		u32 nb_frags_left, nb_frags, bytes_written = 0;

		if (!pkt)
			break;

		nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
		if (nb_frags > xsk->batch_size - i) {
			pkt_stream_cancel(pkt_stream);
			xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
			break;
		}
		nb_frags_left = nb_frags;

		while (nb_frags_left--) {
			struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);

			tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
			if (pkt_stream->verbatim) {
				tx_desc->len = pkt->len;
				tx_desc->options = pkt->options;
			} else if (nb_frags_left) {
				tx_desc->len = umem->frame_size;
				tx_desc->options = XDP_PKT_CONTD;
			} else {
				tx_desc->len = pkt->len - bytes_written;
				tx_desc->options = 0;
			}
			if (pkt->valid)
				pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
					     bytes_written);
			bytes_written += tx_desc->len;

			print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
				      tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);

			if (nb_frags_left) {
				i++;
				if (pkt_stream->verbatim)
					pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
			}
		}

		if (pkt && pkt->valid) {
			valid_pkts++;
			valid_frags += nb_frags;
		}
	}

	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight += valid_pkts;
	pthread_mutex_unlock(&pacing_mutex);

	xsk_ring_prod__submit(&xsk->tx, i);
	xsk->outstanding_tx += valid_frags;

	if (use_poll) {
		ret = poll(&fds, 1, POLL_TMOUT);
		if (ret <= 0) {
			if (ret == 0 && timeout)
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
			return TEST_FAILURE;
		}
	}

	if (!timeout) {
		if (complete_pkts(xsk, i))
			return TEST_FAILURE;

		usleep(10);
		return TEST_PASS;
	}

	return TEST_CONTINUE;
}

static int wait_for_tx_completion(struct xsk_socket_info *xsk)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	int ret;

	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		return TEST_FAILURE;
	timeradd(&tv_now, &tv_timeout, &tv_end);

	while (xsk->outstanding_tx) {
		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			return TEST_FAILURE;
		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
			return TEST_FAILURE;
		}

		complete_pkts(xsk, xsk->batch_size);
	}

	return TEST_PASS;
}

bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
{
	return bitmap_full(bitmap, test->nb_sockets);
}

static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
	bool timeout = !is_umem_valid(test->ifobj_rx);
	DECLARE_BITMAP(bitmap, test->nb_sockets);
	u32 i, ret;

	bitmap_zero(bitmap, test->nb_sockets);

	while (!(all_packets_sent(test, bitmap))) {
		for (i = 0; i < test->nb_sockets; i++) {
			struct pkt_stream *pkt_stream;

			pkt_stream = ifobject->xsk_arr[i].pkt_stream;
			if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
				__set_bit(i, bitmap);
				continue;
			}
			ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
			if (ret == TEST_CONTINUE && !test->fail)
				continue;

			if ((ret || test->fail) && !timeout)
				return TEST_FAILURE;

			if (ret == TEST_PASS && timeout)
				return ret;

			ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
			if (ret)
				return TEST_FAILURE;
		}
	}

	return TEST_PASS;
}

static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
{
	int fd = xsk_socket__fd(xsk), err;
	socklen_t optlen, expected_len;

	optlen = sizeof(*stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %d %s\n",
			       __func__, errno, strerror(errno));
		return TEST_FAILURE;
	}

	expected_len = sizeof(struct xdp_statistics);
	if (optlen != expected_len) {
		ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
			       __func__, expected_len, optlen);
		return TEST_FAILURE;
	}

	return TEST_PASS;
}

static int validate_rx_dropped(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	/* The receiver calls getsockopt after receiving the last (valid)
	 * packet which is not the final packet sent in this test (valid and
	 * invalid packets are sent in alternating fashion with the final
	 * packet being invalid). Since the last packet may or may not have
	 * been dropped already, both outcomes must be allowed.
	 */
	if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
	    stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_rx_full(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	usleep(1000);
	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	if (stats.rx_ring_full)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_fill_empty(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	struct xdp_statistics stats;
	int err;

	usleep(1000);
	err = kick_rx(ifobject->xsk);
	if (err)
		return TEST_FAILURE;

	err = get_xsk_stats(xsk, &stats);
	if (err)
		return TEST_FAILURE;

	if (stats.rx_fill_ring_empty_descs)
		return TEST_PASS;

	return TEST_FAILURE;
}

static int validate_tx_invalid_descs(struct ifobject *ifobject)
{
	struct xsk_socket *xsk = ifobject->xsk->xsk;
	int fd = xsk_socket__fd(xsk);
	struct xdp_statistics stats;
	socklen_t optlen;
	int err;

	optlen = sizeof(stats);
	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
	if (err) {
		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %d %s\n",
			       __func__, errno, strerror(errno));
		return TEST_FAILURE;
	}

	if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
		ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
			       __func__,
			       (unsigned long long)stats.tx_invalid_descs,
			       ifobject->xsk->pkt_stream->nb_pkts / 2);
		return TEST_FAILURE;
	}

	return TEST_PASS;
}

static int xsk_configure(struct test_spec *test, struct ifobject *ifobject,
			 struct xsk_umem_info *umem, bool tx)
{
	int i, ret;

	for (i = 0; i < test->nb_sockets; i++) {
		bool shared = (ifobject->shared_umem && tx) ? true : !!i;
		u32 ctr = 0;

		while (ctr++ < SOCK_RECONF_CTR) {
			ret = xsk_configure_socket(&ifobject->xsk_arr[i], umem,
						   ifobject, shared);
			if (!ret)
				break;

			/* Retry if it fails as xsk_socket__create() is asynchronous */
			if (ctr >= SOCK_RECONF_CTR)
				return ret;
			usleep(USLEEP_MAX);
		}
		if (ifobject->busy_poll) {
			ret = enable_busy_poll(&ifobject->xsk_arr[i]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
{
	int ret = xsk_configure(test, ifobject, test->ifobj_rx->umem, true);

	if (ret)
		return ret;
	ifobject->xsk = &ifobject->xsk_arr[0];
	ifobject->xskmap = test->ifobj_rx->xskmap;
	memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
	ifobject->umem->base_addr = 0;

	return 0;
}

static int xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
				  bool fill_up)
{
	u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
	u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
	int ret;

	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
		buffers_to_fill = umem->num_frames;
	else
		buffers_to_fill = umem->fill_size;

	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
	if (ret != buffers_to_fill)
		return -ENOSPC;

	while (filled < buffers_to_fill) {
		struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
		u64 addr;
		u32 i;

		for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) {
			if (!pkt) {
				if (!fill_up)
					break;
				addr = filled * umem->frame_size + umem->base_addr;
			} else if (pkt->offset >= 0) {
				addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
			} else {
				addr = pkt->offset + umem_alloc_buffer(umem);
			}

			*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
			if (++filled >= buffers_to_fill)
				break;
		}
	}
	xsk_ring_prod__submit(&umem->fq, filled);
	xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);

	pkt_stream_reset(pkt_stream);
	umem_reset_alloc(umem);

	return 0;
}
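
/* Note: one buffer is reserved per expected frag; when fill_up is set the
 * ring keeps being topped up with sequential frame addresses even after
 * the packet stream has been exhausted, until buffers_to_fill is reached.
 */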

static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int mmap_flags;
	u64 umem_sz;
	void *bufs;
	int ret;
	u32 i;

	umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
	mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;

	if (ifobject->umem->unaligned_mode)
		mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;

	if (ifobject->shared_umem)
		umem_sz *= 2;

	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		return -errno;

	ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
	if (ret)
		return ret;

	ret = xsk_configure(test, ifobject, ifobject->umem, false);
	if (ret)
		return ret;

	ifobject->xsk = &ifobject->xsk_arr[0];

	if (!ifobject->rx_on)
		return 0;

	ret = xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream,
				     ifobject->use_fill_ring);
	if (ret)
		return ret;

	for (i = 0; i < test->nb_sockets; i++) {
		ifobject->xsk = &ifobject->xsk_arr[i];
		ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
		if (ret)
			return ret;
	}

	return 0;
}

void *worker_testapp_validate_tx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_tx;
	int err;

	if (test->current_step == 1) {
		if (!ifobject->shared_umem) {
			if (thread_common_ops(test, ifobject)) {
				test->fail = true;
				pthread_exit(NULL);
			}
		} else {
			if (thread_common_ops_tx(test, ifobject)) {
				test->fail = true;
				pthread_exit(NULL);
			}
		}
	}

	err = send_pkts(test, ifobject);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err)
		test->fail = true;

	pthread_exit(NULL);
}

void *worker_testapp_validate_rx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_rx;
	int err;

	if (test->current_step == 1) {
		err = thread_common_ops(test, ifobject);
	} else {
		xsk_clear_xskmap(ifobject->xskmap);
		err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
		if (err)
			ksft_print_msg("Error: Failed to update xskmap, error %s\n",
				       strerror(-err));
	}

	pthread_barrier_wait(&barr);

	/* Only exit now in case of error, to avoid getting stuck in the barrier */
	if (err) {
		test->fail = true;
		pthread_exit(NULL);
	}

	err = receive_pkts(test);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);

	if (err) {
		if (!test->adjust_tail) {
			test->fail = true;
		} else {
			bool supported;

			if (is_adjust_tail_supported(ifobject->xdp_progs, &supported))
				test->fail = true;
			else if (!supported)
				test->adjust_tail_support = false;
			else
				test->fail = true;
		}
	}

	pthread_exit(NULL);
}

static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;

	if (ifobj->shared_umem)
		umem_sz *= 2;

	umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
	xsk_umem__delete(ifobj->umem->umem);
	munmap(ifobj->umem->buffer, umem_sz);
}

static void handler(int signum)
{
	pthread_exit(NULL);
}

static bool xdp_prog_changed_rx(struct test_spec *test)
{
	struct ifobject *ifobj = test->ifobj_rx;

	return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
}

static bool xdp_prog_changed_tx(struct test_spec *test)
{
	struct ifobject *ifobj = test->ifobj_tx;

	return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
}

static int xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
			    struct bpf_map *xskmap, enum test_mode mode)
{
	int err;

	xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
	err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
	if (err) {
		ksft_print_msg("Error attaching XDP program\n");
		return err;
	}

	if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
		if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
			ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
			return -EINVAL;
		}

	ifobj->xdp_prog = xdp_prog;
	ifobj->xskmap = xskmap;
	ifobj->mode = mode;

	return 0;
}

static int xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
				struct ifobject *ifobj_tx)
{
	int err = 0;

	if (xdp_prog_changed_rx(test)) {
		err = xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
		if (err)
			return err;
	}

	if (!ifobj_tx || ifobj_tx->shared_umem)
		return 0;

	if (xdp_prog_changed_tx(test))
		err = xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);

	return err;
}

static void clean_sockets(struct test_spec *test, struct ifobject *ifobj)
{
	u32 i;

	if (!ifobj || !test)
		return;

	for (i = 0; i < test->nb_sockets; i++)
		xsk_socket__delete(ifobj->xsk_arr[i].xsk);
}

static void clean_umem(struct test_spec *test, struct ifobject *ifobj1, struct ifobject *ifobj2)
{
	if (!ifobj1)
		return;

	testapp_clean_xsk_umem(ifobj1);
	if (ifobj2 && !ifobj2->shared_umem)
		testapp_clean_xsk_umem(ifobj2);
}

static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
				      struct ifobject *ifobj2)
{
	pthread_t t0, t1;
	int err;

	if (test->mtu > MAX_ETH_PKT_SIZE) {
		if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp ||
		    (ifobj2 && !ifobj2->multi_buff_zc_supp))) {
			ksft_print_msg("Multi buffer for zero-copy not supported.\n");
			return TEST_SKIP;
		}
		if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp ||
		    (ifobj2 && !ifobj2->multi_buff_supp))) {
			ksft_print_msg("Multi buffer not supported.\n");
			return TEST_SKIP;
		}
	}
	err = test_spec_set_mtu(test, test->mtu);
	if (err) {
		ksft_print_msg("Error, could not set mtu.\n");
		return TEST_FAILURE;
	}

	if (ifobj2) {
		if (pthread_barrier_init(&barr, NULL, 2))
			return TEST_FAILURE;
		pkt_stream_reset(ifobj2->xsk->pkt_stream);
	}

	test->current_step++;
	pkt_stream_reset(ifobj1->xsk->pkt_stream);
	pkts_in_flight = 0;

	signal(SIGUSR1, handler);
	/* Spawn RX thread */
	pthread_create(&t0, NULL, ifobj1->func_ptr, test);

	if (ifobj2) {
		pthread_barrier_wait(&barr);
		if (pthread_barrier_destroy(&barr)) {
			pthread_kill(t0, SIGUSR1);
			clean_sockets(test, ifobj1);
			clean_umem(test, ifobj1, NULL);
			return TEST_FAILURE;
		}

		/* Spawn TX thread */
		pthread_create(&t1, NULL, ifobj2->func_ptr, test);

		pthread_join(t1, NULL);
	}

	if (!ifobj2)
		pthread_kill(t0, SIGUSR1);
	else
		pthread_join(t0, NULL);

	if (test->total_steps == test->current_step || test->fail) {
		clean_sockets(test, ifobj1);
		clean_sockets(test, ifobj2);
		clean_umem(test, ifobj1, ifobj2);
	}

	if (test->fail)
		return TEST_FAILURE;

	return TEST_PASS;
}

static int testapp_validate_traffic(struct test_spec *test)
{
	struct ifobject *ifobj_rx = test->ifobj_rx;
	struct ifobject *ifobj_tx = test->ifobj_tx;

	if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
	    (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
		ksft_print_msg("No huge pages present.\n");
		return TEST_SKIP;
	}

	if (test->set_ring) {
		if (ifobj_tx->hw_ring_size_supp) {
			if (set_ring_size(ifobj_tx)) {
				ksft_print_msg("Failed to change HW ring size.\n");
				return TEST_FAILURE;
			}
		} else {
			ksft_print_msg("Changing HW ring size not supported.\n");
			return TEST_SKIP;
		}
	}

	if (xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx))
		return TEST_FAILURE;
	return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
}

static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
{
	return __testapp_validate_traffic(test, ifobj, NULL);
}

int testapp_teardown(struct test_spec *test)
{
	int i;

	for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
		if (testapp_validate_traffic(test))
			return TEST_FAILURE;
		test_spec_reset(test);
	}

	return TEST_PASS;
}

static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
{
	thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
	struct ifobject *tmp_ifobj = (*ifobj1);

	(*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
	(*ifobj2)->func_ptr = tmp_func_ptr;

	*ifobj1 = *ifobj2;
	*ifobj2 = tmp_ifobj;
}

int testapp_bidirectional(struct test_spec *test)
{
	int res;

	test->ifobj_tx->rx_on = true;
	test->ifobj_rx->tx_on = true;
	test->total_steps = 2;
	if (testapp_validate_traffic(test))
		return TEST_FAILURE;

	print_verbose("Switching Tx/Rx direction\n");
	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);

	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	return res;
}

static int swap_xsk_resources(struct test_spec *test)
{
	int ret;

	test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
	test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
	test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
	test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
	test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
	test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];

	ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
	if (ret)
		return TEST_FAILURE;

	return TEST_PASS;
}
1953
1954 int testapp_xdp_prog_cleanup(struct test_spec *test)
1955 {
1956 test->total_steps = 2;
1957 test->nb_sockets = 2;
1958 if (testapp_validate_traffic(test))
1959 return TEST_FAILURE;
1960
1961 if (swap_xsk_resources(test)) {
1962 clean_sockets(test, test->ifobj_rx);
1963 clean_sockets(test, test->ifobj_tx);
1964 clean_umem(test, test->ifobj_rx, test->ifobj_tx);
1965 return TEST_FAILURE;
1966 }
1967
1968 return testapp_validate_traffic(test);
1969 }
1970
1971 int testapp_headroom(struct test_spec *test)
1972 {
1973 test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
1974 return testapp_validate_traffic(test);
1975 }
1976
1977 int testapp_stats_rx_dropped(struct test_spec *test)
1978 {
1979 if (test->mode == TEST_MODE_ZC) {
1980 ksft_print_msg("Can not run RX_DROPPED test for ZC mode\n");
1981 return TEST_SKIP;
1982 }
1983
1984 if (pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0))
1985 return TEST_FAILURE;
1986 test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
1987 XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
1988 if (pkt_stream_receive_half(test))
1989 return TEST_FAILURE;
1990 test->ifobj_rx->validation_func = validate_rx_dropped;
1991 return testapp_validate_traffic(test);
1992 }
1993
1994 int testapp_stats_tx_invalid_descs(struct test_spec *test)
1995 {
1996 if (pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0))
1997 return TEST_FAILURE;
1998 test->ifobj_tx->validation_func = validate_tx_invalid_descs;
1999 return testapp_validate_traffic(test);
2000 }
2001
2002 int testapp_stats_rx_full(struct test_spec *test)
2003 {
2004 if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
2005 return TEST_FAILURE;
2006 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
2007
2008 test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
2009 test->ifobj_rx->release_rx = false;
2010 test->ifobj_rx->validation_func = validate_rx_full;
2011 return testapp_validate_traffic(test);
2012 }
2013
2014 int testapp_stats_fill_empty(struct test_spec *test)
2015 {
2016 if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
2017 return TEST_FAILURE;
2018 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
if (!test->ifobj_rx->xsk->pkt_stream)
return TEST_FAILURE;
2019
2020 test->ifobj_rx->use_fill_ring = false;
2021 test->ifobj_rx->validation_func = validate_fill_empty;
2022 return testapp_validate_traffic(test);
2023 }
2024
2025 int testapp_send_receive_unaligned(struct test_spec *test)
2026 {
2027 test->ifobj_tx->umem->unaligned_mode = true;
2028 test->ifobj_rx->umem->unaligned_mode = true;
2029 /* Let half of the packets straddle a 4K buffer boundary */
2030 if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
2031 return TEST_FAILURE;
2032
2033 return testapp_validate_traffic(test);
2034 }
2035
2036 int testapp_send_receive_unaligned_mb(struct test_spec *test)
2037 {
2038 test->mtu = MAX_ETH_JUMBO_SIZE;
2039 test->ifobj_tx->umem->unaligned_mode = true;
2040 test->ifobj_rx->umem->unaligned_mode = true;
2041 if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
2042 return TEST_FAILURE;
2043 return testapp_validate_traffic(test);
2044 }
2045
2046 int testapp_single_pkt(struct test_spec *test)
2047 {
2048 struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
2049
2050 if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
2051 return TEST_FAILURE;
2052 return testapp_validate_traffic(test);
2053 }
2054
2055 int testapp_send_receive_mb(struct test_spec *test)
2056 {
2057 test->mtu = MAX_ETH_JUMBO_SIZE;
2058 if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
2059 return TEST_FAILURE;
2060
2061 return testapp_validate_traffic(test);
2062 }
2063
2064 int testapp_invalid_desc_mb(struct test_spec *test)
2065 {
2066 struct xsk_umem_info *umem = test->ifobj_tx->umem;
2067 u64 umem_size = umem->num_frames * umem->frame_size;
2068 struct pkt pkts[] = {
2069 /* Valid packet for synch to start with */
2070 {0, MIN_PKT_SIZE, 0, true, 0},
2071 /* Zero frame len is not legal */
2072 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2073 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2074 {0, 0, 0, false, 0},
2075 /* Invalid address in the second frame */
2076 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2077 {umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2078 /* Invalid len in the middle */
2079 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2080 {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2081 /* Invalid options in the middle */
2082 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2083 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
2084 /* Transmit 2 frags, receive 3 */
2085 {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
2086 {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
2087 /* Middle frame crosses chunk boundary with small length */
2088 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
2089 {-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
2090 /* Valid packet for synch so that something is received */
2091 {0, MIN_PKT_SIZE, 0, true, 0}};
2092
2093 if (umem->unaligned_mode) {
2094 /* Crossing a chunk boundary allowed */
2095 pkts[12].valid = true;
2096 pkts[13].valid = true;
2097 }
2098
2099 test->mtu = MAX_ETH_JUMBO_SIZE;
2100 if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
2101 return TEST_FAILURE;
2102 return testapp_validate_traffic(test);
2103 }
2104
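/* A compact restatement of the rules the descriptor table below encodes:
 * in aligned mode a descriptor is valid only if [addr, addr + len) stays
 * within a single umem chunk; in unaligned mode it may start anywhere and
 * may even cross a 4K page boundary (the umem is hugepage-backed), but it
 * must still lie entirely inside the umem.
 */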
2105 int testapp_invalid_desc(struct test_spec *test)
2106 {
2107 struct xsk_umem_info *umem = test->ifobj_tx->umem;
2108 u64 umem_size = umem->num_frames * umem->frame_size;
2109 struct pkt pkts[] = {
2110 /* Zero packet address allowed */
2111 {0, MIN_PKT_SIZE, 0, true},
2112 /* Allowed packet */
2113 {0, MIN_PKT_SIZE, 0, true},
2114 /* Straddling the start of umem */
2115 {-2, MIN_PKT_SIZE, 0, false},
2116 /* Packet too large */
2117 {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
2118 /* Up to end of umem allowed */
2119 {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
2120 /* After umem ends */
2121 {umem_size, MIN_PKT_SIZE, 0, false},
2122 /* Straddle the end of umem */
2123 {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
2124 /* Straddle a 4K boundary */
2125 {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
2126 /* Straddle a 2K boundary */
2127 {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
2128 /* Valid packet for synch so that something is received */
2129 {0, MIN_PKT_SIZE, 0, true}};
2130
2131 if (umem->unaligned_mode) {
2132 /* Crossing a page boundary allowed */
2133 pkts[7].valid = true;
2134 }
2135 if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
2136 /* Crossing a 2K frame size boundary not allowed */
2137 pkts[8].valid = false;
2138 }
2139
2140 if (test->ifobj_tx->shared_umem) {
2141 pkts[4].offset += umem_size;
2142 pkts[5].offset += umem_size;
2143 pkts[6].offset += umem_size;
2144 }
2145
2146 if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
2147 return TEST_FAILURE;
2148 return testapp_validate_traffic(test);
2149 }
2150
2151 int testapp_xdp_drop(struct test_spec *test)
2152 {
2153 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2154 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2155
2156 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
2157 skel_rx->maps.xsk, skel_tx->maps.xsk);
2158
2159 if (pkt_stream_receive_half(test))
2160 return TEST_FAILURE;
2161 return testapp_validate_traffic(test);
2162 }
2163
2164 int testapp_xdp_metadata_copy(struct test_spec *test)
2165 {
2166 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2167 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2168
2169 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
2170 skel_tx->progs.xsk_xdp_populate_metadata,
2171 skel_rx->maps.xsk, skel_tx->maps.xsk);
2172 test->ifobj_rx->use_metadata = true;
2173
2174 skel_rx->bss->count = 0;
2175
2176 return testapp_validate_traffic(test);
2177 }
2178
2179 int testapp_xdp_shared_umem(struct test_spec *test)
2180 {
2181 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2182 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2183 int ret;
2184
2185 test->total_steps = 1;
2186 test->nb_sockets = 2;
2187
2188 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
2189 skel_tx->progs.xsk_xdp_shared_umem,
2190 skel_rx->maps.xsk, skel_tx->maps.xsk);
2191
2192 if (pkt_stream_even_odd_sequence(test))
2193 return TEST_FAILURE;
2194
2195 ret = testapp_validate_traffic(test);
2196
2197 release_even_odd_sequence(test);
2198
2199 return ret;
2200 }
2201
2202 int testapp_poll_txq_tmout(struct test_spec *test)
2203 {
2204 test->ifobj_tx->use_poll = true;
2205 /* Create invalid frames by setting the umem frame_size and the pkt length both to 2048 */
2206 test->ifobj_tx->umem->frame_size = 2048;
2207 if (pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048))
2208 return TEST_FAILURE;
2209 return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
2210 }
2211
2212 int testapp_poll_rxq_tmout(struct test_spec *test)
2213 {
2214 test->ifobj_rx->use_poll = true;
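/* Only the Rx side runs here and nothing is ever transmitted, so poll()
 * is expected to time out; the test exercises that timeout path.
 */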
2215 return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
2216 }
2217
2218 int testapp_too_many_frags(struct test_spec *test)
2219 {
2220 struct pkt *pkts;
2221 u32 max_frags, i;
2222 int ret = TEST_FAILURE;
2223
2224 if (test->mode == TEST_MODE_ZC) {
2225 max_frags = test->ifobj_tx->xdp_zc_max_segs;
2226 } else {
2227 max_frags = get_max_skb_frags();
2228 if (!max_frags) {
2229 ksft_print_msg("Can't get MAX_SKB_FRAGS from system, using default (17)\n");
2230 max_frags = 17;
2231 }
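/* One extra segment for the linear part (head) of the skb */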
2232 max_frags += 1;
2233 }
2234
2235 pkts = calloc(2 * max_frags + 2, sizeof(struct pkt));
2236 if (!pkts)
2237 return TEST_FAILURE;
2238
2239 test->mtu = MAX_ETH_JUMBO_SIZE;
2240
2241 /* Valid packet for synch */
2242 pkts[0].len = MIN_PKT_SIZE;
2243 pkts[0].valid = true;
2244
2245 /* One valid packet with the max amount of frags */
2246 for (i = 1; i < max_frags + 1; i++) {
2247 pkts[i].len = MIN_PKT_SIZE;
2248 pkts[i].options = XDP_PKT_CONTD;
2249 pkts[i].valid = true;
2250 }
2251 pkts[max_frags].options = 0;
2252
2253 /* An invalid packet with the max amount of frags, but which signals that
2254 * the packet continues on the last frag
2255 */
2256 for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
2257 pkts[i].len = MIN_PKT_SIZE;
2258 pkts[i].options = XDP_PKT_CONTD;
2259 pkts[i].valid = false;
2260 }
2261
2262 /* Valid packet for synch */
2263 pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
2264 pkts[2 * max_frags + 1].valid = true;
2265
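/* Worked example: with the typical MAX_SKB_FRAGS of 17, max_frags becomes
 * 18, so the stream is 1 sync packet + 18 valid frags + 18 invalid frags
 * (XDP_PKT_CONTD never cleared) + 1 sync packet = 38 descriptors.
 */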
2266 if (pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2)) {
2267 free(pkts);
2268 return TEST_FAILURE;
2269 }
2270
2271 ret = testapp_validate_traffic(test);
2272 free(pkts);
2273 return ret;
2274 }
2275
2276 static int xsk_load_xdp_programs(struct ifobject *ifobj)
2277 {
2278 ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
2279 if (libbpf_get_error(ifobj->xdp_progs))
2280 return libbpf_get_error(ifobj->xdp_progs);
2281
2282 return 0;
2283 }
2284
2285 /* Probe whether 2MB hugepages are available by attempting a hugetlb mapping */
2286 static bool hugepages_present(void)
2287 {
2288 size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
2289 void *bufs;
2290
2291 bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
2292 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
2293 if (bufs == MAP_FAILED)
2294 return false;
2295
2296 mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
2297 munmap(bufs, mmap_sz);
2298 return true;
2299 }
2300
2301 int init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
2302 {
2303 LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
2304 int err;
2305
2306 ifobj->func_ptr = func_ptr;
2307
2308 err = xsk_load_xdp_programs(ifobj);
2309 if (err) {
2310 ksft_print_msg("Error loading XDP program\n");
2311 return err;
2312 }
2313
2314 if (hugepages_present())
2315 ifobj->unaligned_supp = true;
2316
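/* The NETDEV_XDP_ACT_* bits checked below come from the kernel's netdev
 * netlink family (<linux/netdev.h>); on kernels that expose it,
 * bpf_xdp_query() also reports xdp_zc_max_segs, the per-packet frag
 * limit in zero-copy mode.
 */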
2317 err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
2318 if (err) {
2319 ksft_print_msg("Error querying XDP capabilities\n");
2320 return err;
2321 }
2322 if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
2323 ifobj->multi_buff_supp = true;
2324 if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
2325 if (query_opts.xdp_zc_max_segs > 1) {
2326 ifobj->multi_buff_zc_supp = true;
2327 ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
2328 } else {
2329 ifobj->xdp_zc_max_segs = 0;
2330 }
2331 }
2332
2333 return 0;
2334 }
2335
2336 int testapp_send_receive(struct test_spec *test)
2337 {
2338 return testapp_validate_traffic(test);
2339 }
2340
2341 int testapp_send_receive_2k_frame(struct test_spec *test)
2342 {
2343 test->ifobj_tx->umem->frame_size = 2048;
2344 test->ifobj_rx->umem->frame_size = 2048;
2345 if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE))
2346 return TEST_FAILURE;
2347 return testapp_validate_traffic(test);
2348 }
2349
2350 int testapp_poll_rx(struct test_spec *test)
2351 {
2352 test->ifobj_rx->use_poll = true;
2353 return testapp_validate_traffic(test);
2354 }
2355
2356 int testapp_poll_tx(struct test_spec *test)
2357 {
2358 test->ifobj_tx->use_poll = true;
2359 return testapp_validate_traffic(test);
2360 }
2361
2362 int testapp_aligned_inv_desc(struct test_spec *test)
2363 {
2364 return testapp_invalid_desc(test);
2365 }
2366
2367 int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
2368 {
2369 test->ifobj_tx->umem->frame_size = 2048;
2370 test->ifobj_rx->umem->frame_size = 2048;
2371 return testapp_invalid_desc(test);
2372 }
2373
2374 int testapp_unaligned_inv_desc(struct test_spec *test)
2375 {
2376 test->ifobj_tx->umem->unaligned_mode = true;
2377 test->ifobj_rx->umem->unaligned_mode = true;
2378 return testapp_invalid_desc(test);
2379 }
2380
2381 int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
2382 {
2383 u64 page_size, umem_size;
2384
2385 /* Odd frame size so the UMEM doesn't end near a page boundary. */
2386 test->ifobj_tx->umem->frame_size = 4001;
2387 test->ifobj_rx->umem->frame_size = 4001;
2388 test->ifobj_tx->umem->unaligned_mode = true;
2389 test->ifobj_rx->umem->unaligned_mode = true;
2390 /* This test exists to test descriptors that straddle the end of
2391 * the UMEM but not a page.
2392 */
2393 page_size = sysconf(_SC_PAGESIZE);
2394 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
2395 assert(umem_size % page_size > MIN_PKT_SIZE);
2396 assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
2397
2398 return testapp_invalid_desc(test);
2399 }
2400
2401 int testapp_aligned_inv_desc_mb(struct test_spec *test)
2402 {
2403 return testapp_invalid_desc_mb(test);
2404 }
2405
2406 int testapp_unaligned_inv_desc_mb(struct test_spec *test)
2407 {
2408 test->ifobj_tx->umem->unaligned_mode = true;
2409 test->ifobj_rx->umem->unaligned_mode = true;
2410 return testapp_invalid_desc_mb(test);
2411 }
2412
2413 int testapp_xdp_metadata(struct test_spec *test)
2414 {
2415 return testapp_xdp_metadata_copy(test);
2416 }
2417
2418 int testapp_xdp_metadata_mb(struct test_spec *test)
2419 {
2420 test->mtu = MAX_ETH_JUMBO_SIZE;
2421 return testapp_xdp_metadata_copy(test);
2422 }
2423
2424 int testapp_hw_sw_min_ring_size(struct test_spec *test)
2425 {
2426 int ret;
2427
2428 test->set_ring = true;
2429 test->total_steps = 2;
2430 test->ifobj_tx->ring.tx_pending = DEFAULT_BATCH_SIZE;
2431 test->ifobj_tx->ring.rx_pending = DEFAULT_BATCH_SIZE * 2;
2432 test->ifobj_tx->xsk->batch_size = 1;
2433 test->ifobj_rx->xsk->batch_size = 1;
2434 ret = testapp_validate_traffic(test);
2435 if (ret)
2436 return ret;
2437
2438 /* Set batch size to hw_ring_size - 1 */
2439 test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
2440 test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
2441 return testapp_validate_traffic(test);
2442 }
2443
2444 int testapp_hw_sw_max_ring_size(struct test_spec *test)
2445 {
2446 u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
2447 int ret;
2448
2449 test->set_ring = true;
2450 test->total_steps = 2;
2451 test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
2452 test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
2453 test->ifobj_rx->umem->num_frames = max_descs;
2454 test->ifobj_rx->umem->fill_size = max_descs;
2455 test->ifobj_rx->umem->comp_size = max_descs;
2456 test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
2457 test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
2458
2459 ret = testapp_validate_traffic(test);
2460 if (ret)
2461 return ret;
2462
2463 /* Set batch_size to the max ring size minus 8 (8152 on ice), as the ice HW
2464 * ignores the 3 lowest bits when updating the Rx HW tail register.
2465 */
2466 test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
2467 test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
2468 if (pkt_stream_replace(test, max_descs, MIN_PKT_SIZE)) {
2469 clean_sockets(test, test->ifobj_tx);
2470 clean_sockets(test, test->ifobj_rx);
2471 clean_umem(test, test->ifobj_rx, test->ifobj_tx);
2472 return TEST_FAILURE;
2473 }
2474
2475 return testapp_validate_traffic(test);
2476 }
2477
2478 static int testapp_xdp_adjust_tail(struct test_spec *test, int adjust_value)
2479 {
2480 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
2481 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
2482
2483 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_adjust_tail,
2484 skel_tx->progs.xsk_xdp_adjust_tail,
2485 skel_rx->maps.xsk, skel_tx->maps.xsk);
2486
2487 skel_rx->bss->adjust_value = adjust_value;
2488
2489 return testapp_validate_traffic(test);
2490 }
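/* The BPF counterpart lives in xsk_xdp_progs.c and is not shown in this
 * file; as an illustrative sketch only -- not the exact program -- an XDP
 * program of this shape, with adjust_value shared through .bss, would do
 * the resize and redirect:
 *
 *	SEC("xdp")
 *	int xsk_xdp_adjust_tail(struct xdp_md *xdp)
 *	{
 *		if (bpf_xdp_adjust_tail(xdp, adjust_value))
 *			return XDP_DROP;
 *		return bpf_redirect_map(&xsk, xdp->rx_queue_index, XDP_DROP);
 *	}
 */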
2491
2492 static int testapp_adjust_tail(struct test_spec *test, u32 value, u32 pkt_len)
2493 {
2494 int ret;
2495
2496 test->adjust_tail_support = true;
2497 test->adjust_tail = true;
2498 test->total_steps = 1;
2499
2500 ret = pkt_stream_replace_ifobject(test->ifobj_tx, DEFAULT_BATCH_SIZE, pkt_len);
2501 if (ret)
2502 return TEST_FAILURE;
2503
2504 ret = pkt_stream_replace_ifobject(test->ifobj_rx, DEFAULT_BATCH_SIZE, pkt_len + value);
2505 if (ret)
2506 return TEST_FAILURE;
2507
2508 ret = testapp_xdp_adjust_tail(test, value);
2509 if (ret)
2510 return ret;
2511
2512 if (!test->adjust_tail_support) {
2513 ksft_print_msg("%s %sResize pkt with bpf_xdp_adjust_tail() not supported\n",
2514 mode_string(test), busy_poll_string(test));
2515 return TEST_SKIP;
2516 }
2517
2518 return 0;
2519 }
2520
2521 int testapp_adjust_tail_shrink(struct test_spec *test)
2522 {
2523 /* Shrink by 4 bytes for testing purposes */
2524 return testapp_adjust_tail(test, -4, MIN_PKT_SIZE * 2);
2525 }
2526
2527 int testapp_adjust_tail_shrink_mb(struct test_spec *test)
2528 {
2529 test->mtu = MAX_ETH_JUMBO_SIZE;
2530 /* Shrink by the frag size */
2531 return testapp_adjust_tail(test, -XSK_UMEM__MAX_FRAME_SIZE, XSK_UMEM__LARGE_FRAME_SIZE * 2);
2532 }
2533
2534 int testapp_adjust_tail_grow(struct test_spec *test)
2535 {
2536 /* Grow by 4 bytes for testing purposes */
2537 return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2);
2538 }
2539
2540 int testapp_adjust_tail_grow_mb(struct test_spec *test)
2541 {
2542 test->mtu = MAX_ETH_JUMBO_SIZE;
2543 /* Grow by (frag_size - last_frag_size) - 1 to stay inside the last fragment */
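/* Worked example: packets of XSK_UMEM__LARGE_FRAME_SIZE * 2 = 6144 bytes
 * split into a full 4096-byte frag plus a 2048-byte tail, so growing by
 * (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1 = 2047 bytes ends the tail at 4095
 * bytes, just inside the last fragment.
 */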
2544 return testapp_adjust_tail(test, (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1,
2545 XSK_UMEM__LARGE_FRAME_SIZE * 2);
2546 }
2547
2548 int testapp_tx_queue_consumer(struct test_spec *test)
2549 {
2550 int nr_packets;
2551
2552 if (test->mode == TEST_MODE_ZC) {
2553 ksft_print_msg("Can not run TX_QUEUE_CONSUMER test for ZC mode\n");
2554 return TEST_SKIP;
2555 }
2556
2557 nr_packets = MAX_TX_BUDGET_DEFAULT + 1;
2558 if (pkt_stream_replace(test, nr_packets, MIN_PKT_SIZE))
2559 return TEST_FAILURE;
2560 test->ifobj_tx->xsk->batch_size = nr_packets;
2561 test->ifobj_tx->xsk->check_consumer = true;
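/* The idea: send one packet more than the default Tx budget so completion
 * takes more than one budget-limited pass; check_consumer lets the
 * framework verify that the Tx ring's consumer pointer keeps advancing.
 */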
2562
2563 return testapp_validate_traffic(test);
2564 }
2565
2566 struct ifobject *ifobject_create(void)
2567 {
2568 struct ifobject *ifobj;
2569
2570 ifobj = calloc(1, sizeof(struct ifobject));
2571 if (!ifobj)
2572 return NULL;
2573
2574 ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
2575 if (!ifobj->xsk_arr)
2576 goto out_xsk_arr;
2577
2578 ifobj->umem = calloc(1, sizeof(*ifobj->umem));
2579 if (!ifobj->umem)
2580 goto out_umem;
2581
2582 return ifobj;
2583
2584 out_umem:
2585 free(ifobj->xsk_arr);
2586 out_xsk_arr:
2587 free(ifobj);
2588 return NULL;
2589 }
2590
2591 void ifobject_delete(struct ifobject *ifobj)
2592 {
2593 free(ifobj->umem);
2594 free(ifobj->xsk_arr);
2595 free(ifobj);
2596 }