2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/ipv6.h>
35 #include <linux/tcp.h>
36 #include <linux/bitmap.h>
37 #include <linux/filter.h>
38 #include <net/ip6_checksum.h>
39 #include <net/page_pool/helpers.h>
40 #include <net/inet_ecn.h>
44 #include <net/xdp_sock_drv.h>
50 #include "en/rep/tc.h"
51 #include "ipoib/ipoib.h"
52 #include "en_accel/ipsec.h"
53 #include "en_accel/macsec.h"
54 #include "en_accel/ipsec_rxtx.h"
55 #include "en_accel/ktls_txrx.h"
57 #include "en/xsk/rx.h"
58 #include "en/health.h"
59 #include "en/params.h"
61 #include "en/devlink.h"
63 static struct sk_buff
*
64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq
*rq
, struct mlx5e_mpw_info
*wi
,
65 struct mlx5_cqe64
*cqe
, u16 cqe_bcnt
, u32 head_offset
,
67 static struct sk_buff
*
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq
*rq
, struct mlx5e_mpw_info
*wi
,
69 struct mlx5_cqe64
*cqe
, u16 cqe_bcnt
, u32 head_offset
,
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
);
75 const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic
= {
76 .handle_rx_cqe
= mlx5e_handle_rx_cqe
,
77 .handle_rx_cqe_mpwqe
= mlx5e_handle_rx_cqe_mpwrq
,
78 .handle_rx_cqe_mpwqe_shampo
= mlx5e_handle_rx_cqe_mpwrq_shampo
,
81 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq
*wq
,
84 u32 ci
= mlx5_cqwq_ctr2ix(wq
, cqcc
);
86 memcpy(data
, mlx5_cqwq_get_wqe(wq
, ci
), sizeof(struct mlx5_cqe64
));
89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq
*rq
,
90 struct mlx5_cqe64
*cqe
)
92 struct mlx5e_cq_decomp
*cqd
= &rq
->cqd
;
93 struct mlx5_cqe64
*title
= &cqd
->title
;
95 memcpy(title
, cqe
, sizeof(struct mlx5_cqe64
));
97 if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX
, &rq
->state
)))
100 if (rq
->wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
)
101 cqd
->wqe_counter
= mpwrq_get_cqe_stride_index(title
) +
102 mpwrq_get_cqe_consumed_strides(title
);
105 mlx5_wq_cyc_ctr2ix(&rq
->wqe
.wq
, be16_to_cpu(title
->wqe_counter
) + 1);
108 static inline void mlx5e_read_title_slot(struct mlx5e_rq
*rq
,
109 struct mlx5_cqwq
*wq
,
112 struct mlx5e_cq_decomp
*cqd
= &rq
->cqd
;
113 struct mlx5_cqe64
*title
= &cqd
->title
;
115 mlx5e_read_cqe_slot(wq
, cqcc
, title
);
116 cqd
->left
= be32_to_cpu(title
->byte_cnt
);
117 cqd
->wqe_counter
= be16_to_cpu(title
->wqe_counter
);
118 rq
->stats
->cqe_compress_blks
++;
121 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq
*wq
,
122 struct mlx5e_cq_decomp
*cqd
,
125 mlx5e_read_cqe_slot(wq
, cqcc
, cqd
->mini_arr
);
126 cqd
->mini_arr_idx
= 0;
129 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq
*wq
, int n
)
132 u8 op_own
= mlx5_cqwq_get_ctr_wrap_cnt(wq
, cqcc
) & 1;
133 u32 ci
= mlx5_cqwq_ctr2ix(wq
, cqcc
);
134 u32 wq_sz
= mlx5_cqwq_get_size(wq
);
135 u32 ci_top
= min_t(u32
, wq_sz
, ci
+ n
);
137 for (; ci
< ci_top
; ci
++, n
--) {
138 struct mlx5_cqe64
*cqe
= mlx5_cqwq_get_wqe(wq
, ci
);
140 cqe
->op_own
= op_own
;
143 if (unlikely(ci
== wq_sz
)) {
145 for (ci
= 0; ci
< n
; ci
++) {
146 struct mlx5_cqe64
*cqe
= mlx5_cqwq_get_wqe(wq
, ci
);
148 cqe
->op_own
= op_own
;
153 static inline void mlx5e_decompress_cqe(struct mlx5e_rq
*rq
,
154 struct mlx5_cqwq
*wq
,
157 struct mlx5e_cq_decomp
*cqd
= &rq
->cqd
;
158 struct mlx5_mini_cqe8
*mini_cqe
= &cqd
->mini_arr
[cqd
->mini_arr_idx
];
159 struct mlx5_cqe64
*title
= &cqd
->title
;
161 title
->byte_cnt
= mini_cqe
->byte_cnt
;
162 title
->check_sum
= mini_cqe
->checksum
;
163 title
->op_own
&= 0xf0;
164 title
->op_own
|= 0x01 & (cqcc
>> wq
->fbc
.log_sz
);
166 /* state bit set implies linked-list striding RQ wq type and
167 * HW stride index capability supported
169 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX
, &rq
->state
)) {
170 title
->wqe_counter
= mini_cqe
->stridx
;
174 /* HW stride index capability not supported */
175 title
->wqe_counter
= cpu_to_be16(cqd
->wqe_counter
);
176 if (rq
->wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
)
177 cqd
->wqe_counter
+= mpwrq_get_cqe_consumed_strides(title
);
180 mlx5_wq_cyc_ctr2ix(&rq
->wqe
.wq
, cqd
->wqe_counter
+ 1);
183 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq
*rq
,
184 struct mlx5_cqwq
*wq
,
187 struct mlx5e_cq_decomp
*cqd
= &rq
->cqd
;
189 mlx5e_decompress_cqe(rq
, wq
, cqcc
);
190 cqd
->title
.rss_hash_type
= 0;
191 cqd
->title
.rss_hash_result
= 0;
194 static u32
mlx5e_decompress_enhanced_cqe(struct mlx5e_rq
*rq
,
195 struct mlx5_cqwq
*wq
,
196 struct mlx5_cqe64
*cqe
,
199 struct mlx5e_cq_decomp
*cqd
= &rq
->cqd
;
203 left
= get_cqe_enhanced_num_mini_cqes(cqe
);
204 /* Here we avoid breaking the cqe compression session in the middle
205 * in case budget is not sufficient to handle all of it. In this case
206 * we return work_done == budget_rem to give 'busy' napi indication.
208 if (unlikely(left
> budget_rem
))
212 cqd
->mini_arr_idx
= 0;
213 memcpy(cqd
->mini_arr
, cqe
, sizeof(struct mlx5_cqe64
));
214 for (i
= 0; i
< left
; i
++, cqd
->mini_arr_idx
++, cqcc
++) {
215 mlx5e_decompress_cqe_no_hash(rq
, wq
, cqcc
);
216 INDIRECT_CALL_3(rq
->handle_rx_cqe
, mlx5e_handle_rx_cqe_mpwrq
,
217 mlx5e_handle_rx_cqe
, mlx5e_handle_rx_cqe_mpwrq_shampo
,
221 rq
->stats
->cqe_compress_pkts
+= left
;
226 static inline u32
mlx5e_decompress_cqes_cont(struct mlx5e_rq
*rq
,
227 struct mlx5_cqwq
*wq
,
228 int update_owner_only
,
231 struct mlx5e_cq_decomp
*cqd
= &rq
->cqd
;
232 u32 cqcc
= wq
->cc
+ update_owner_only
;
236 cqe_count
= min_t(u32
, cqd
->left
, budget_rem
);
238 for (i
= update_owner_only
; i
< cqe_count
;
239 i
++, cqd
->mini_arr_idx
++, cqcc
++) {
240 if (cqd
->mini_arr_idx
== MLX5_MINI_CQE_ARRAY_SIZE
)
241 mlx5e_read_mini_arr_slot(wq
, cqd
, cqcc
);
243 mlx5e_decompress_cqe_no_hash(rq
, wq
, cqcc
);
244 INDIRECT_CALL_3(rq
->handle_rx_cqe
, mlx5e_handle_rx_cqe_mpwrq
,
245 mlx5e_handle_rx_cqe_mpwrq_shampo
, mlx5e_handle_rx_cqe
,
248 mlx5e_cqes_update_owner(wq
, cqcc
- wq
->cc
);
250 cqd
->left
-= cqe_count
;
251 rq
->stats
->cqe_compress_pkts
+= cqe_count
;
256 static inline u32
mlx5e_decompress_cqes_start(struct mlx5e_rq
*rq
,
257 struct mlx5_cqwq
*wq
,
260 struct mlx5e_cq_decomp
*cqd
= &rq
->cqd
;
263 mlx5e_read_title_slot(rq
, wq
, cc
);
264 mlx5e_read_mini_arr_slot(wq
, cqd
, cc
+ 1);
265 mlx5e_decompress_cqe(rq
, wq
, cc
);
266 INDIRECT_CALL_3(rq
->handle_rx_cqe
, mlx5e_handle_rx_cqe_mpwrq
,
267 mlx5e_handle_rx_cqe_mpwrq_shampo
, mlx5e_handle_rx_cqe
,
271 return mlx5e_decompress_cqes_cont(rq
, wq
, 1, budget_rem
);
274 #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
276 static int mlx5e_page_alloc_fragmented(struct mlx5e_rq
*rq
,
277 struct mlx5e_frag_page
*frag_page
)
281 page
= page_pool_dev_alloc_pages(rq
->page_pool
);
285 page_pool_fragment_page(page
, MLX5E_PAGECNT_BIAS_MAX
);
287 *frag_page
= (struct mlx5e_frag_page
) {
295 static void mlx5e_page_release_fragmented(struct mlx5e_rq
*rq
,
296 struct mlx5e_frag_page
*frag_page
)
298 u16 drain_count
= MLX5E_PAGECNT_BIAS_MAX
- frag_page
->frags
;
299 struct page
*page
= frag_page
->page
;
301 if (page_pool_defrag_page(page
, drain_count
) == 0)
302 page_pool_put_defragged_page(rq
->page_pool
, page
, -1, true);
305 static inline int mlx5e_get_rx_frag(struct mlx5e_rq
*rq
,
306 struct mlx5e_wqe_frag_info
*frag
)
311 /* On first frag (offset == 0), replenish page.
312 * Other frags that point to the same page (with a different
313 * offset) should just use the new one without replenishing again
316 err
= mlx5e_page_alloc_fragmented(rq
, frag
->frag_page
);
321 static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info
*frag
)
323 #define CAN_RELEASE_MASK \
324 (BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))
326 #define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)
328 return (frag
->flags
& CAN_RELEASE_MASK
) == CAN_RELEASE_VALUE
;
331 static inline void mlx5e_put_rx_frag(struct mlx5e_rq
*rq
,
332 struct mlx5e_wqe_frag_info
*frag
)
334 if (mlx5e_frag_can_release(frag
))
335 mlx5e_page_release_fragmented(rq
, frag
->frag_page
);
338 static inline struct mlx5e_wqe_frag_info
*get_frag(struct mlx5e_rq
*rq
, u16 ix
)
340 return &rq
->wqe
.frags
[ix
<< rq
->wqe
.info
.log_num_frags
];
343 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq
*rq
, struct mlx5e_rx_wqe_cyc
*wqe
,
346 struct mlx5e_wqe_frag_info
*frag
= get_frag(rq
, ix
);
350 for (i
= 0; i
< rq
->wqe
.info
.num_frags
; i
++, frag
++) {
354 err
= mlx5e_get_rx_frag(rq
, frag
);
358 frag
->flags
&= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE
);
360 headroom
= i
== 0 ? rq
->buff
.headroom
: 0;
361 addr
= page_pool_get_dma_addr(frag
->frag_page
->page
);
362 wqe
->data
[i
].addr
= cpu_to_be64(addr
+ frag
->offset
+ headroom
);
369 mlx5e_put_rx_frag(rq
, --frag
);
374 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq
*rq
,
375 struct mlx5e_wqe_frag_info
*wi
)
379 for (i
= 0; i
< rq
->wqe
.info
.num_frags
; i
++, wi
++)
380 mlx5e_put_rx_frag(rq
, wi
);
383 static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info
*wi
)
385 if (!(wi
->flags
& BIT(MLX5E_WQE_FRAG_SKIP_RELEASE
)))
386 xsk_buff_free(*wi
->xskp
);
389 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq
*rq
, u16 ix
)
391 struct mlx5e_wqe_frag_info
*wi
= get_frag(rq
, ix
);
394 mlx5e_xsk_free_rx_wqe(wi
);
396 mlx5e_free_rx_wqe(rq
, wi
);
398 /* Avoid a second release of the wqe pages: dealloc is called
399 * for the same missing wqes on regular RQ flush and on regular
400 * RQ close. This happens when XSK RQs come into play.
402 for (int i
= 0; i
< rq
->wqe
.info
.num_frags
; i
++, wi
++)
403 wi
->flags
|= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE
);
407 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq
*rq
, u16 ix
, int wqe_bulk
)
409 struct mlx5_wq_cyc
*wq
= &rq
->wqe
.wq
;
412 for (i
= 0; i
< wqe_bulk
; i
++) {
413 int j
= mlx5_wq_cyc_ctr2ix(wq
, ix
+ i
);
414 struct mlx5e_wqe_frag_info
*wi
;
416 wi
= get_frag(rq
, j
);
417 /* The page is always put into the Reuse Ring, because there
418 * is no way to return the page to the userspace when the
419 * interface goes down.
421 mlx5e_xsk_free_rx_wqe(wi
);
425 static void mlx5e_free_rx_wqes(struct mlx5e_rq
*rq
, u16 ix
, int wqe_bulk
)
427 struct mlx5_wq_cyc
*wq
= &rq
->wqe
.wq
;
430 for (i
= 0; i
< wqe_bulk
; i
++) {
431 int j
= mlx5_wq_cyc_ctr2ix(wq
, ix
+ i
);
432 struct mlx5e_wqe_frag_info
*wi
;
434 wi
= get_frag(rq
, j
);
435 mlx5e_free_rx_wqe(rq
, wi
);
439 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq
*rq
, u16 ix
, int wqe_bulk
)
441 struct mlx5_wq_cyc
*wq
= &rq
->wqe
.wq
;
444 for (i
= 0; i
< wqe_bulk
; i
++) {
445 int j
= mlx5_wq_cyc_ctr2ix(wq
, ix
+ i
);
446 struct mlx5e_rx_wqe_cyc
*wqe
;
448 wqe
= mlx5_wq_cyc_get_wqe(wq
, j
);
450 if (unlikely(mlx5e_alloc_rx_wqe(rq
, wqe
, j
)))
457 static int mlx5e_refill_rx_wqes(struct mlx5e_rq
*rq
, u16 ix
, int wqe_bulk
)
459 int remaining
= wqe_bulk
;
464 /* The WQE bulk is split into smaller bulks that are sized
465 * according to the page pool cache refill size to avoid overflowing
466 * the page pool cache due to too many page releases at once.
469 refill
= min_t(u16
, rq
->wqe
.info
.refill_unit
, remaining
);
471 mlx5e_free_rx_wqes(rq
, ix
+ total_alloc
, refill
);
472 refill_alloc
= mlx5e_alloc_rx_wqes(rq
, ix
+ total_alloc
, refill
);
473 if (unlikely(refill_alloc
!= refill
))
476 total_alloc
+= refill_alloc
;
483 mlx5e_free_rx_wqes(rq
, ix
, total_alloc
+ refill_alloc
);
485 for (int i
= 0; i
< total_alloc
+ refill
; i
++) {
486 int j
= mlx5_wq_cyc_ctr2ix(&rq
->wqe
.wq
, ix
+ i
);
487 struct mlx5e_wqe_frag_info
*frag
;
489 frag
= get_frag(rq
, j
);
490 for (int k
= 0; k
< rq
->wqe
.info
.num_frags
; k
++, frag
++)
491 frag
->flags
|= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE
);
498 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq
*rq
, struct skb_shared_info
*sinfo
,
499 struct xdp_buff
*xdp
, struct mlx5e_frag_page
*frag_page
,
500 u32 frag_offset
, u32 len
)
504 dma_addr_t addr
= page_pool_get_dma_addr(frag_page
->page
);
506 dma_sync_single_for_cpu(rq
->pdev
, addr
+ frag_offset
, len
, rq
->buff
.map_dir
);
507 if (!xdp_buff_has_frags(xdp
)) {
508 /* Init on the first fragment to avoid cold cache access
512 sinfo
->xdp_frags_size
= 0;
513 xdp_buff_set_frags_flag(xdp
);
516 frag
= &sinfo
->frags
[sinfo
->nr_frags
++];
517 skb_frag_fill_page_desc(frag
, frag_page
->page
, frag_offset
, len
);
519 if (page_is_pfmemalloc(frag_page
->page
))
520 xdp_buff_set_frag_pfmemalloc(xdp
);
521 sinfo
->xdp_frags_size
+= len
;
525 mlx5e_add_skb_frag(struct mlx5e_rq
*rq
, struct sk_buff
*skb
,
526 struct page
*page
, u32 frag_offset
, u32 len
,
527 unsigned int truesize
)
529 dma_addr_t addr
= page_pool_get_dma_addr(page
);
531 dma_sync_single_for_cpu(rq
->pdev
, addr
+ frag_offset
, len
,
533 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
,
534 page
, frag_offset
, len
, truesize
);
538 mlx5e_copy_skb_header(struct mlx5e_rq
*rq
, struct sk_buff
*skb
,
539 struct page
*page
, dma_addr_t addr
,
540 int offset_from
, int dma_offset
, u32 headlen
)
542 const void *from
= page_address(page
) + offset_from
;
543 /* Aligning len to sizeof(long) optimizes memcpy performance */
544 unsigned int len
= ALIGN(headlen
, sizeof(long));
546 dma_sync_single_for_cpu(rq
->pdev
, addr
+ dma_offset
, len
,
548 skb_copy_to_linear_data(skb
, from
, len
);
552 mlx5e_free_rx_mpwqe(struct mlx5e_rq
*rq
, struct mlx5e_mpw_info
*wi
)
557 /* A common case for AF_XDP. */
558 if (bitmap_full(wi
->skip_release_bitmap
, rq
->mpwqe
.pages_per_wqe
))
561 no_xdp_xmit
= bitmap_empty(wi
->skip_release_bitmap
, rq
->mpwqe
.pages_per_wqe
);
564 struct xdp_buff
**xsk_buffs
= wi
->alloc_units
.xsk_buffs
;
566 /* The page is always put into the Reuse Ring, because there
567 * is no way to return the page to userspace when the interface
570 for (i
= 0; i
< rq
->mpwqe
.pages_per_wqe
; i
++)
571 if (no_xdp_xmit
|| !test_bit(i
, wi
->skip_release_bitmap
))
572 xsk_buff_free(xsk_buffs
[i
]);
574 for (i
= 0; i
< rq
->mpwqe
.pages_per_wqe
; i
++) {
575 if (no_xdp_xmit
|| !test_bit(i
, wi
->skip_release_bitmap
)) {
576 struct mlx5e_frag_page
*frag_page
;
578 frag_page
= &wi
->alloc_units
.frag_pages
[i
];
579 mlx5e_page_release_fragmented(rq
, frag_page
);
585 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq
*rq
, u8 n
)
587 struct mlx5_wq_ll
*wq
= &rq
->mpwqe
.wq
;
590 u16 next_wqe_index
= mlx5_wq_ll_get_wqe_next_ix(wq
, wq
->head
);
592 mlx5_wq_ll_push(wq
, next_wqe_index
);
595 /* ensure wqes are visible to device before updating doorbell record */
598 mlx5_wq_ll_update_db_record(wq
);
601 /* This function returns the size of the continuous free space inside a bitmap
602 * that starts from first and no longer than len including circular ones.
604 static int bitmap_find_window(unsigned long *bitmap
, int len
,
605 int bitmap_size
, int first
)
609 next_one
= find_next_bit(bitmap
, bitmap_size
, first
);
610 if (next_one
== bitmap_size
) {
611 if (bitmap_size
- first
>= len
)
613 next_one
= find_next_bit(bitmap
, bitmap_size
, 0);
614 count
= next_one
+ bitmap_size
- first
;
616 count
= next_one
- first
;
619 return min(len
, count
);
622 static void build_klm_umr(struct mlx5e_icosq
*sq
, struct mlx5e_umr_wqe
*umr_wqe
,
623 __be32 key
, u16 offset
, u16 klm_len
, u16 wqe_bbs
)
625 memset(umr_wqe
, 0, offsetof(struct mlx5e_umr_wqe
, inline_klms
));
626 umr_wqe
->ctrl
.opmod_idx_opcode
=
627 cpu_to_be32((sq
->pc
<< MLX5_WQE_CTRL_WQE_INDEX_SHIFT
) |
629 umr_wqe
->ctrl
.umr_mkey
= key
;
630 umr_wqe
->ctrl
.qpn_ds
= cpu_to_be32((sq
->sqn
<< MLX5_WQE_CTRL_QPN_SHIFT
)
631 | MLX5E_KLM_UMR_DS_CNT(klm_len
));
632 umr_wqe
->uctrl
.flags
= MLX5_UMR_TRANSLATION_OFFSET_EN
| MLX5_UMR_INLINE
;
633 umr_wqe
->uctrl
.xlt_offset
= cpu_to_be16(offset
);
634 umr_wqe
->uctrl
.xlt_octowords
= cpu_to_be16(klm_len
);
635 umr_wqe
->uctrl
.mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
638 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq
*rq
,
639 struct mlx5e_icosq
*sq
,
640 u16 klm_entries
, u16 index
)
642 struct mlx5e_shampo_hd
*shampo
= rq
->mpwqe
.shampo
;
643 u16 entries
, pi
, header_offset
, err
, wqe_bbs
, new_entries
;
644 u32 lkey
= rq
->mdev
->mlx5e_res
.hw_objs
.mkey
;
645 u16 page_index
= shampo
->curr_page_index
;
646 struct mlx5e_frag_page
*frag_page
;
647 u64 addr
= shampo
->last_addr
;
648 struct mlx5e_dma_info
*dma_info
;
649 struct mlx5e_umr_wqe
*umr_wqe
;
652 headroom
= rq
->buff
.headroom
;
653 new_entries
= klm_entries
- (shampo
->pi
& (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT
- 1));
654 entries
= ALIGN(klm_entries
, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT
);
655 wqe_bbs
= MLX5E_KLM_UMR_WQEBBS(entries
);
656 pi
= mlx5e_icosq_get_next_pi(sq
, wqe_bbs
);
657 umr_wqe
= mlx5_wq_cyc_get_wqe(&sq
->wq
, pi
);
658 build_klm_umr(sq
, umr_wqe
, shampo
->key
, index
, entries
, wqe_bbs
);
660 frag_page
= &shampo
->pages
[page_index
];
662 for (i
= 0; i
< entries
; i
++, index
++) {
663 dma_info
= &shampo
->info
[index
];
664 if (i
>= klm_entries
|| (index
< shampo
->pi
&& shampo
->pi
- index
<
665 MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT
))
667 header_offset
= (index
& (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE
- 1)) <<
668 MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE
;
669 if (!(header_offset
& (PAGE_SIZE
- 1))) {
670 page_index
= (page_index
+ 1) & (shampo
->hd_per_wq
- 1);
671 frag_page
= &shampo
->pages
[page_index
];
673 err
= mlx5e_page_alloc_fragmented(rq
, frag_page
);
677 addr
= page_pool_get_dma_addr(frag_page
->page
);
679 dma_info
->addr
= addr
;
680 dma_info
->frag_page
= frag_page
;
682 dma_info
->addr
= addr
+ header_offset
;
683 dma_info
->frag_page
= frag_page
;
687 umr_wqe
->inline_klms
[i
].bcount
=
688 cpu_to_be32(MLX5E_RX_MAX_HEAD
);
689 umr_wqe
->inline_klms
[i
].key
= cpu_to_be32(lkey
);
690 umr_wqe
->inline_klms
[i
].va
=
691 cpu_to_be64(dma_info
->addr
+ headroom
);
694 sq
->db
.wqe_info
[pi
] = (struct mlx5e_icosq_wqe_info
) {
695 .wqe_type
= MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR
,
696 .num_wqebbs
= wqe_bbs
,
697 .shampo
.len
= new_entries
,
700 shampo
->pi
= (shampo
->pi
+ new_entries
) & (shampo
->hd_per_wq
- 1);
701 shampo
->curr_page_index
= page_index
;
702 shampo
->last_addr
= addr
;
704 sq
->doorbell_cseg
= &umr_wqe
->ctrl
;
710 dma_info
= &shampo
->info
[--index
];
711 if (!(i
& (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE
- 1))) {
712 dma_info
->addr
= ALIGN_DOWN(dma_info
->addr
, PAGE_SIZE
);
713 mlx5e_page_release_fragmented(rq
, dma_info
->frag_page
);
716 rq
->stats
->buff_alloc_err
++;
720 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq
*rq
)
722 struct mlx5e_shampo_hd
*shampo
= rq
->mpwqe
.shampo
;
723 u16 klm_entries
, num_wqe
, index
, entries_before
;
724 struct mlx5e_icosq
*sq
= rq
->icosq
;
725 int i
, err
, max_klm_entries
, len
;
727 max_klm_entries
= MLX5E_MAX_KLM_PER_WQE(rq
->mdev
);
728 klm_entries
= bitmap_find_window(shampo
->bitmap
,
730 shampo
->hd_per_wq
, shampo
->pi
);
734 klm_entries
+= (shampo
->pi
& (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT
- 1));
735 index
= ALIGN_DOWN(shampo
->pi
, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT
);
736 entries_before
= shampo
->hd_per_wq
- index
;
738 if (unlikely(entries_before
< klm_entries
))
739 num_wqe
= DIV_ROUND_UP(entries_before
, max_klm_entries
) +
740 DIV_ROUND_UP(klm_entries
- entries_before
, max_klm_entries
);
742 num_wqe
= DIV_ROUND_UP(klm_entries
, max_klm_entries
);
744 for (i
= 0; i
< num_wqe
; i
++) {
745 len
= (klm_entries
> max_klm_entries
) ? max_klm_entries
:
747 if (unlikely(index
+ len
> shampo
->hd_per_wq
))
748 len
= shampo
->hd_per_wq
- index
;
749 err
= mlx5e_build_shampo_hd_umr(rq
, sq
, len
, index
);
752 index
= (index
+ len
) & (rq
->mpwqe
.shampo
->hd_per_wq
- 1);
759 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq
*rq
, u16 ix
)
761 struct mlx5e_mpw_info
*wi
= mlx5e_get_mpw_info(rq
, ix
);
762 struct mlx5e_icosq
*sq
= rq
->icosq
;
763 struct mlx5e_frag_page
*frag_page
;
764 struct mlx5_wq_cyc
*wq
= &sq
->wq
;
765 struct mlx5e_umr_wqe
*umr_wqe
;
766 u32 offset
; /* 17-bit value with MTT. */
771 if (test_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
)) {
772 err
= mlx5e_alloc_rx_hd_mpwqe(rq
);
777 pi
= mlx5e_icosq_get_next_pi(sq
, rq
->mpwqe
.umr_wqebbs
);
778 umr_wqe
= mlx5_wq_cyc_get_wqe(wq
, pi
);
779 memcpy(umr_wqe
, &rq
->mpwqe
.umr_wqe
, sizeof(struct mlx5e_umr_wqe
));
781 frag_page
= &wi
->alloc_units
.frag_pages
[0];
783 for (i
= 0; i
< rq
->mpwqe
.pages_per_wqe
; i
++, frag_page
++) {
786 err
= mlx5e_page_alloc_fragmented(rq
, frag_page
);
789 addr
= page_pool_get_dma_addr(frag_page
->page
);
790 umr_wqe
->inline_mtts
[i
] = (struct mlx5_mtt
) {
791 .ptag
= cpu_to_be64(addr
| MLX5_EN_WR
),
795 /* Pad if needed, in case the value set to ucseg->xlt_octowords
796 * in mlx5e_build_umr_wqe() needed alignment.
798 if (rq
->mpwqe
.pages_per_wqe
& (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT
- 1)) {
799 int pad
= ALIGN(rq
->mpwqe
.pages_per_wqe
, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT
) -
800 rq
->mpwqe
.pages_per_wqe
;
802 memset(&umr_wqe
->inline_mtts
[rq
->mpwqe
.pages_per_wqe
], 0,
803 sizeof(*umr_wqe
->inline_mtts
) * pad
);
806 bitmap_zero(wi
->skip_release_bitmap
, rq
->mpwqe
.pages_per_wqe
);
807 wi
->consumed_strides
= 0;
809 umr_wqe
->ctrl
.opmod_idx_opcode
=
810 cpu_to_be32((sq
->pc
<< MLX5_WQE_CTRL_WQE_INDEX_SHIFT
) |
813 offset
= (ix
* rq
->mpwqe
.mtts_per_wqe
) * sizeof(struct mlx5_mtt
) / MLX5_OCTWORD
;
814 umr_wqe
->uctrl
.xlt_offset
= cpu_to_be16(offset
);
816 sq
->db
.wqe_info
[pi
] = (struct mlx5e_icosq_wqe_info
) {
817 .wqe_type
= MLX5E_ICOSQ_WQE_UMR_RX
,
818 .num_wqebbs
= rq
->mpwqe
.umr_wqebbs
,
822 sq
->pc
+= rq
->mpwqe
.umr_wqebbs
;
824 sq
->doorbell_cseg
= &umr_wqe
->ctrl
;
831 mlx5e_page_release_fragmented(rq
, frag_page
);
834 bitmap_fill(wi
->skip_release_bitmap
, rq
->mpwqe
.pages_per_wqe
);
837 rq
->stats
->buff_alloc_err
++;
842 /* This function is responsible to dealloc SHAMPO header buffer.
843 * close == true specifies that we are in the middle of closing RQ operation so
844 * we go over all the entries and if they are not in use we free them,
845 * otherwise we only go over a specific range inside the header buffer that are
848 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq
*rq
, u16 len
, u16 start
, bool close
)
850 struct mlx5e_shampo_hd
*shampo
= rq
->mpwqe
.shampo
;
851 struct mlx5e_frag_page
*deleted_page
= NULL
;
852 int hd_per_wq
= shampo
->hd_per_wq
;
853 struct mlx5e_dma_info
*hd_info
;
854 int i
, index
= start
;
856 for (i
= 0; i
< len
; i
++, index
++) {
857 if (index
== hd_per_wq
)
860 if (close
&& !test_bit(index
, shampo
->bitmap
))
863 hd_info
= &shampo
->info
[index
];
864 hd_info
->addr
= ALIGN_DOWN(hd_info
->addr
, PAGE_SIZE
);
865 if (hd_info
->frag_page
&& hd_info
->frag_page
!= deleted_page
) {
866 deleted_page
= hd_info
->frag_page
;
867 mlx5e_page_release_fragmented(rq
, hd_info
->frag_page
);
870 hd_info
->frag_page
= NULL
;
873 if (start
+ len
> hd_per_wq
) {
874 len
-= hd_per_wq
- start
;
875 bitmap_clear(shampo
->bitmap
, start
, hd_per_wq
- start
);
879 bitmap_clear(shampo
->bitmap
, start
, len
);
882 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq
*rq
, u16 ix
)
884 struct mlx5e_mpw_info
*wi
= mlx5e_get_mpw_info(rq
, ix
);
885 /* This function is called on rq/netdev close. */
886 mlx5e_free_rx_mpwqe(rq
, wi
);
888 /* Avoid a second release of the wqe pages: dealloc is called also
889 * for missing wqes on an already flushed RQ.
891 bitmap_fill(wi
->skip_release_bitmap
, rq
->mpwqe
.pages_per_wqe
);
894 INDIRECT_CALLABLE_SCOPE
bool mlx5e_post_rx_wqes(struct mlx5e_rq
*rq
)
896 struct mlx5_wq_cyc
*wq
= &rq
->wqe
.wq
;
901 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED
, &rq
->state
)))
904 if (mlx5_wq_cyc_missing(wq
) < rq
->wqe
.info
.wqe_bulk
)
908 page_pool_nid_changed(rq
->page_pool
, numa_mem_id());
910 wqe_bulk
= mlx5_wq_cyc_missing(wq
);
911 head
= mlx5_wq_cyc_get_head(wq
);
913 /* Don't allow any newly allocated WQEs to share the same page with old
914 * WQEs that aren't completed yet. Stop earlier.
916 wqe_bulk
-= (head
+ wqe_bulk
) & rq
->wqe
.info
.wqe_index_mask
;
919 count
= mlx5e_refill_rx_wqes(rq
, head
, wqe_bulk
);
920 } else if (likely(!rq
->xsk_pool
->dma_need_sync
)) {
921 mlx5e_xsk_free_rx_wqes(rq
, head
, wqe_bulk
);
922 count
= mlx5e_xsk_alloc_rx_wqes_batched(rq
, head
, wqe_bulk
);
924 mlx5e_xsk_free_rx_wqes(rq
, head
, wqe_bulk
);
925 /* If dma_need_sync is true, it's more efficient to call
926 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
927 * because the latter does the same check and returns only one
930 count
= mlx5e_xsk_alloc_rx_wqes(rq
, head
, wqe_bulk
);
933 mlx5_wq_cyc_push_n(wq
, count
);
934 if (unlikely(count
!= wqe_bulk
)) {
935 rq
->stats
->buff_alloc_err
++;
939 /* ensure wqes are visible to device before updating doorbell record */
942 mlx5_wq_cyc_update_db_record(wq
);
947 void mlx5e_free_icosq_descs(struct mlx5e_icosq
*sq
)
953 while (sqcc
!= sq
->pc
) {
954 struct mlx5e_icosq_wqe_info
*wi
;
957 ci
= mlx5_wq_cyc_ctr2ix(&sq
->wq
, sqcc
);
958 wi
= &sq
->db
.wqe_info
[ci
];
959 sqcc
+= wi
->num_wqebbs
;
960 #ifdef CONFIG_MLX5_EN_TLS
961 switch (wi
->wqe_type
) {
962 case MLX5E_ICOSQ_WQE_SET_PSV_TLS
:
963 mlx5e_ktls_handle_ctx_completion(wi
);
965 case MLX5E_ICOSQ_WQE_GET_PSV_TLS
:
966 mlx5e_ktls_handle_get_psv_completion(wi
, sq
);
974 static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr
,
975 struct mlx5e_icosq
*sq
)
977 struct mlx5e_channel
*c
= container_of(sq
, struct mlx5e_channel
, icosq
);
978 struct mlx5e_shampo_hd
*shampo
;
979 /* assume 1:1 relationship between RQ and icosq */
980 struct mlx5e_rq
*rq
= &c
->rq
;
981 int end
, from
, len
= umr
.len
;
983 shampo
= rq
->mpwqe
.shampo
;
984 end
= shampo
->hd_per_wq
;
986 if (from
+ len
> shampo
->hd_per_wq
) {
988 bitmap_set(shampo
->bitmap
, from
, end
- from
);
992 bitmap_set(shampo
->bitmap
, from
, len
);
993 shampo
->ci
= (shampo
->ci
+ umr
.len
) & (shampo
->hd_per_wq
- 1);
996 int mlx5e_poll_ico_cq(struct mlx5e_cq
*cq
)
998 struct mlx5e_icosq
*sq
= container_of(cq
, struct mlx5e_icosq
, cq
);
999 struct mlx5_cqe64
*cqe
;
1003 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED
, &sq
->state
)))
1006 cqe
= mlx5_cqwq_get_cqe(&cq
->wq
);
1010 /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1011 * otherwise a cq overrun may occur
1020 mlx5_cqwq_pop(&cq
->wq
);
1022 wqe_counter
= be16_to_cpu(cqe
->wqe_counter
);
1025 struct mlx5e_icosq_wqe_info
*wi
;
1028 last_wqe
= (sqcc
== wqe_counter
);
1030 ci
= mlx5_wq_cyc_ctr2ix(&sq
->wq
, sqcc
);
1031 wi
= &sq
->db
.wqe_info
[ci
];
1032 sqcc
+= wi
->num_wqebbs
;
1034 if (last_wqe
&& unlikely(get_cqe_opcode(cqe
) != MLX5_CQE_REQ
)) {
1035 netdev_WARN_ONCE(cq
->netdev
,
1036 "Bad OP in ICOSQ CQE: 0x%x\n",
1037 get_cqe_opcode(cqe
));
1038 mlx5e_dump_error_cqe(&sq
->cq
, sq
->sqn
,
1039 (struct mlx5_err_cqe
*)cqe
);
1040 mlx5_wq_cyc_wqe_dump(&sq
->wq
, ci
, wi
->num_wqebbs
);
1041 if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING
, &sq
->state
))
1042 queue_work(cq
->priv
->wq
, &sq
->recover_work
);
1046 switch (wi
->wqe_type
) {
1047 case MLX5E_ICOSQ_WQE_UMR_RX
:
1048 wi
->umr
.rq
->mpwqe
.umr_completed
++;
1050 case MLX5E_ICOSQ_WQE_NOP
:
1052 case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR
:
1053 mlx5e_handle_shampo_hd_umr(wi
->shampo
, sq
);
1055 #ifdef CONFIG_MLX5_EN_TLS
1056 case MLX5E_ICOSQ_WQE_UMR_TLS
:
1058 case MLX5E_ICOSQ_WQE_SET_PSV_TLS
:
1059 mlx5e_ktls_handle_ctx_completion(wi
);
1061 case MLX5E_ICOSQ_WQE_GET_PSV_TLS
:
1062 mlx5e_ktls_handle_get_psv_completion(wi
, sq
);
1066 netdev_WARN_ONCE(cq
->netdev
,
1067 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
1070 } while (!last_wqe
);
1071 } while ((++i
< MLX5E_TX_CQ_POLL_BUDGET
) && (cqe
= mlx5_cqwq_get_cqe(&cq
->wq
)));
1075 mlx5_cqwq_update_db_record(&cq
->wq
);
1080 INDIRECT_CALLABLE_SCOPE
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq
*rq
)
1082 struct mlx5_wq_ll
*wq
= &rq
->mpwqe
.wq
;
1083 u8 umr_completed
= rq
->mpwqe
.umr_completed
;
1084 struct mlx5e_icosq
*sq
= rq
->icosq
;
1089 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED
, &rq
->state
)))
1092 if (umr_completed
) {
1093 mlx5e_post_rx_mpwqe(rq
, umr_completed
);
1094 rq
->mpwqe
.umr_in_progress
-= umr_completed
;
1095 rq
->mpwqe
.umr_completed
= 0;
1098 missing
= mlx5_wq_ll_missing(wq
) - rq
->mpwqe
.umr_in_progress
;
1100 if (unlikely(rq
->mpwqe
.umr_in_progress
> rq
->mpwqe
.umr_last_bulk
))
1101 rq
->stats
->congst_umr
++;
1103 if (likely(missing
< rq
->mpwqe
.min_wqe_bulk
))
1107 page_pool_nid_changed(rq
->page_pool
, numa_mem_id());
1109 head
= rq
->mpwqe
.actual_wq_head
;
1112 struct mlx5e_mpw_info
*wi
= mlx5e_get_mpw_info(rq
, head
);
1114 /* Deferred free for better page pool cache usage. */
1115 mlx5e_free_rx_mpwqe(rq
, wi
);
1117 alloc_err
= rq
->xsk_pool
? mlx5e_xsk_alloc_rx_mpwqe(rq
, head
) :
1118 mlx5e_alloc_rx_mpwqe(rq
, head
);
1120 if (unlikely(alloc_err
))
1122 head
= mlx5_wq_ll_get_wqe_next_ix(wq
, head
);
1125 rq
->mpwqe
.umr_last_bulk
= missing
- i
;
1126 if (sq
->doorbell_cseg
) {
1127 mlx5e_notify_hw(&sq
->wq
, sq
->pc
, sq
->uar_map
, sq
->doorbell_cseg
);
1128 sq
->doorbell_cseg
= NULL
;
1131 rq
->mpwqe
.umr_in_progress
+= rq
->mpwqe
.umr_last_bulk
;
1132 rq
->mpwqe
.actual_wq_head
= head
;
1134 /* If XSK Fill Ring doesn't have enough frames, report the error, so
1135 * that one of the actions can be performed:
1136 * 1. If need_wakeup is used, signal that the application has to kick
1137 * the driver when it refills the Fill Ring.
1138 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
1140 if (unlikely(alloc_err
== -ENOMEM
&& rq
->xsk_pool
))
1146 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64
*cqe
, struct tcphdr
*tcp
)
1148 u8 l4_hdr_type
= get_cqe_l4_hdr_type(cqe
);
1149 u8 tcp_ack
= (l4_hdr_type
== CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA
) ||
1150 (l4_hdr_type
== CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA
);
1153 tcp
->psh
= get_cqe_lro_tcppsh(cqe
);
1157 tcp
->ack_seq
= cqe
->lro
.ack_seq_num
;
1158 tcp
->window
= cqe
->lro
.tcp_win
;
1162 static void mlx5e_lro_update_hdr(struct sk_buff
*skb
, struct mlx5_cqe64
*cqe
,
1165 struct ethhdr
*eth
= (struct ethhdr
*)(skb
->data
);
1167 int network_depth
= 0;
1173 proto
= __vlan_get_protocol(skb
, eth
->h_proto
, &network_depth
);
1175 tot_len
= cqe_bcnt
- network_depth
;
1176 ip_p
= skb
->data
+ network_depth
;
1178 if (proto
== htons(ETH_P_IP
)) {
1179 struct iphdr
*ipv4
= ip_p
;
1181 tcp
= ip_p
+ sizeof(struct iphdr
);
1182 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
1184 ipv4
->ttl
= cqe
->lro
.min_ttl
;
1185 ipv4
->tot_len
= cpu_to_be16(tot_len
);
1187 ipv4
->check
= ip_fast_csum((unsigned char *)ipv4
,
1190 mlx5e_lro_update_tcp_hdr(cqe
, tcp
);
1191 check
= csum_partial(tcp
, tcp
->doff
* 4,
1192 csum_unfold((__force __sum16
)cqe
->check_sum
));
1193 /* Almost done, don't forget the pseudo header */
1194 tcp
->check
= csum_tcpudp_magic(ipv4
->saddr
, ipv4
->daddr
,
1195 tot_len
- sizeof(struct iphdr
),
1196 IPPROTO_TCP
, check
);
1198 u16 payload_len
= tot_len
- sizeof(struct ipv6hdr
);
1199 struct ipv6hdr
*ipv6
= ip_p
;
1201 tcp
= ip_p
+ sizeof(struct ipv6hdr
);
1202 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
1204 ipv6
->hop_limit
= cqe
->lro
.min_ttl
;
1205 ipv6
->payload_len
= cpu_to_be16(payload_len
);
1207 mlx5e_lro_update_tcp_hdr(cqe
, tcp
);
1208 check
= csum_partial(tcp
, tcp
->doff
* 4,
1209 csum_unfold((__force __sum16
)cqe
->check_sum
));
1210 /* Almost done, don't forget the pseudo header */
1211 tcp
->check
= csum_ipv6_magic(&ipv6
->saddr
, &ipv6
->daddr
, payload_len
,
1212 IPPROTO_TCP
, check
);
1216 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq
*rq
, u16 header_index
)
1218 struct mlx5e_dma_info
*last_head
= &rq
->mpwqe
.shampo
->info
[header_index
];
1219 u16 head_offset
= (last_head
->addr
& (PAGE_SIZE
- 1)) + rq
->buff
.headroom
;
1221 return page_address(last_head
->frag_page
->page
) + head_offset
;
1224 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq
*rq
, struct iphdr
*ipv4
)
1226 int udp_off
= rq
->hw_gro_data
->fk
.control
.thoff
;
1227 struct sk_buff
*skb
= rq
->hw_gro_data
->skb
;
1230 uh
= (struct udphdr
*)(skb
->data
+ udp_off
);
1231 uh
->len
= htons(skb
->len
- udp_off
);
1234 uh
->check
= ~udp_v4_check(skb
->len
- udp_off
, ipv4
->saddr
,
1237 skb
->csum_start
= (unsigned char *)uh
- skb
->head
;
1238 skb
->csum_offset
= offsetof(struct udphdr
, check
);
1240 skb_shinfo(skb
)->gso_type
|= SKB_GSO_UDP_L4
;
1243 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq
*rq
, struct ipv6hdr
*ipv6
)
1245 int udp_off
= rq
->hw_gro_data
->fk
.control
.thoff
;
1246 struct sk_buff
*skb
= rq
->hw_gro_data
->skb
;
1249 uh
= (struct udphdr
*)(skb
->data
+ udp_off
);
1250 uh
->len
= htons(skb
->len
- udp_off
);
1253 uh
->check
= ~udp_v6_check(skb
->len
- udp_off
, &ipv6
->saddr
,
1256 skb
->csum_start
= (unsigned char *)uh
- skb
->head
;
1257 skb
->csum_offset
= offsetof(struct udphdr
, check
);
1259 skb_shinfo(skb
)->gso_type
|= SKB_GSO_UDP_L4
;
1262 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
,
1263 struct tcphdr
*skb_tcp_hd
)
1265 u16 header_index
= mlx5e_shampo_get_cqe_header_index(rq
, cqe
);
1266 struct tcphdr
*last_tcp_hd
;
1269 last_hd_addr
= mlx5e_shampo_get_packet_hd(rq
, header_index
);
1270 last_tcp_hd
= last_hd_addr
+ ETH_HLEN
+ rq
->hw_gro_data
->fk
.control
.thoff
;
1271 tcp_flag_word(skb_tcp_hd
) |= tcp_flag_word(last_tcp_hd
) & (TCP_FLAG_FIN
| TCP_FLAG_PSH
);
1274 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq
*rq
, struct iphdr
*ipv4
,
1275 struct mlx5_cqe64
*cqe
, bool match
)
1277 int tcp_off
= rq
->hw_gro_data
->fk
.control
.thoff
;
1278 struct sk_buff
*skb
= rq
->hw_gro_data
->skb
;
1281 tcp
= (struct tcphdr
*)(skb
->data
+ tcp_off
);
1283 mlx5e_shampo_update_fin_psh_flags(rq
, cqe
, tcp
);
1285 tcp
->check
= ~tcp_v4_check(skb
->len
- tcp_off
, ipv4
->saddr
,
1287 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV4
;
1288 if (ntohs(ipv4
->id
) == rq
->hw_gro_data
->second_ip_id
)
1289 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCP_FIXEDID
;
1291 skb
->csum_start
= (unsigned char *)tcp
- skb
->head
;
1292 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
1295 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCP_ECN
;
1298 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq
*rq
, struct ipv6hdr
*ipv6
,
1299 struct mlx5_cqe64
*cqe
, bool match
)
1301 int tcp_off
= rq
->hw_gro_data
->fk
.control
.thoff
;
1302 struct sk_buff
*skb
= rq
->hw_gro_data
->skb
;
1305 tcp
= (struct tcphdr
*)(skb
->data
+ tcp_off
);
1307 mlx5e_shampo_update_fin_psh_flags(rq
, cqe
, tcp
);
1309 tcp
->check
= ~tcp_v6_check(skb
->len
- tcp_off
, &ipv6
->saddr
,
1311 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV6
;
1312 skb
->csum_start
= (unsigned char *)tcp
- skb
->head
;
1313 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
1316 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCP_ECN
;
1319 static void mlx5e_shampo_update_hdr(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
, bool match
)
1321 bool is_ipv4
= (rq
->hw_gro_data
->fk
.basic
.n_proto
== htons(ETH_P_IP
));
1322 struct sk_buff
*skb
= rq
->hw_gro_data
->skb
;
1324 skb_shinfo(skb
)->gso_segs
= NAPI_GRO_CB(skb
)->count
;
1325 skb
->ip_summed
= CHECKSUM_PARTIAL
;
1328 int nhoff
= rq
->hw_gro_data
->fk
.control
.thoff
- sizeof(struct iphdr
);
1329 struct iphdr
*ipv4
= (struct iphdr
*)(skb
->data
+ nhoff
);
1330 __be16 newlen
= htons(skb
->len
- nhoff
);
1332 csum_replace2(&ipv4
->check
, ipv4
->tot_len
, newlen
);
1333 ipv4
->tot_len
= newlen
;
1335 if (ipv4
->protocol
== IPPROTO_TCP
)
1336 mlx5e_shampo_update_ipv4_tcp_hdr(rq
, ipv4
, cqe
, match
);
1338 mlx5e_shampo_update_ipv4_udp_hdr(rq
, ipv4
);
1340 int nhoff
= rq
->hw_gro_data
->fk
.control
.thoff
- sizeof(struct ipv6hdr
);
1341 struct ipv6hdr
*ipv6
= (struct ipv6hdr
*)(skb
->data
+ nhoff
);
1343 ipv6
->payload_len
= htons(skb
->len
- nhoff
- sizeof(*ipv6
));
1345 if (ipv6
->nexthdr
== IPPROTO_TCP
)
1346 mlx5e_shampo_update_ipv6_tcp_hdr(rq
, ipv6
, cqe
, match
);
1348 mlx5e_shampo_update_ipv6_udp_hdr(rq
, ipv6
);
1352 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64
*cqe
,
1353 struct sk_buff
*skb
)
1355 u8 cht
= cqe
->rss_hash_type
;
1356 int ht
= (cht
& CQE_RSS_HTYPE_L4
) ? PKT_HASH_TYPE_L4
:
1357 (cht
& CQE_RSS_HTYPE_IP
) ? PKT_HASH_TYPE_L3
:
1359 skb_set_hash(skb
, be32_to_cpu(cqe
->rss_hash_result
), ht
);
1362 static inline bool is_last_ethertype_ip(struct sk_buff
*skb
, int *network_depth
,
1365 *proto
= ((struct ethhdr
*)skb
->data
)->h_proto
;
1366 *proto
= __vlan_get_protocol(skb
, *proto
, network_depth
);
1368 if (*proto
== htons(ETH_P_IP
))
1369 return pskb_may_pull(skb
, *network_depth
+ sizeof(struct iphdr
));
1371 if (*proto
== htons(ETH_P_IPV6
))
1372 return pskb_may_pull(skb
, *network_depth
+ sizeof(struct ipv6hdr
));
1377 static inline void mlx5e_enable_ecn(struct mlx5e_rq
*rq
, struct sk_buff
*skb
)
1379 int network_depth
= 0;
1384 if (unlikely(!is_last_ethertype_ip(skb
, &network_depth
, &proto
)))
1387 ip
= skb
->data
+ network_depth
;
1388 rc
= ((proto
== htons(ETH_P_IP
)) ? IP_ECN_set_ce((struct iphdr
*)ip
) :
1389 IP6_ECN_set_ce(skb
, (struct ipv6hdr
*)ip
));
1391 rq
->stats
->ecn_mark
+= !!rc
;
1394 static u8
get_ip_proto(struct sk_buff
*skb
, int network_depth
, __be16 proto
)
1396 void *ip_p
= skb
->data
+ network_depth
;
1398 return (proto
== htons(ETH_P_IP
)) ? ((struct iphdr
*)ip_p
)->protocol
:
1399 ((struct ipv6hdr
*)ip_p
)->nexthdr
;
1402 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1404 #define MAX_PADDING 8
1407 tail_padding_csum_slow(struct sk_buff
*skb
, int offset
, int len
,
1408 struct mlx5e_rq_stats
*stats
)
1410 stats
->csum_complete_tail_slow
++;
1411 skb
->csum
= csum_block_add(skb
->csum
,
1412 skb_checksum(skb
, offset
, len
, 0),
1417 tail_padding_csum(struct sk_buff
*skb
, int offset
,
1418 struct mlx5e_rq_stats
*stats
)
1420 u8 tail_padding
[MAX_PADDING
];
1421 int len
= skb
->len
- offset
;
1424 if (unlikely(len
> MAX_PADDING
)) {
1425 tail_padding_csum_slow(skb
, offset
, len
, stats
);
1429 tail
= skb_header_pointer(skb
, offset
, len
, tail_padding
);
1430 if (unlikely(!tail
)) {
1431 tail_padding_csum_slow(skb
, offset
, len
, stats
);
1435 stats
->csum_complete_tail
++;
1436 skb
->csum
= csum_block_add(skb
->csum
, csum_partial(tail
, len
, 0), offset
);
1440 mlx5e_skb_csum_fixup(struct sk_buff
*skb
, int network_depth
, __be16 proto
,
1441 struct mlx5e_rq_stats
*stats
)
1443 struct ipv6hdr
*ip6
;
1447 /* Fixup vlan headers, if any */
1448 if (network_depth
> ETH_HLEN
)
1449 /* CQE csum is calculated from the IP header and does
1450 * not cover VLAN headers (if present). This will add
1451 * the checksum manually.
1453 skb
->csum
= csum_partial(skb
->data
+ ETH_HLEN
,
1454 network_depth
- ETH_HLEN
,
1457 /* Fixup tail padding, if any */
1459 case htons(ETH_P_IP
):
1460 ip4
= (struct iphdr
*)(skb
->data
+ network_depth
);
1461 pkt_len
= network_depth
+ ntohs(ip4
->tot_len
);
1463 case htons(ETH_P_IPV6
):
1464 ip6
= (struct ipv6hdr
*)(skb
->data
+ network_depth
);
1465 pkt_len
= network_depth
+ sizeof(*ip6
) + ntohs(ip6
->payload_len
);
1471 if (likely(pkt_len
>= skb
->len
))
1474 tail_padding_csum(skb
, pkt_len
, stats
);
1477 static inline void mlx5e_handle_csum(struct net_device
*netdev
,
1478 struct mlx5_cqe64
*cqe
,
1479 struct mlx5e_rq
*rq
,
1480 struct sk_buff
*skb
,
1483 struct mlx5e_rq_stats
*stats
= rq
->stats
;
1484 int network_depth
= 0;
1487 if (unlikely(!(netdev
->features
& NETIF_F_RXCSUM
)))
1491 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1492 stats
->csum_unnecessary
++;
1496 /* True when explicitly set via priv flag, or XDP prog is loaded */
1497 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE
, &rq
->state
) ||
1498 get_cqe_tls_offload(cqe
))
1499 goto csum_unnecessary
;
1501 /* CQE csum doesn't cover padding octets in short ethernet
1502 * frames. And the pad field is appended prior to calculating
1503 * and appending the FCS field.
1505 * Detecting these padded frames requires to verify and parse
1506 * IP headers, so we simply force all those small frames to be
1507 * CHECKSUM_UNNECESSARY even if they are not padded.
1509 if (short_frame(skb
->len
))
1510 goto csum_unnecessary
;
1512 if (likely(is_last_ethertype_ip(skb
, &network_depth
, &proto
))) {
1513 if (unlikely(get_ip_proto(skb
, network_depth
, proto
) == IPPROTO_SCTP
))
1514 goto csum_unnecessary
;
1516 stats
->csum_complete
++;
1517 skb
->ip_summed
= CHECKSUM_COMPLETE
;
1518 skb
->csum
= csum_unfold((__force __sum16
)cqe
->check_sum
);
1520 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL
, &rq
->state
))
1521 return; /* CQE csum covers all received bytes */
1523 /* csum might need some fixups ...*/
1524 mlx5e_skb_csum_fixup(skb
, network_depth
, proto
, stats
);
1529 if (likely((cqe
->hds_ip_ext
& CQE_L3_OK
) &&
1530 (cqe
->hds_ip_ext
& CQE_L4_OK
))) {
1531 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1532 if (cqe_is_tunneled(cqe
)) {
1533 skb
->csum_level
= 1;
1534 skb
->encapsulation
= 1;
1535 stats
->csum_unnecessary_inner
++;
1538 stats
->csum_unnecessary
++;
1542 skb
->ip_summed
= CHECKSUM_NONE
;
1546 #define MLX5E_CE_BIT_MASK 0x80
1548 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64
*cqe
,
1550 struct mlx5e_rq
*rq
,
1551 struct sk_buff
*skb
)
1553 u8 lro_num_seg
= be32_to_cpu(cqe
->srqn
) >> 24;
1554 struct mlx5e_rq_stats
*stats
= rq
->stats
;
1555 struct net_device
*netdev
= rq
->netdev
;
1557 skb
->mac_len
= ETH_HLEN
;
1559 if (unlikely(get_cqe_tls_offload(cqe
)))
1560 mlx5e_ktls_handle_rx_skb(rq
, skb
, cqe
, &cqe_bcnt
);
1562 if (unlikely(mlx5_ipsec_is_rx_flow(cqe
)))
1563 mlx5e_ipsec_offload_handle_rx_skb(netdev
, skb
,
1564 be32_to_cpu(cqe
->ft_metadata
));
1566 if (unlikely(mlx5e_macsec_is_rx_flow(cqe
)))
1567 mlx5e_macsec_offload_handle_rx_skb(netdev
, skb
, cqe
);
1569 if (lro_num_seg
> 1) {
1570 mlx5e_lro_update_hdr(skb
, cqe
, cqe_bcnt
);
1571 skb_shinfo(skb
)->gso_size
= DIV_ROUND_UP(cqe_bcnt
, lro_num_seg
);
1572 /* Subtract one since we already counted this as one
1573 * "regular" packet in mlx5e_complete_rx_cqe()
1575 stats
->packets
+= lro_num_seg
- 1;
1576 stats
->lro_packets
++;
1577 stats
->lro_bytes
+= cqe_bcnt
;
1580 if (unlikely(mlx5e_rx_hw_stamp(rq
->tstamp
)))
1581 skb_hwtstamps(skb
)->hwtstamp
= mlx5e_cqe_ts_to_ns(rq
->ptp_cyc2time
,
1582 rq
->clock
, get_cqe_ts(cqe
));
1583 skb_record_rx_queue(skb
, rq
->ix
);
1585 if (likely(netdev
->features
& NETIF_F_RXHASH
))
1586 mlx5e_skb_set_hash(cqe
, skb
);
1588 if (cqe_has_vlan(cqe
)) {
1589 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
1590 be16_to_cpu(cqe
->vlan_info
));
1591 stats
->removed_vlan_packets
++;
1594 skb
->mark
= be32_to_cpu(cqe
->sop_drop_qpn
) & MLX5E_TC_FLOW_ID_MASK
;
1596 mlx5e_handle_csum(netdev
, cqe
, rq
, skb
, !!lro_num_seg
);
1597 /* checking CE bit in cqe - MSB in ml_path field */
1598 if (unlikely(cqe
->ml_path
& MLX5E_CE_BIT_MASK
))
1599 mlx5e_enable_ecn(rq
, skb
);
1601 skb
->protocol
= eth_type_trans(skb
, netdev
);
1603 if (unlikely(mlx5e_skb_is_multicast(skb
)))
1604 stats
->mcast_packets
++;
1607 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq
*rq
,
1608 struct mlx5_cqe64
*cqe
,
1610 struct sk_buff
*skb
)
1612 struct mlx5e_rq_stats
*stats
= rq
->stats
;
1615 stats
->gro_packets
++;
1616 stats
->bytes
+= cqe_bcnt
;
1617 stats
->gro_bytes
+= cqe_bcnt
;
1618 if (NAPI_GRO_CB(skb
)->count
!= 1)
1620 mlx5e_build_rx_skb(cqe
, cqe_bcnt
, rq
, skb
);
1621 skb_reset_network_header(skb
);
1622 if (!skb_flow_dissect_flow_keys(skb
, &rq
->hw_gro_data
->fk
, 0)) {
1623 napi_gro_receive(rq
->cq
.napi
, skb
);
1624 rq
->hw_gro_data
->skb
= NULL
;
1628 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq
*rq
,
1629 struct mlx5_cqe64
*cqe
,
1631 struct sk_buff
*skb
)
1633 struct mlx5e_rq_stats
*stats
= rq
->stats
;
1636 stats
->bytes
+= cqe_bcnt
;
1637 mlx5e_build_rx_skb(cqe
, cqe_bcnt
, rq
, skb
);
1641 struct sk_buff
*mlx5e_build_linear_skb(struct mlx5e_rq
*rq
, void *va
,
1642 u32 frag_size
, u16 headroom
,
1643 u32 cqe_bcnt
, u32 metasize
)
1645 struct sk_buff
*skb
= napi_build_skb(va
, frag_size
);
1647 if (unlikely(!skb
)) {
1648 rq
->stats
->buff_alloc_err
++;
1652 skb_reserve(skb
, headroom
);
1653 skb_put(skb
, cqe_bcnt
);
1656 skb_metadata_set(skb
, metasize
);
1661 static void mlx5e_fill_mxbuf(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
,
1662 void *va
, u16 headroom
, u32 frame_sz
, u32 len
,
1663 struct mlx5e_xdp_buff
*mxbuf
)
1665 xdp_init_buff(&mxbuf
->xdp
, frame_sz
, &rq
->xdp_rxq
);
1666 xdp_prepare_buff(&mxbuf
->xdp
, va
, headroom
, len
, true);
1671 static struct sk_buff
*
1672 mlx5e_skb_from_cqe_linear(struct mlx5e_rq
*rq
, struct mlx5e_wqe_frag_info
*wi
,
1673 struct mlx5_cqe64
*cqe
, u32 cqe_bcnt
)
1675 struct mlx5e_frag_page
*frag_page
= wi
->frag_page
;
1676 u16 rx_headroom
= rq
->buff
.headroom
;
1677 struct bpf_prog
*prog
;
1678 struct sk_buff
*skb
;
1684 va
= page_address(frag_page
->page
) + wi
->offset
;
1685 data
= va
+ rx_headroom
;
1686 frag_size
= MLX5_SKB_FRAG_SZ(rx_headroom
+ cqe_bcnt
);
1688 addr
= page_pool_get_dma_addr(frag_page
->page
);
1689 dma_sync_single_range_for_cpu(rq
->pdev
, addr
, wi
->offset
,
1690 frag_size
, rq
->buff
.map_dir
);
1693 prog
= rcu_dereference(rq
->xdp_prog
);
1695 struct mlx5e_xdp_buff mxbuf
;
1697 net_prefetchw(va
); /* xdp_frame data area */
1698 mlx5e_fill_mxbuf(rq
, cqe
, va
, rx_headroom
, rq
->buff
.frame0_sz
,
1700 if (mlx5e_xdp_handle(rq
, prog
, &mxbuf
))
1701 return NULL
; /* page/packet was consumed by XDP */
1703 rx_headroom
= mxbuf
.xdp
.data
- mxbuf
.xdp
.data_hard_start
;
1704 metasize
= mxbuf
.xdp
.data
- mxbuf
.xdp
.data_meta
;
1705 cqe_bcnt
= mxbuf
.xdp
.data_end
- mxbuf
.xdp
.data
;
1707 frag_size
= MLX5_SKB_FRAG_SZ(rx_headroom
+ cqe_bcnt
);
1708 skb
= mlx5e_build_linear_skb(rq
, va
, frag_size
, rx_headroom
, cqe_bcnt
, metasize
);
1712 /* queue up for recycling/reuse */
1713 skb_mark_for_recycle(skb
);
1719 static struct sk_buff
*
1720 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq
*rq
, struct mlx5e_wqe_frag_info
*wi
,
1721 struct mlx5_cqe64
*cqe
, u32 cqe_bcnt
)
1723 struct mlx5e_rq_frag_info
*frag_info
= &rq
->wqe
.info
.arr
[0];
1724 struct mlx5e_wqe_frag_info
*head_wi
= wi
;
1725 u16 rx_headroom
= rq
->buff
.headroom
;
1726 struct mlx5e_frag_page
*frag_page
;
1727 struct skb_shared_info
*sinfo
;
1728 struct mlx5e_xdp_buff mxbuf
;
1729 u32 frag_consumed_bytes
;
1730 struct bpf_prog
*prog
;
1731 struct sk_buff
*skb
;
1736 frag_page
= wi
->frag_page
;
1738 va
= page_address(frag_page
->page
) + wi
->offset
;
1739 frag_consumed_bytes
= min_t(u32
, frag_info
->frag_size
, cqe_bcnt
);
1741 addr
= page_pool_get_dma_addr(frag_page
->page
);
1742 dma_sync_single_range_for_cpu(rq
->pdev
, addr
, wi
->offset
,
1743 rq
->buff
.frame0_sz
, rq
->buff
.map_dir
);
1744 net_prefetchw(va
); /* xdp_frame data area */
1745 net_prefetch(va
+ rx_headroom
);
1747 mlx5e_fill_mxbuf(rq
, cqe
, va
, rx_headroom
, rq
->buff
.frame0_sz
,
1748 frag_consumed_bytes
, &mxbuf
);
1749 sinfo
= xdp_get_shared_info_from_buff(&mxbuf
.xdp
);
1752 cqe_bcnt
-= frag_consumed_bytes
;
1757 frag_page
= wi
->frag_page
;
1759 frag_consumed_bytes
= min_t(u32
, frag_info
->frag_size
, cqe_bcnt
);
1761 mlx5e_add_skb_shared_info_frag(rq
, sinfo
, &mxbuf
.xdp
, frag_page
,
1762 wi
->offset
, frag_consumed_bytes
);
1763 truesize
+= frag_info
->frag_stride
;
1765 cqe_bcnt
-= frag_consumed_bytes
;
1770 prog
= rcu_dereference(rq
->xdp_prog
);
1771 if (prog
&& mlx5e_xdp_handle(rq
, prog
, &mxbuf
)) {
1772 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT
, rq
->flags
)) {
1773 struct mlx5e_wqe_frag_info
*pwi
;
1775 for (pwi
= head_wi
; pwi
< wi
; pwi
++)
1776 pwi
->frag_page
->frags
++;
1778 return NULL
; /* page/packet was consumed by XDP */
1781 skb
= mlx5e_build_linear_skb(rq
, mxbuf
.xdp
.data_hard_start
, rq
->buff
.frame0_sz
,
1782 mxbuf
.xdp
.data
- mxbuf
.xdp
.data_hard_start
,
1783 mxbuf
.xdp
.data_end
- mxbuf
.xdp
.data
,
1784 mxbuf
.xdp
.data
- mxbuf
.xdp
.data_meta
);
1788 skb_mark_for_recycle(skb
);
1789 head_wi
->frag_page
->frags
++;
1791 if (xdp_buff_has_frags(&mxbuf
.xdp
)) {
1792 /* sinfo->nr_frags is reset by build_skb, calculate again. */
1793 xdp_update_skb_shared_info(skb
, wi
- head_wi
- 1,
1794 sinfo
->xdp_frags_size
, truesize
,
1795 xdp_buff_is_frag_pfmemalloc(&mxbuf
.xdp
));
1797 for (struct mlx5e_wqe_frag_info
*pwi
= head_wi
+ 1; pwi
< wi
; pwi
++)
1798 pwi
->frag_page
->frags
++;
1804 static void trigger_report(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
)
1806 struct mlx5_err_cqe
*err_cqe
= (struct mlx5_err_cqe
*)cqe
;
1807 struct mlx5e_priv
*priv
= rq
->priv
;
1809 if (cqe_syndrome_needs_recover(err_cqe
->syndrome
) &&
1810 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING
, &rq
->state
)) {
1811 mlx5e_dump_error_cqe(&rq
->cq
, rq
->rqn
, err_cqe
);
1812 queue_work(priv
->wq
, &rq
->recover_work
);
1816 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
)
1818 trigger_report(rq
, cqe
);
1819 rq
->stats
->wqe_err
++;
1822 static void mlx5e_handle_rx_cqe(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
)
1824 struct mlx5_wq_cyc
*wq
= &rq
->wqe
.wq
;
1825 struct mlx5e_wqe_frag_info
*wi
;
1826 struct sk_buff
*skb
;
1830 ci
= mlx5_wq_cyc_ctr2ix(wq
, be16_to_cpu(cqe
->wqe_counter
));
1831 wi
= get_frag(rq
, ci
);
1832 cqe_bcnt
= be32_to_cpu(cqe
->byte_cnt
);
1834 if (unlikely(MLX5E_RX_ERR_CQE(cqe
))) {
1835 mlx5e_handle_rx_err_cqe(rq
, cqe
);
1839 skb
= INDIRECT_CALL_3(rq
->wqe
.skb_from_cqe
,
1840 mlx5e_skb_from_cqe_linear
,
1841 mlx5e_skb_from_cqe_nonlinear
,
1842 mlx5e_xsk_skb_from_cqe_linear
,
1843 rq
, wi
, cqe
, cqe_bcnt
);
1845 /* probably for XDP */
1846 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT
, rq
->flags
))
1847 wi
->frag_page
->frags
++;
1851 mlx5e_complete_rx_cqe(rq
, cqe
, cqe_bcnt
, skb
);
1853 if (mlx5e_cqe_regb_chain(cqe
))
1854 if (!mlx5e_tc_update_skb_nic(cqe
, skb
)) {
1855 dev_kfree_skb_any(skb
);
1859 napi_gro_receive(rq
->cq
.napi
, skb
);
1862 mlx5_wq_cyc_pop(wq
);
1865 #ifdef CONFIG_MLX5_ESWITCH
1866 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
)
1868 struct net_device
*netdev
= rq
->netdev
;
1869 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1870 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1871 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
1872 struct mlx5_wq_cyc
*wq
= &rq
->wqe
.wq
;
1873 struct mlx5e_wqe_frag_info
*wi
;
1874 struct sk_buff
*skb
;
1878 ci
= mlx5_wq_cyc_ctr2ix(wq
, be16_to_cpu(cqe
->wqe_counter
));
1879 wi
= get_frag(rq
, ci
);
1880 cqe_bcnt
= be32_to_cpu(cqe
->byte_cnt
);
1882 if (unlikely(MLX5E_RX_ERR_CQE(cqe
))) {
1883 mlx5e_handle_rx_err_cqe(rq
, cqe
);
1887 skb
= INDIRECT_CALL_2(rq
->wqe
.skb_from_cqe
,
1888 mlx5e_skb_from_cqe_linear
,
1889 mlx5e_skb_from_cqe_nonlinear
,
1890 rq
, wi
, cqe
, cqe_bcnt
);
1892 /* probably for XDP */
1893 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT
, rq
->flags
))
1894 wi
->frag_page
->frags
++;
1898 mlx5e_complete_rx_cqe(rq
, cqe
, cqe_bcnt
, skb
);
1900 if (rep
->vlan
&& skb_vlan_tag_present(skb
))
1903 mlx5e_rep_tc_receive(cqe
, rq
, skb
);
1906 mlx5_wq_cyc_pop(wq
);
1909 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq
*rq
, struct mlx5_cqe64
*cqe
)
1911 u16 cstrides
= mpwrq_get_cqe_consumed_strides(cqe
);
1912 u16 wqe_id
= be16_to_cpu(cqe
->wqe_id
);
1913 struct mlx5e_mpw_info
*wi
= mlx5e_get_mpw_info(rq
, wqe_id
);
1914 u16 stride_ix
= mpwrq_get_cqe_stride_index(cqe
);
1915 u32 wqe_offset
= stride_ix
<< rq
->mpwqe
.log_stride_sz
;
1916 u32 head_offset
= wqe_offset
& ((1 << rq
->mpwqe
.page_shift
) - 1);
1917 u32 page_idx
= wqe_offset
>> rq
->mpwqe
.page_shift
;
1918 struct mlx5e_rx_wqe_ll
*wqe
;
1919 struct mlx5_wq_ll
*wq
;
1920 struct sk_buff
*skb
;
1923 wi
->consumed_strides
+= cstrides
;
1925 if (unlikely(MLX5E_RX_ERR_CQE(cqe
))) {
1926 mlx5e_handle_rx_err_cqe(rq
, cqe
);
1930 if (unlikely(mpwrq_is_filler_cqe(cqe
))) {
1931 struct mlx5e_rq_stats
*stats
= rq
->stats
;
1933 stats
->mpwqe_filler_cqes
++;
1934 stats
->mpwqe_filler_strides
+= cstrides
;
1938 cqe_bcnt
= mpwrq_get_cqe_byte_cnt(cqe
);
1940 skb
= INDIRECT_CALL_2(rq
->mpwqe
.skb_from_cqe_mpwrq
,
1941 mlx5e_skb_from_cqe_mpwrq_linear
,
1942 mlx5e_skb_from_cqe_mpwrq_nonlinear
,
1943 rq
, wi
, cqe
, cqe_bcnt
, head_offset
, page_idx
);
1947 mlx5e_complete_rx_cqe(rq
, cqe
, cqe_bcnt
, skb
);
1949 mlx5e_rep_tc_receive(cqe
, rq
, skb
);
1952 if (likely(wi
->consumed_strides
< rq
->mpwqe
.num_strides
))
1956 wqe
= mlx5_wq_ll_get_wqe(wq
, wqe_id
);
1957 mlx5_wq_ll_pop(wq
, cqe
->wqe_id
, &wqe
->next
.next_wqe_index
);
1960 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep
= {
1961 .handle_rx_cqe
= mlx5e_handle_rx_cqe_rep
,
1962 .handle_rx_cqe_mpwqe
= mlx5e_handle_rx_cqe_mpwrq_rep
,
1967 mlx5e_fill_skb_data(struct sk_buff
*skb
, struct mlx5e_rq
*rq
,
1968 struct mlx5e_frag_page
*frag_page
,
1969 u32 data_bcnt
, u32 data_offset
)
1971 net_prefetchw(skb
->data
);
1974 /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1975 u32 pg_consumed_bytes
= min_t(u32
, PAGE_SIZE
- data_offset
, data_bcnt
);
1976 unsigned int truesize
;
1978 if (test_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
))
1979 truesize
= pg_consumed_bytes
;
1981 truesize
= ALIGN(pg_consumed_bytes
, BIT(rq
->mpwqe
.log_stride_sz
));
1984 mlx5e_add_skb_frag(rq
, skb
, frag_page
->page
, data_offset
,
1985 pg_consumed_bytes
, truesize
);
1987 data_bcnt
-= pg_consumed_bytes
;
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
				   u32 page_idx)
{
	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
	struct mlx5e_frag_page *head_page = frag_page;
	u32 frag_offset = head_offset;
	u32 byte_cnt = cqe_bcnt;
	struct skb_shared_info *sinfo;
	struct mlx5e_xdp_buff mxbuf;
	unsigned int truesize = 0;
	struct bpf_prog *prog;
	struct sk_buff *skb;
	u32 linear_frame_sz;
	u16 linear_data_len;
	u16 linear_hr;
	void *va;

	prog = rcu_dereference(rq->xdp_prog);

	if (prog) {
		/* area for bpf_xdp_[store|load]_bytes */
		net_prefetchw(page_address(frag_page->page) + frag_offset);
		if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
			rq->stats->buff_alloc_err++;
			return NULL;
		}
		va = page_address(wi->linear_page.page);
		net_prefetchw(va); /* xdp_frame data area */
		linear_hr = XDP_PACKET_HEADROOM;
		linear_data_len = 0;
		linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
	} else {
		skb = napi_alloc_skb(rq->cq.napi,
				     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
		if (unlikely(!skb)) {
			rq->stats->buff_alloc_err++;
			return NULL;
		}

		skb_mark_for_recycle(skb);
		va = skb->head;
		net_prefetchw(va); /* xdp_frame data area */
		net_prefetchw(skb->data);

		frag_offset += headlen;
		byte_cnt -= headlen;
		linear_hr = skb_headroom(skb);
		linear_data_len = headlen;
		linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
		if (unlikely(frag_offset >= PAGE_SIZE)) {
			frag_page++;
			frag_offset -= PAGE_SIZE;
		}
	}

	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);

	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);

	while (byte_cnt) {
		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);

		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
			truesize += pg_consumed_bytes;
		else
			truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));

		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
					       pg_consumed_bytes);
		byte_cnt -= pg_consumed_bytes;
		frag_offset = 0;
		frag_page++;
	}

	if (prog) {
		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
				struct mlx5e_frag_page *pfp;

				for (pfp = head_page; pfp < frag_page; pfp++)
					pfp->frags++;

				wi->linear_page.frags++;
			}
			mlx5e_page_release_fragmented(rq, &wi->linear_page);
			return NULL; /* page/packet was consumed by XDP */
		}

		skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
					     linear_frame_sz,
					     mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
					     mxbuf.xdp.data - mxbuf.xdp.data_meta);
		if (unlikely(!skb)) {
			mlx5e_page_release_fragmented(rq, &wi->linear_page);
			return NULL;
		}

		skb_mark_for_recycle(skb);
		wi->linear_page.frags++;
		mlx5e_page_release_fragmented(rq, &wi->linear_page);

		if (xdp_buff_has_frags(&mxbuf.xdp)) {
			struct mlx5e_frag_page *pagep;

			/* sinfo->nr_frags is reset by build_skb, calculate again. */
			xdp_update_skb_shared_info(skb, frag_page - head_page,
						   sinfo->xdp_frags_size, truesize,
						   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));

			pagep = head_page;
			do
				pagep->frags++;
			while (++pagep < frag_page);
		}
		__pskb_pull_tail(skb, headlen);
	} else {
		dma_addr_t addr;

		if (xdp_buff_has_frags(&mxbuf.xdp)) {
			struct mlx5e_frag_page *pagep;

			xdp_update_skb_shared_info(skb, sinfo->nr_frags,
						   sinfo->xdp_frags_size, truesize,
						   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));

			pagep = frag_page - sinfo->nr_frags;
			do
				pagep->frags++;
			while (++pagep < frag_page);
		}
		addr = page_pool_get_dma_addr(head_page->page);
		mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
				      head_offset, head_offset, headlen);
		/* skb linear part was allocated with headlen and aligned to long */
		skb->tail += headlen;
		skb->len  += headlen;
	}

	return skb;
}

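/* Summary (descriptive, not part of the original source): with an XDP program
 * attached, the packet start is staged in wi->linear_page so the program can
 * use bpf_xdp_[store|load]_bytes on the linear area; only on XDP_PASS is an
 * SKB built and ownership of the frag pages transferred to it. Without XDP,
 * the headers are copied into a freshly allocated SKB and the payload is
 * attached as page fragments, with truesize accumulated per fragment.
 */
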
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
				u32 page_idx)
{
	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
	u16 rx_headroom = rq->buff.headroom;
	struct bpf_prog *prog;
	struct sk_buff *skb;
	u32 metasize = 0;
	void *va, *data;
	dma_addr_t addr;
	u32 frag_size;

	/* Check packet size. Note LRO doesn't use linear SKB */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
		rq->stats->oversize_pkts_sw_drop++;
		return NULL;
	}

	va             = page_address(frag_page->page) + head_offset;
	data           = va + rx_headroom;
	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);

	addr = page_pool_get_dma_addr(frag_page->page);
	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
				      frag_size, rq->buff.map_dir);
	net_prefetch(data);

	prog = rcu_dereference(rq->xdp_prog);
	if (prog) {
		struct mlx5e_xdp_buff mxbuf;

		net_prefetchw(va); /* xdp_frame data area */
		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
				 cqe_bcnt, &mxbuf);
		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
				frag_page->frags++;
			return NULL; /* page/packet was consumed by XDP */
		}

		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
	}
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
	if (unlikely(!skb))
		return NULL;

	/* queue up for recycling/reuse */
	skb_mark_for_recycle(skb);
	frag_page->frags++;

	return skb;
}

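/* Note (descriptive): an XDP program may move xdp.data, data_meta and
 * data_end, so rx_headroom, metasize and cqe_bcnt are recomputed from the
 * xdp_buff before the linear SKB is built around the same page. */
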
static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			  struct mlx5_cqe64 *cqe, u16 header_index)
{
	struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
	u16 head_offset = head->addr & (PAGE_SIZE - 1);
	u16 head_size = cqe->shampo.header_size;
	u16 rx_headroom = rq->buff.headroom;
	struct sk_buff *skb = NULL;
	void *hdr, *data;
	u32 frag_size;

	hdr		= page_address(head->frag_page->page) + head_offset;
	data		= hdr + rx_headroom;
	frag_size	= MLX5_SKB_FRAG_SZ(rx_headroom + head_size);

	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
		/* build SKB around header */
		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
		prefetchw(hdr);
		prefetch(data);
		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
		if (unlikely(!skb))
			return NULL;

		head->frag_page->frags++;
	} else {
		/* allocate SKB and copy header for large header */
		rq->stats->gro_large_hds++;
		skb = napi_alloc_skb(rq->cq.napi,
				     ALIGN(head_size, sizeof(long)));
		if (unlikely(!skb)) {
			rq->stats->buff_alloc_err++;
			return NULL;
		}

		prefetchw(skb->data);
		mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
				      head_offset + rx_headroom,
				      rx_headroom, head_size);
		/* skb linear part was allocated with headlen and aligned to long */
		skb->tail += head_size;
		skb->len  += head_size;
	}

	/* queue up for recycling/reuse */
	skb_mark_for_recycle(skb);

	return skb;
}

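/* Note (descriptive): headers that fit a SHAMPO header entry are wrapped
 * in place (no copy) and the backing frag page gains a reference; larger
 * headers are copied into a freshly allocated SKB and accounted in
 * gro_large_hds. */
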
static void
mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
{
	skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
	unsigned int frag_size = skb_frag_size(last_frag);
	unsigned int frag_truesize;

	frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
	skb->truesize += frag_truesize - frag_size;
}

static void
mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
	struct sk_buff *skb = rq->hw_gro_data->skb;
	struct mlx5e_rq_stats *stats = rq->stats;

	stats->gro_skbs++;
	if (likely(skb_shinfo(skb)->nr_frags))
		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
	if (NAPI_GRO_CB(skb)->count > 1)
		mlx5e_shampo_update_hdr(rq, cqe, match);
	napi_gro_receive(rq->cq.napi, skb);
	rq->hw_gro_data->skb = NULL;
}

static bool
mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;

	return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}

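/* Worked example (illustrative): with 4 KiB pages and GRO_LEGACY_MAX_SIZE of
 * 64 KiB, an SKB already holding 16 fragments cannot accept more payload
 * (16 * 4096 + data_bcnt > 65536), so the caller flushes the aggregate first.
 */
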
static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
	u64 addr = shampo->info[header_index].addr;

	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
		struct mlx5e_dma_info *dma_info = &shampo->info[header_index];

		dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
		mlx5e_page_release_fragmented(rq, dma_info->frag_page);
	}
	bitmap_clear(shampo->bitmap, header_index, 1);
}

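/* Note (descriptive): SHAMPO header entries share pages,
 * MLX5E_SHAMPO_WQ_HEADER_PER_PAGE entries per page, so the backing frag page
 * is released only when the last header slot of a page is freed; the bitmap
 * bit is cleared for every entry. */
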
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 data_bcnt		= mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
	u16 header_index	= mlx5e_shampo_get_cqe_header_index(rq, cqe);
	u32 wqe_offset		= be32_to_cpu(cqe->shampo.data_offset);
	u16 cstrides		= mpwrq_get_cqe_consumed_strides(cqe);
	u32 data_offset		= wqe_offset & (PAGE_SIZE - 1);
	u32 cqe_bcnt		= mpwrq_get_cqe_byte_cnt(cqe);
	u16 wqe_id		= be16_to_cpu(cqe->wqe_id);
	u32 page_idx		= wqe_offset >> PAGE_SHIFT;
	u16 head_size		= cqe->shampo.header_size;
	struct sk_buff **skb	= &rq->hw_gro_data->skb;
	bool flush		= cqe->shampo.flush;
	bool match		= cqe->shampo.match;
	struct mlx5e_rq_stats *stats = rq->stats;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5e_mpw_info *wi;
	struct mlx5_wq_ll *wq;

	wi = mlx5e_get_mpw_info(rq, wqe_id);
	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	stats->gro_match_packets += match;

	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
		match = false;
		mlx5e_shampo_flush_skb(rq, cqe, match);
	}

	if (!*skb) {
		if (likely(head_size))
			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
		else
			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
								  data_offset, page_idx);
		if (unlikely(!*skb))
			goto free_hd_entry;

		NAPI_GRO_CB(*skb)->count = 1;
		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
	} else {
		NAPI_GRO_CB(*skb)->count++;
		if (NAPI_GRO_CB(*skb)->count == 2 &&
		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
			void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
			int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
				    sizeof(struct iphdr);
			struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);

			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
		}
	}

	if (likely(head_size)) {
		struct mlx5e_frag_page *frag_page;

		frag_page = &wi->alloc_units.frag_pages[page_idx];
		mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
	}

	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
	if (flush)
		mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
	mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq  = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

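/* Flow summary (descriptive, not part of the original source): the first CQE
 * of an HW GRO session builds *skb from the received header and seeds
 * NAPI_GRO_CB()->count and gso_size; subsequent matching CQEs only append
 * payload fragments and bump the count, while a non-matching CQE, an SKB that
 * ran out of space, or a flush indication closes the session via
 * mlx5e_shampo_flush_skb(). */
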
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
	struct mlx5e_rx_wqe_ll *wqe;
	struct mlx5_wq_ll *wq;
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		mlx5e_handle_rx_err_cqe(rq, cqe);
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		struct mlx5e_rq_stats *stats = rq->stats;

		stats->mpwqe_filler_cqes++;
		stats->mpwqe_filler_strides += cstrides;
		goto mpwrq_cqe_out;
	}

	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
			      mlx5e_skb_from_cqe_mpwrq_linear,
			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
			      mlx5e_xsk_skb_from_cqe_mpwrq_linear,
			      rq, wi, cqe, cqe_bcnt, head_offset,
			      page_idx);
	if (!skb)
		goto mpwrq_cqe_out;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (mlx5e_cqe_regb_chain(cqe))
		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
			dev_kfree_skb_any(skb);
			goto mpwrq_cqe_out;
		}

	napi_gro_receive(rq->cq.napi, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
		return;

	wq  = &rq->mpwqe.wq;
	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
						 struct mlx5_cqwq *cqwq,
						 int budget_rem)
{
	struct mlx5_cqe64 *cqe, *title_cqe = NULL;
	struct mlx5e_cq_decomp *cqd = &rq->cqd;
	int work_done = 0;

	cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
	if (!cqe)
		return work_done;

	if (cqd->last_cqe_title &&
	    (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
		rq->stats->cqe_compress_blks++;
		cqd->last_cqe_title = false;
	}

	do {
		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			if (title_cqe) {
				mlx5e_read_enhanced_title_slot(rq, title_cqe);
				title_cqe = NULL;
				rq->stats->cqe_compress_blks++;
			}
			work_done +=
				mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
							      budget_rem - work_done);
			continue;
		}
		title_cqe = cqe;
		mlx5_cqwq_pop(cqwq);

		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
				rq, cqe);
		work_done++;
	} while (work_done < budget_rem &&
		 (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));

	/* last cqe might be title on next poll bulk */
	if (title_cqe) {
		mlx5e_read_enhanced_title_slot(rq, title_cqe);
		cqd->last_cqe_title = true;
	}

	return work_done;
}

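/* Note (descriptive): with enhanced CQE compression a compressed block refers
 * back to the most recent uncompressed CQE as its title, so the last
 * uncompressed CQE of a bulk is cached (last_cqe_title) in case the next poll
 * starts with a compressed block. */
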
static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
					      struct mlx5_cqwq *cqwq,
					      int budget_rem)
{
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (rq->cqd.left)
		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);

	while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			work_done +=
				mlx5e_decompress_cqes_start(rq, cqwq,
							    budget_rem - work_done);
			continue;
		}

		mlx5_cqwq_pop(cqwq);
		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
				rq, cqe);
		work_done++;
	}

	return work_done;
}

int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	int work_done;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return 0;

	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
		work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
								  budget);
	else
		work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
							       budget);

	if (work_done == 0)
		return 0;

	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
		mlx5e_shampo_flush_skb(rq, NULL, false);

	if (rcu_access_pointer(rq->xdp_prog))
		mlx5e_xdp_rx_poll_complete(rq);

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done;
}

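/* Usage sketch (illustrative, simplified from the NAPI poll path; not a
 * verbatim call site): the channel poll loop invokes this with its remaining
 * budget, roughly:
 *
 *	work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
 *	busy |= work_done == budget;
 */
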
#ifdef CONFIG_MLX5_CORE_IPOIB

#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE           16

static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	struct hwtstamp_config *tstamp;
	struct mlx5e_rq_stats *stats;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	char *pseudo_header;
	u32 flags_rqpn;
	u32 qpn;
	u8 *dgid;
	u8 g;

	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);

	/* No mapping present, cannot process SKB. This might happen if a child
	 * interface is going down while having unprocessed CQEs on parent RQ
	 */
	if (unlikely(!netdev)) {
		/* TODO: add drop counters support */
		skb->dev = NULL;
		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
		return;
	}

	priv = mlx5i_epriv(netdev);
	tstamp = &priv->tstamp;
	stats = &priv->channel_stats[rq->ix]->rq;

	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
	g = (flags_rqpn >> 28) & 3;
	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
	if ((!g) || dgid[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	/* Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
		    MLX5_GID_SIZE) == 0)) {
		skb->dev = NULL;
		return;
	}

	skb_pull(skb, MLX5_IB_GRH_BYTES);

	skb->protocol = *((__be16 *)(skb->data));

	if (netdev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
		stats->csum_complete++;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		stats->csum_none++;
	}

	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
								  rq->clock, get_cqe_ts(cqe));
	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	/* 20 bytes of ipoib header and 4 for encap existing */
	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
	skb_reset_mac_header(skb);
	skb_pull(skb, MLX5_IPOIB_HARD_LEN);

	skb->dev = netdev;

	stats->packets++;
	stats->bytes += cqe_bcnt;
}

static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 ci;

	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi       = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto wq_cyc_pop;
	}

	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
			      mlx5e_skb_from_cqe_linear,
			      mlx5e_skb_from_cqe_nonlinear,
			      rq, wi, cqe, cqe_bcnt);
	if (!skb)
		goto wq_cyc_pop;

	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	if (unlikely(!skb->dev)) {
		dev_kfree_skb_any(skb);
		goto wq_cyc_pop;
	}
	napi_gro_receive(rq->cq.napi, skb);

wq_cyc_pop:
	mlx5_wq_cyc_pop(wq);
}

const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
};

#endif /* CONFIG_MLX5_CORE_IPOIB */

int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5_core_dev *mdev = rq->mdev;
	struct mlx5e_priv *priv = rq->priv;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_mpwrq_linear :
				mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
			if (!rq->handle_rx_cqe) {
				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
				return -EINVAL;
			}
		} else {
			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
			if (!rq->handle_rx_cqe) {
				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
				return -EINVAL;
			}
		}

		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		rq->wqe.skb_from_cqe = xsk ?
			mlx5e_xsk_skb_from_cqe_linear :
			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_linear :
				mlx5e_skb_from_cqe_nonlinear;
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			netdev_err(netdev, "RX handler of RQ is not set\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
	struct mlx5e_wqe_frag_info *wi;
	struct sk_buff *skb;
	u32 cqe_bcnt;
	u16 trap_id;
	u16 ci;

	trap_id  = get_cqe_flow_tag(cqe);
	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
	wi       = get_frag(rq, ci);
	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		rq->stats->wqe_err++;
		goto wq_cyc_pop;
	}

	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
	if (!skb)
		goto wq_cyc_pop;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	skb_push(skb, ETH_HLEN);

	mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
				 rq->netdev->devlink_port);
	dev_kfree_skb_any(skb);

wq_cyc_pop:
	mlx5_wq_cyc_pop(wq);
}

void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
			       mlx5e_skb_from_cqe_linear :
			       mlx5e_skb_from_cqe_nonlinear;
	rq->post_wqes = mlx5e_post_rx_wqes;
	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
}