/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
#include "en/xdp.h"

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
        return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

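/* CQE compression: the HW may coalesce a burst of completions into a
 * single 64B "title" CQE followed by arrays of 8B "mini" CQEs. The
 * helpers below read the title and mini-CQE slots, expand each mini
 * CQE back into the title template, and restore the ownership bits of
 * the consumed CQ slots so the CQ buffer stays consistent.
 */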
static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
                                       u32 cqcc, void *data)
{
        u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);

        memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}

static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
                                         struct mlx5_cqwq *wq,
                                         u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        struct mlx5_cqe64 *title = &cqd->title;

        mlx5e_read_cqe_slot(wq, cqcc, title);
        cqd->left = be32_to_cpu(title->byte_cnt);
        cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
        rq->stats->cqe_compress_blks++;
}

static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
                                            struct mlx5e_cq_decomp *cqd,
                                            u32 cqcc)
{
        mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
        cqd->mini_arr_idx = 0;
}

static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
{
        u32 cqcc = wq->cc;
        u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
        u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
        u32 wq_sz = mlx5_cqwq_get_size(wq);
        u32 ci_top = min_t(u32, wq_sz, ci + n);

        for (; ci < ci_top; ci++, n--) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

                cqe->op_own = op_own;
        }

        if (unlikely(ci == wq_sz)) {
                op_own = !op_own;
                for (ci = 0; ci < n; ci++) {
                        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);

                        cqe->op_own = op_own;
                }
        }
}

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
                                        struct mlx5_cqwq *wq,
                                        u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
        struct mlx5_cqe64 *title = &cqd->title;

        title->byte_cnt = mini_cqe->byte_cnt;
        title->check_sum = mini_cqe->checksum;
        title->op_own &= 0xf0;
        title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
        title->wqe_counter = cpu_to_be16(cqd->wqe_counter);

        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
        else
                cqd->wqe_counter =
                        mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
                                                struct mlx5_cqwq *wq,
                                                u32 cqcc)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;

        mlx5e_decompress_cqe(rq, wq, cqcc);
        cqd->title.rss_hash_type = 0;
        cqd->title.rss_hash_result = 0;
}

static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
                                             struct mlx5_cqwq *wq,
                                             int update_owner_only,
                                             int budget_rem)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        u32 cqcc = wq->cc + update_owner_only;
        u32 cqe_count;
        u32 i;

        cqe_count = min_t(u32, cqd->left, budget_rem);

        for (i = update_owner_only; i < cqe_count;
             i++, cqd->mini_arr_idx++, cqcc++) {
                if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
                        mlx5e_read_mini_arr_slot(wq, cqd, cqcc);

                mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
                rq->handle_rx_cqe(rq, &cqd->title);
        }
        mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
        wq->cc = cqcc;
        cqd->left -= cqe_count;
        rq->stats->cqe_compress_pkts += cqe_count;

        return cqe_count;
}

static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
                                              struct mlx5_cqwq *wq,
                                              int budget_rem)
{
        struct mlx5e_cq_decomp *cqd = &rq->cqd;
        u32 cc = wq->cc;

        mlx5e_read_title_slot(rq, wq, cc);
        mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
        mlx5e_decompress_cqe(rq, wq, cc);
        rq->handle_rx_cqe(rq, &cqd->title);
        cqd->mini_arr_idx++;

        return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}

static inline bool mlx5e_page_is_reserved(struct page *page)
{
        return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
}

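/* rq->page_cache is a small ring buffer of recently released pages.
 * mlx5e_rx_cache_put() stashes a page on release when there is room
 * and the page is usable (not pfmemalloc, local NUMA node);
 * mlx5e_rx_cache_get() hands it back on allocation, but only if the
 * stack has already dropped its reference (page_ref_count == 1), i.e.
 * the page can be reused without copying.
 */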
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info)
{
        struct mlx5e_page_cache *cache = &rq->page_cache;
        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
        struct mlx5e_rq_stats *stats = rq->stats;

        if (tail_next == cache->head) {
                stats->cache_full++;
                return false;
        }

        if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
                stats->cache_waive++;
                return false;
        }

        cache->page_cache[cache->tail] = *dma_info;
        cache->tail = tail_next;
        return true;
}

static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info)
{
        struct mlx5e_page_cache *cache = &rq->page_cache;
        struct mlx5e_rq_stats *stats = rq->stats;

        if (unlikely(cache->head == cache->tail)) {
                stats->cache_empty++;
                return false;
        }

        if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
                stats->cache_busy++;
                return false;
        }

        *dma_info = cache->page_cache[cache->head];
        cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
        stats->cache_reuse++;

        dma_sync_single_for_device(rq->pdev, dma_info->addr,
                                   PAGE_SIZE,
                                   DMA_FROM_DEVICE);
        return true;
}

static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
                                          struct mlx5e_dma_info *dma_info)
{
        if (mlx5e_rx_cache_get(rq, dma_info))
                return 0;

        dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
        if (unlikely(!dma_info->page))
                return -ENOMEM;

        dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
                                      PAGE_SIZE, rq->buff.map_dir);
        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
                put_page(dma_info->page);
                dma_info->page = NULL;
                return -ENOMEM;
        }

        return 0;
}

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
        dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}

void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle)
{
        if (likely(recycle)) {
                if (mlx5e_rx_cache_put(rq, dma_info))
                        return;

                mlx5e_page_dma_unmap(rq, dma_info);
                page_pool_recycle_direct(rq->page_pool, dma_info->page);
        } else {
                mlx5e_page_dma_unmap(rq, dma_info);
                put_page(dma_info->page);
        }
}

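/* Legacy (cyclic) RQ fragment handling: several WQE fragments may
 * share one page at different offsets. Only the fragment at offset 0
 * allocates and maps the page; only the fragment marked last_in_page
 * releases it.
 */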
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
                                    struct mlx5e_wqe_frag_info *frag)
{
        int err = 0;

        if (!frag->offset)
                /* On first frag (offset == 0), replenish page (dma_info actually).
                 * Other frags that point to the same dma_info (with a different
                 * offset) should just use the new one without replenishing again
                 * by themselves.
                 */
                err = mlx5e_page_alloc_mapped(rq, frag->di);

        return err;
}

static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
                                     struct mlx5e_wqe_frag_info *frag,
                                     bool recycle)
{
        if (frag->last_in_page)
                mlx5e_page_release(rq, frag->di, recycle);
}

static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
{
        return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
}

static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
                              u16 ix)
{
        struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
        int err;
        int i;

        for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
                err = mlx5e_get_rx_frag(rq, frag);
                if (unlikely(err))
                        goto free_frags;

                wqe->data[i].addr = cpu_to_be64(frag->di->addr +
                                                frag->offset + rq->buff.headroom);
        }

        return 0;

free_frags:
        while (--i >= 0)
                mlx5e_put_rx_frag(rq, --frag, true);

        return err;
}

static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
                                     struct mlx5e_wqe_frag_info *wi,
                                     bool recycle)
{
        int i;

        for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
                mlx5e_put_rx_frag(rq, wi, recycle);
}

void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

        mlx5e_free_rx_wqe(rq, wi, false);
}

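/* Allocate a bulk of consecutive RX WQEs, unwinding on failure so the
 * ring is never left partially populated.
 */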
static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        int err;
        int i;

        for (i = 0; i < wqe_bulk; i++) {
                struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);

                err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
                if (unlikely(err))
                        goto free_wqes;
        }

        return 0;

free_wqes:
        while (--i >= 0)
                mlx5e_dealloc_rx_wqe(rq, ix + i);

        return err;
}

static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
                   struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
                   unsigned int truesize)
{
        dma_sync_single_for_cpu(rq->pdev,
                                di->addr + frag_offset,
                                len, DMA_FROM_DEVICE);
        page_ref_inc(di->page);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        di->page, frag_offset, len, truesize);
}

static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
                      struct mlx5e_dma_info *dma_info,
                      int offset_from, u32 headlen)
{
        const void *from = page_address(dma_info->page) + offset_from;
        /* Aligning len to sizeof(long) optimizes memcpy performance */
        unsigned int len = ALIGN(headlen, sizeof(long));

        dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
                                DMA_FROM_DEVICE);
        skb_copy_to_linear_data(skb, from, len);
}

static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
        const bool no_xdp_xmit =
                bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
        struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
        int i;

        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
                if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
                        mlx5e_page_release(rq, &dma_info[i], recycle);
}

static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
        struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

        rq->mpwqe.umr_in_progress = false;

        mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_ll_update_db_record(wq);
}

static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
{
        return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
                                              struct mlx5_wq_cyc *wq,
                                              u16 pi, u16 nnops)
{
        struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];

        edge_wi = wi + nnops;

        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
                wi->opcode = MLX5_OPCODE_NOP;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
}

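/* Build one multi-packet WQE (MPWQE): allocate and map its pages,
 * write their addresses as inline MTT entries into a UMR WQE, and post
 * that UMR on the internal control SQ (ICOSQ). The RQ WQE becomes
 * visible to HW only once the UMR completes, in mlx5e_post_rx_mpwqe().
 */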
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
        struct mlx5e_icosq *sq = &rq->channel->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *umr_wqe;
        u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
        u16 pi, contig_wqebbs_room;
        int err;
        int i;

        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
                mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }

        umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2))
                memcpy(umr_wqe, &rq->mpwqe.umr_wqe,
                       offsetof(struct mlx5e_umr_wqe, inline_mtts));

        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
                err = mlx5e_page_alloc_mapped(rq, dma_info);
                if (unlikely(err))
                        goto err_unmap;
                umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
        }

        bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
        wi->consumed_strides = 0;

        rq->mpwqe.umr_in_progress = true;

        umr_wqe->ctrl.opmod_idx_opcode =
                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                            MLX5_OPCODE_UMR);
        umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);

        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
        sq->pc += MLX5E_UMR_WQEBBS;
        mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);

        return 0;

err_unmap:
        while (--i >= 0) {
                dma_info--;
                mlx5e_page_release(rq, dma_info, true);
        }
        rq->stats->buff_alloc_err++;

        return err;
}

void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];

        /* Don't recycle, this function is called on rq/netdev close */
        mlx5e_free_rx_mpwqe(rq, wi, false);
}

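/* NAPI-context replenish for the legacy (cyclic) RQ: refill the ring
 * in wqe_bulk-sized batches and ring the doorbell once at the end.
 */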
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        u8 wqe_bulk;
        int err;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return false;

        wqe_bulk = rq->wqe.info.wqe_bulk;

        if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
                return false;

        do {
                u16 head = mlx5_wq_cyc_get_head(wq);

                err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
                if (unlikely(err)) {
                        rq->stats->buff_alloc_err++;
                        break;
                }

                mlx5_wq_cyc_push_n(wq, wqe_bulk);
        } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_cyc_update_db_record(wq);

        return !!err;
}

static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
                                             struct mlx5e_icosq *sq,
                                             struct mlx5e_rq *rq,
                                             struct mlx5_cqe64 *cqe)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];

        mlx5_cqwq_pop(&cq->wq);

        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                netdev_WARN_ONCE(cq->channel->netdev,
                                 "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
                return;
        }

        if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
                mlx5e_post_rx_mpwqe(rq);
                return;
        }

        if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
                netdev_WARN_ONCE(cq->channel->netdev,
                                 "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
}

static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
        struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
        struct mlx5_cqe64 *cqe;

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (likely(!cqe))
                return;

        /* by design, there's only a single cqe */
        mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe);

        mlx5_cqwq_update_db_record(&cq->wq);
}

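/* NAPI-context replenish for the striding RQ: first reap ICOSQ
 * completions (which publish previously posted MPWQEs), then kick off
 * at most one new UMR at a time, counting congestion if the ring runs
 * low while a UMR is still in flight.
 */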
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return false;

        mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);

        if (mlx5_wq_ll_is_full(wq))
                return false;

        if (!rq->mpwqe.umr_in_progress)
                mlx5e_alloc_rx_mpwqe(rq, wq->head);
        else
                rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;

        return false;
}

static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
{
        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
                     (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);

        tcp->check = 0;
        tcp->psh = get_cqe_lro_tcppsh(cqe);

        if (tcp_ack) {
                tcp->ack = 1;
                tcp->ack_seq = cqe->lro_ack_seq_num;
                tcp->window = cqe->lro_tcp_win;
        }
}

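/* An LRO session aggregates many TCP segments into one big SKB, so the
 * IP and TCP headers must be rewritten to match the merged payload:
 * total/payload length, TTL/hop limit, the IPv4 header checksum, and
 * the TCP checksum recomputed over the new pseudo header.
 */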
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
{
        struct ethhdr *eth = (struct ethhdr *)(skb->data);
        struct tcphdr *tcp;
        int network_depth = 0;
        __wsum check;
        __be16 proto;
        u16 tot_len;
        void *ip_p;

        proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);

        tot_len = cqe_bcnt - network_depth;
        ip_p = skb->data + network_depth;

        if (proto == htons(ETH_P_IP)) {
                struct iphdr *ipv4 = ip_p;

                tcp = ip_p + sizeof(struct iphdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

                ipv4->ttl = cqe->lro_min_ttl;
                ipv4->tot_len = cpu_to_be16(tot_len);
                ipv4->check = 0;
                ipv4->check = ip_fast_csum((unsigned char *)ipv4,
                                           ipv4->ihl);

                mlx5e_lro_update_tcp_hdr(cqe, tcp);
                check = csum_partial(tcp, tcp->doff * 4,
                                     csum_unfold((__force __sum16)cqe->check_sum));
                /* Almost done, don't forget the pseudo header */
                tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
                                               tot_len - sizeof(struct iphdr),
                                               IPPROTO_TCP, check);
        } else {
                u16 payload_len = tot_len - sizeof(struct ipv6hdr);
                struct ipv6hdr *ipv6 = ip_p;

                tcp = ip_p + sizeof(struct ipv6hdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

                ipv6->hop_limit = cqe->lro_min_ttl;
                ipv6->payload_len = cpu_to_be16(payload_len);

                mlx5e_lro_update_tcp_hdr(cqe, tcp);
                check = csum_partial(tcp, tcp->doff * 4,
                                     csum_unfold((__force __sum16)cqe->check_sum));
                /* Almost done, don't forget the pseudo header */
                tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
                                             IPPROTO_TCP, check);
        }
}

static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
{
        u8 cht = cqe->rss_hash_type;
        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
                                            PKT_HASH_TYPE_NONE;
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
                                        __be16 *proto)
{
        *proto = ((struct ethhdr *)skb->data)->h_proto;
        *proto = __vlan_get_protocol(skb, *proto, network_depth);
        return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
}

static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
        int network_depth = 0;
        __be16 proto;
        void *ip;
        int rc;

        if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
                return;

        ip = skb->data + network_depth;
        rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
                                           IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));

        rq->stats->ecn_mark += !!rc;
}

static u32 mlx5e_get_fcs(const struct sk_buff *skb)
{
        const void *fcs_bytes;
        u32 _fcs_bytes;

        fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
                                       ETH_FCS_LEN, &_fcs_bytes);

        return __get_unaligned_cpu32(fcs_bytes);
}

static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
        void *ip_p = skb->data + network_depth;

        return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
                                            ((struct ipv6hdr *)ip_p)->nexthdr;
}

#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

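/* Checksum strategy: prefer CHECKSUM_COMPLETE built from the CQE
 * checksum (adjusting for VLAN headers and, when NETIF_F_RXFCS is on,
 * the FCS), and fall back to CHECKSUM_UNNECESSARY based on the CQE
 * L3/L4 "OK" bits for the cases the complete csum can't cover: LRO,
 * short padded frames, SCTP, and non-IP ethertypes.
 */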
static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
                                     struct sk_buff *skb,
                                     bool lro)
{
        struct mlx5e_rq_stats *stats = rq->stats;
        int network_depth = 0;
        __be16 proto;

        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
                goto csum_none;

        if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                stats->csum_unnecessary++;
                return;
        }

        if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
                goto csum_unnecessary;

        /* CQE csum doesn't cover padding octets in short ethernet
         * frames. And the pad field is appended prior to calculating
         * and appending the FCS field.
         *
         * Detecting these padded frames requires to verify and parse
         * IP headers, so we simply force all those small frames to be
         * CHECKSUM_UNNECESSARY even if they are not padded.
         */
        if (short_frame(skb->len))
                goto csum_unnecessary;

        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
                if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;

                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                if (network_depth > ETH_HLEN)
                        /* CQE csum is calculated from the IP header and does
                         * not cover VLAN headers (if present). This will add
                         * the checksum manually.
                         */
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
                if (unlikely(netdev->features & NETIF_F_RXFCS))
                        skb->csum = csum_block_add(skb->csum,
                                                   (__force __wsum)mlx5e_get_fcs(skb),
                                                   skb->len - ETH_FCS_LEN);
                stats->csum_complete++;
                return;
        }

csum_unnecessary:
        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
                   ((cqe->hds_ip_ext & CQE_L4_OK) ||
                    (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
                        skb->encapsulation = 1;
                        stats->csum_unnecessary_inner++;
                        return;
                }
                stats->csum_unnecessary++;
                return;
        }
csum_none:
        skb->ip_summed = CHECKSUM_NONE;
        stats->csum_none++;
}

#define MLX5E_CE_BIT_MASK 0x80

static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      u32 cqe_bcnt,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
{
        u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        struct mlx5e_rq_stats *stats = rq->stats;
        struct net_device *netdev = rq->netdev;

        skb->mac_len = ETH_HLEN;

#ifdef CONFIG_MLX5_EN_TLS
        mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
#endif

        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                /* Subtract one since we already counted this as one
                 * "regular" packet in mlx5e_complete_rx_cqe()
                 */
                stats->packets += lro_num_seg - 1;
                stats->lro_packets++;
                stats->lro_bytes += cqe_bcnt;
        }

        if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
                skb_hwtstamps(skb)->hwtstamp =
                                mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));

        skb_record_rx_queue(skb, rq->ix);

        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);

        if (cqe_has_vlan(cqe)) {
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(cqe->vlan_info));
                stats->removed_vlan_packets++;
        }

        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;

        mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
        /* checking CE bit in cqe - MSB in ml_path field */
        if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
                mlx5e_enable_ecn(rq, skb);

        skb->protocol = eth_type_trans(skb, netdev);
}

static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqe64 *cqe,
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
{
        struct mlx5e_rq_stats *stats = rq->stats;

        stats->packets++;
        stats->bytes += cqe_bcnt;
        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}

static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
                                       u32 frag_size, u16 headroom,
                                       u32 cqe_bcnt)
{
        struct sk_buff *skb = build_skb(va, frag_size);

        if (unlikely(!skb)) {
                rq->stats->buff_alloc_err++;
                return NULL;
        }

        skb_reserve(skb, headroom);
        skb_put(skb, cqe_bcnt);

        return skb;
}

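/* Legacy RQ SKB construction. The linear variant builds the SKB
 * directly on top of the (single) fragment and lets XDP run first;
 * the nonlinear variant copies the headers into a freshly allocated
 * SKB and attaches the rest of the packet as page fragments (no XDP
 * here, since the packet may span multiple pages).
 */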
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
        struct mlx5e_dma_info *di = wi->di;
        u16 rx_headroom = rq->buff.headroom;
        struct sk_buff *skb;
        void *va, *data;
        bool consumed;
        u32 frag_size;

        va = page_address(di->page) + wi->offset;
        data = va + rx_headroom;
        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);

        dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
                                      frag_size, DMA_FROM_DEVICE);
        prefetchw(va); /* xdp_frame data area */
        prefetch(data);

        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                return NULL;
        }

        rcu_read_lock();
        consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
        rcu_read_unlock();
        if (consumed)
                return NULL; /* page/packet was consumed by XDP */

        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
        if (unlikely(!skb))
                return NULL;

        /* queue up for recycling/reuse */
        page_ref_inc(di->page);

        return skb;
}

struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
        struct mlx5e_wqe_frag_info *head_wi = wi;
        u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
        u16 frag_headlen = headlen;
        u16 byte_cnt = cqe_bcnt - headlen;
        struct sk_buff *skb;

        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                return NULL;
        }

        /* XDP is not supported in this configuration, as incoming packets
         * might spread among multiple pages.
         */
        skb = napi_alloc_skb(rq->cq.napi,
                             ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
        if (unlikely(!skb)) {
                rq->stats->buff_alloc_err++;
                return NULL;
        }

        prefetchw(skb->data);

        while (byte_cnt) {
                u16 frag_consumed_bytes =
                        min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);

                mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
                                   frag_consumed_bytes, frag_info->frag_stride);
                byte_cnt -= frag_consumed_bytes;
                frag_headlen = 0;
                frag_info++;
                wi++;
        }

        /* copy header */
        mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
        /* skb linear part was allocated with headlen and aligned to long */
        skb->tail += headlen;
        skb->len += headlen;

        return skb;
}

void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
        u32 cqe_bcnt;
        u16 ci;

        ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        wi = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

        skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        /* do not return page to cache,
                         * it will be returned on XDP_TX completion.
                         */
                        goto wq_cyc_pop;
                }
                goto free_wqe;
        }

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
        napi_gro_receive(rq->cq.napi, skb);

free_wqe:
        mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
}

#ifdef CONFIG_MLX5_ESWITCH
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct net_device *netdev = rq->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
        u32 cqe_bcnt;
        u16 ci;

        ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        wi = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

        skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        /* do not return page to cache,
                         * it will be returned on XDP_TX completion.
                         */
                        goto wq_cyc_pop;
                }
                goto free_wqe;
        }

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

        if (rep->vlan && skb_vlan_tag_present(skb))
                skb_vlan_pop(skb);

        napi_gro_receive(rq->cq.napi, skb);

free_wqe:
        mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
}
#endif

struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
        u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
        u32 frag_offset = head_offset + headlen;
        u32 byte_cnt = cqe_bcnt - headlen;
        struct mlx5e_dma_info *head_di = di;
        struct sk_buff *skb;

        skb = napi_alloc_skb(rq->cq.napi,
                             ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
        if (unlikely(!skb)) {
                rq->stats->buff_alloc_err++;
                return NULL;
        }

        prefetchw(skb->data);

        if (unlikely(frag_offset >= PAGE_SIZE)) {
                di++;
                frag_offset -= PAGE_SIZE;
        }

        while (byte_cnt) {
                u32 pg_consumed_bytes =
                        min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
                unsigned int truesize =
                        ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));

                mlx5e_add_skb_frag(rq, skb, di, frag_offset,
                                   pg_consumed_bytes, truesize);
                byte_cnt -= pg_consumed_bytes;
                frag_offset = 0;
                di++;
        }
        /* copy header */
        mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
        /* skb linear part was allocated with headlen and aligned to long */
        skb->tail += headlen;
        skb->len += headlen;

        return skb;
}

struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
        u16 rx_headroom = rq->buff.headroom;
        u32 cqe_bcnt32 = cqe_bcnt;
        struct sk_buff *skb;
        void *va, *data;
        u32 frag_size;
        bool consumed;

        /* Check packet size. Note LRO doesn't use linear SKB */
        if (unlikely(cqe_bcnt > rq->hw_mtu)) {
                rq->stats->oversize_pkts_sw_drop++;
                return NULL;
        }

        va = page_address(di->page) + head_offset;
        data = va + rx_headroom;
        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);

        dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
                                      frag_size, DMA_FROM_DEVICE);
        prefetchw(va); /* xdp_frame data area */
        prefetch(data);

        rcu_read_lock();
        consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32);
        rcu_read_unlock();
        if (consumed) {
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
                        __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
                return NULL; /* page/packet was consumed by XDP */
        }

        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
        if (unlikely(!skb))
                return NULL;

        /* queue up for recycling/reuse */
        page_ref_inc(di->page);

        return skb;
}

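/* Striding RQ completion: locate the MPWQE and the stride(s) this CQE
 * consumed, build an SKB (linear or nonlinear), and release the MPWQE
 * back to the linked list only once all of its strides are consumed.
 */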
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
        u16 wqe_id = be16_to_cpu(cqe->wqe_id);
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
        u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
        u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
        u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
        u32 page_idx = wqe_offset >> PAGE_SHIFT;
        struct mlx5e_rx_wqe_ll *wqe;
        struct mlx5_wq_ll *wq;
        struct sk_buff *skb;
        u16 cqe_bcnt;

        wi->consumed_strides += cstrides;

        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                goto mpwrq_cqe_out;
        }

        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
                struct mlx5e_rq_stats *stats = rq->stats;

                stats->mpwqe_filler_cqes++;
                stats->mpwqe_filler_strides += cstrides;
                goto mpwrq_cqe_out;
        }

        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

        skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
                                           page_idx);
        if (!skb)
                goto mpwrq_cqe_out;

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
        napi_gro_receive(rq->cq.napi, skb);

mpwrq_cqe_out:
        if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
                return;

        wq = &rq->mpwqe.wq;
        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
        mlx5e_free_rx_mpwqe(rq, wi, true);
        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

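/* RX NAPI poll: drain up to @budget CQEs, expanding compressed CQE
 * sessions on the fly, then let XDP flush any queued transmit work and
 * update the CQ doorbell record.
 */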
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        struct mlx5_cqwq *cqwq = &cq->wq;
        struct mlx5_cqe64 *cqe;
        int work_done = 0;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return 0;

        if (rq->cqd.left)
                work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);

        cqe = mlx5_cqwq_get_cqe(cqwq);
        if (!cqe) {
                if (unlikely(work_done))
                        goto out;
                return 0;
        }

        do {
                if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
                        work_done +=
                                mlx5e_decompress_cqes_start(rq, cqwq,
                                                            budget - work_done);
                        continue;
                }

                mlx5_cqwq_pop(cqwq);

                rq->handle_rx_cqe(rq, cqe);
        } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

out:
        if (rq->xdp_prog)
                mlx5e_xdp_rx_poll_complete(rq);

        mlx5_cqwq_update_db_record(cqwq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        return work_done;
}

#ifdef CONFIG_MLX5_CORE_IPOIB

#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE           16

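/* IPoIB RX: packets arrive with a GRH instead of an Ethernet header.
 * Classify the packet type from the GRH DGID, strip the GRH, and
 * prepend the zeroed pseudo header expected by the IPoIB stack.
 */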
static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqe64 *cqe,
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
{
        struct hwtstamp_config *tstamp;
        struct mlx5e_rq_stats *stats;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        char *pseudo_header;
        u32 qpn;
        u8 *dgid;
        u8 g;

        qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
        netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);

        /* No mapping present, cannot process SKB. This might happen if a child
         * interface is going down while having unprocessed CQEs on parent RQ
         */
        if (unlikely(!netdev)) {
                /* TODO: add drop counters support */
                skb->dev = NULL;
                pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
                return;
        }

        priv = mlx5i_epriv(netdev);
        tstamp = &priv->tstamp;
        stats = &priv->channel_stats[rq->ix].rq;

        g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
        dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
        if ((!g) || dgid[0] != 0xff)
                skb->pkt_type = PACKET_HOST;
        else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        /* TODO: IB/ipoib: Allow mcast packets from other VFs
         * 68996a6e760e5c74654723eeb57bf65628ae87f4
         */

        skb_pull(skb, MLX5_IB_GRH_BYTES);

        skb->protocol = *((__be16 *)(skb->data));

        if (netdev->features & NETIF_F_RXCSUM) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                stats->csum_complete++;
        } else {
                skb->ip_summed = CHECKSUM_NONE;
                stats->csum_none++;
        }

        if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
                skb_hwtstamps(skb)->hwtstamp =
                                mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));

        skb_record_rx_queue(skb, rq->ix);

        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);

        /* 20 bytes of ipoib header and 4 for encap existing */
        pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
        memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
        skb_reset_mac_header(skb);
        skb_pull(skb, MLX5_IPOIB_HARD_LEN);

        skb->dev = netdev;

        stats->packets++;
        stats->bytes += cqe_bcnt;
}

void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
        u32 cqe_bcnt;
        u16 ci;

        ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        wi = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

        skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb)
                goto wq_free_wqe;

        mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
        if (unlikely(!skb->dev)) {
                dev_kfree_skb_any(skb);
                goto wq_free_wqe;
        }
        napi_gro_receive(rq->cq.napi, skb);

wq_free_wqe:
        mlx5e_free_rx_wqe(rq, wi, true);
        mlx5_wq_cyc_pop(wq);
}

#endif /* CONFIG_MLX5_CORE_IPOIB */

#ifdef CONFIG_MLX5_EN_IPSEC

void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
        u32 cqe_bcnt;
        u16 ci;

        ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        wi = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

        skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (unlikely(!skb)) {
                /* a DROP, save the page-reuse checks */
                mlx5e_free_rx_wqe(rq, wi, true);
                goto wq_cyc_pop;
        }
        skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
        if (unlikely(!skb)) {
                mlx5e_free_rx_wqe(rq, wi, true);
                goto wq_cyc_pop;
        }

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
        napi_gro_receive(rq->cq.napi, skb);

        mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
}

#endif /* CONFIG_MLX5_EN_IPSEC */