/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

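/* Allocate and DMA-map a single page for an RX fragment. The reserved
 * headroom (non-zero only in XDP mode, see mlx4_en_calc_rx_buf()) is
 * accounted for via frag->page_offset.
 */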
static int mlx4_alloc_page(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_alloc *frag,
			   gfp_t gfp)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(gfp);
	if (unlikely(!page))
		return -ENOMEM;
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		__free_page(page);
		return -ENOMEM;
	}
	frag->page = page;
	frag->dma = dma;
	frag->page_offset = priv->rx_headroom;
	return 0;
}

static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_ring *ring,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       gfp_t gfp)
{
	int i;

	for (i = 0; i < priv->num_frags; i++, frags++) {
		if (!frags->page) {
			if (mlx4_alloc_page(priv, frags, gfp))
				return -ENOMEM;
			ring->rx_alloc_pages++;
		}
		rx_desc->data[i].addr = cpu_to_be64(frags->dma +
						    frags->page_offset);
	}
	return 0;
}

static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frag)
{
	if (frag->page) {
		dma_unmap_page(priv->ddev, frag->dma,
			       PAGE_SIZE, priv->dma_dir);
		__free_page(frag->page);
	}
	/* We need to clear all fields, otherwise a change of priv->log_rx_info
	 * could lead to seeing garbage later in frag->page.
	 */
	memset(frag, 0, sizeof(*frag));
}

static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key.
	 */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

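/* Attach buffers to one RX descriptor. The page-cache fast path is only
 * taken in XDP page-per-packet mode, where recycled pages are reused
 * without going back to the page allocator.
 */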
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf +
		(index << ring->log_stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
		(index << priv->log_rx_info);

	if (likely(ring->page_cache.index > 0)) {
		/* XDP uses a single page per frame */
		if (!frags->page) {
			ring->page_cache.index--;
			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
			frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
		}
		frags->page_offset = XDP_PACKET_HEADROOM;
		rx_desc->data[0].addr = cpu_to_be64(frags->dma +
						    XDP_PACKET_HEADROOM);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}

static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

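/* Publish the new producer index to the doorbell record read by the HW;
 * only the low 16 bits are significant, hence the 0xffff mask.
 */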
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

/* slow path */
static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags + nr);
	}
}

/* Function not in fast-path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated, reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	for (index = 0; index < ring->size; index++) {
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
	}
	ring->cons = 0;
	ring->prod = 0;
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vzalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vzalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
			local_bh_disable();
			napi_reschedule(&priv->rx_cq[ring]->napi);
			local_bh_enable();
		}
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index].page = frame->page;
	cache->buf[cache->index].dma = frame->dma;
	cache->index++;
	return true;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = rcu_dereference_protected(
					ring->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(ring->page_cache.buf[i].page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}

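/* Fragment completion doubles as the page-recycling decision point: with a
 * half-page stride the offset simply flips between the two page halves,
 * while smaller strides advance linearly through the page. A page that can
 * no longer be reused safely is unmapped and released to the allocator.
 */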
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
	unsigned int truesize = 0;
	int nr, frag_size;
	struct page *page;
	dma_addr_t dma;
	bool release;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0;; frags++) {
		frag_size = min_t(int, length, frag_info->frag_size);

		page = frags->page;
		if (unlikely(!page))
			goto fail;

		dma = frags->dma;
		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
					      frag_size, priv->dma_dir);

		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
				     frag_size);

		truesize += frag_info->frag_stride;
		if (frag_info->frag_stride == PAGE_SIZE / 2) {
			frags->page_offset ^= PAGE_SIZE / 2;
			release = page_count(page) != 1 ||
				  page_is_pfmemalloc(page) ||
				  page_to_nid(page) != numa_mem_id();
		} else {
			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);

			frags->page_offset += sz_align;
			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
		}
		if (release) {
			dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
			frags->page = NULL;
		} else {
			page_ref_inc(page);
		}

		nr++;
		length -= frag_size;
		if (!length)
			break;
		frag_info++;
	}
	skb->truesize += truesize;
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
	}
	return 0;
}

static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
	const unsigned char *data = va + ETH_HLEN;
	int i;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
		if (data[i] != (unsigned char)i)
			return;
	}
	/* Loopback found */
	priv->loopback_ok = 1;
}

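/* Replenish missing RX descriptors in batches (only once at least eight
 * are missing) so that the producer doorbell is rung once per NAPI round
 * rather than once per descriptor.
 */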
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	u32 missing = ring->actual_size - (ring->prod - ring->cons);

	/* Try to batch allocations, but not too much. */
	if (missing < 8)
		return;
	do {
		if (mlx4_en_prepare_rx_desc(priv, ring,
					    ring->prod & ring->size_mask,
					    GFP_ATOMIC | __GFP_COLD |
					    __GFP_MEMALLOC))
			break;
		ring->prod++;
	} while (likely(--missing));

	mlx4_en_update_rx_prod_db(ring);
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation.
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects a checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif
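
/* Fix up the ones' complement sum reported by the HW (which covers the
 * frame past the Ethernet header) so it can be used as CHECKSUM_COMPLETE:
 * fold in an un-stripped VLAN header if present, then remove the pseudo
 * header contribution per IP version, as implemented above.
 */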
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;
	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
			return -1;
#endif
	return 0;
}

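/* Main RX completion handler, called from NAPI context: walks the CQ,
 * gives an attached XDP program first chance at every frame, and builds
 * GRO-able skbs from the ring fragments. Returns the number of
 * completions processed, at most @budget.
 */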
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int factor = priv->cqe_factor;
	struct mlx4_en_rx_ring *ring;
	struct bpf_prog *xdp_prog;
	int cq_ring = cq->ring;
	bool doorbell_pending;
	struct mlx4_cqe *cqe;
	int polled = 0;
	int index;

	if (unlikely(!priv->port_up))
		return 0;

	if (unlikely(budget <= 0))
		return polled;

	ring = priv->rx_ring[cq_ring];

	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
	rcu_read_lock();
	xdp_prog = rcu_dereference(ring->xdp_prog);
	doorbell_pending = false;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index'.
	 */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
		struct mlx4_en_rx_alloc *frags;
		enum pkt_hash_types hash_type;
		struct sk_buff *skb;
		unsigned int length;
		int ip_summed;
		void *va;
		int nr;

		frags = ring->rx_info + (index << priv->log_rx_info);
		va = page_address(frags[0].page) + frags[0].page_offset;
		prefetchw(va);
		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet: this is needed when
		 * SRIOV is not enabled and we are not performing the
		 * selftest, or when loopback filtering (flb) is disabled.
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			const struct ethhdr *ethh = va;
			dma_addr_t dma;
			/* Get a pointer to the first fragment, since we
			 * don't have an skb yet, and cast it to an ethhdr
			 * struct.
			 */
			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source))
						goto next;
				}
			}
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, va);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			struct xdp_buff xdp;
			dma_addr_t dma;
			void *orig_data;
			u32 act;

			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data_hard_start = va - frags[0].page_offset;
			xdp.data = va;
			xdp.data_end = xdp.data + length;
			orig_data = xdp.data;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			if (xdp.data != orig_data) {
				length = xdp.data_end - xdp.data;
				frags[0].page_offset = xdp.data -
					xdp.data_hard_start;
				va = xdp.data;
			}

			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
							       length, cq_ring,
							       &doorbell_pending))) {
					frags[0].page = NULL;
					goto next;
				}
				trace_xdp_exception(dev, xdp_prog, act);
				goto xdp_drop_no_cnt; /* Drop on xmit failure */
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
				trace_xdp_exception(dev, xdp_prog, act);
			case XDP_DROP:
				ring->xdp_drop++;
xdp_drop_no_cnt:
				goto next;
			}
		}

		ring->bytes += length;
		ring->packets++;

		skb = napi_get_frags(&cq->napi);
		if (unlikely(!skb))
			goto next;

		if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
			u64 timestamp = mlx4_en_get_cqe_ts(cqe);

			mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
					       timestamp);
		}
		skb_record_rx_queue(skb, cq_ring);

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
				    cqe->checksum == cpu_to_be16(0xffff)) {
					bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
						(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

					ip_summed = CHECKSUM_UNNECESSARY;
					hash_type = PKT_HASH_TYPE_L4;
					if (l2_tunnel)
						skb->csum_level = 1;
					ring->csum_ok++;
				} else {
					goto csum_none;
				}
			} else {
				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
							       MLX4_CQE_STATUS_IPV6))) {
					if (check_csum(cqe, skb, va, dev->features)) {
						goto csum_none;
					} else {
						ip_summed = CHECKSUM_COMPLETE;
						hash_type = PKT_HASH_TYPE_L3;
						ring->csum_complete++;
					}
				} else {
					goto csum_none;
				}
			}
		} else {
csum_none:
			ip_summed = CHECKSUM_NONE;
			hash_type = PKT_HASH_TYPE_L3;
			ring->csum_none++;
		}
		skb->ip_summed = ip_summed;
		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     hash_type);

		if ((cqe->vlan_my_qpn &
		     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       be16_to_cpu(cqe->sl_vid));
		else if ((cqe->vlan_my_qpn &
			  cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
		if (likely(nr)) {
			skb_shinfo(skb)->nr_frags = nr;
			skb->len = length;
			skb->data_len = length;
			napi_gro_frags(&cq->napi);
		} else {
			skb->vlan_tci = 0;
			skb_clear_hash(skb);
		}
next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (unlikely(++polled == budget))
			break;
	}

	rcu_read_unlock();

	if (likely(polled)) {
		if (doorbell_pending) {
			priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
		}

		mlx4_cq_set_ci(&cq->mcq);
		wmb(); /* ensure HW sees CQ consumer before we post new buffers */
		ring->cons = cq->mcq.cons_index;
	}
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);

	mlx4_en_refill_rx_buffers(priv, ring);

	return polled;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *xdp_tx_cq = NULL;
	bool clean_complete = true;
	int done;

	if (priv->tx_ring_num[TX_XDP]) {
		xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
		if (xdp_tx_cq->xdp_busy) {
			clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
							       budget);
			xdp_tx_cq->xdp_busy = !clean_complete;
		}
	}

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget || !clean_complete) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		/* in case we got here because of !clean_complete */
		done = budget;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* The current cpu does not match the IRQ affinity mask -
		 * the affinity probably changed. Need to stop this NAPI
		 * poll, and restart it on the right CPU.
		 * Try to avoid returning a too small value (like 0),
		 * to not fool net_rx_action() and its netdev_budget.
		 */
		if (done)
			done--;
	}
	/* Done for now */
	if (likely(napi_complete_done(napi, done)))
		mlx4_en_arm_cq(priv, cq);
	return done;
}

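/* Compute the RX fragment layout for the current MTU: a single page-sized
 * fragment in XDP mode, otherwise a scatter-list of up to
 * MLX4_EN_MAX_RX_FRAGS fragments whose strides are padded so an integral
 * number of them fits in a page.
 */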
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->tx_ring_num[TX_XDP]) {
		priv->frag_info[0].frag_size = eff_mtu;
		/* This will gain efficient xdp frame recycling at the
		 * expense of more costly truesize accounting.
		 */
		priv->frag_info[0].frag_stride = PAGE_SIZE;
		priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
		priv->rx_headroom = XDP_PACKET_HEADROOM;
		i = 1;
	} else {
		int frag_size_max = 2048, buf_size = 0;

		/* should not happen, right ? */
		if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
			frag_size_max = PAGE_SIZE;

		while (buf_size < eff_mtu) {
			int frag_stride, frag_size = eff_mtu - buf_size;
			int pad, nb;

			if (i < MLX4_EN_MAX_RX_FRAGS - 1)
				frag_size = min(frag_size, frag_size_max);

			priv->frag_info[i].frag_size = frag_size;
			frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
			/* We can only pack two 1536-byte frames in one 4K page.
			 * Therefore, each frame would consume more bytes (truesize).
			 */
			nb = PAGE_SIZE / frag_stride;
			pad = (PAGE_SIZE - nb * frag_stride) / nb;
			pad &= ~(SMP_CACHE_BYTES - 1);
			priv->frag_info[i].frag_stride = frag_stride + pad;

			buf_size += frag_size;
			i++;
		}
		priv->dma_dir = PCI_DMA_FROMDEVICE;
		priv->rx_headroom = 0;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_dbg(DRV, priv,
		       " frag:%d - size:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

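/* Create one RX QP and bring it to the ready state, pointing it at its
 * ring's CQ. FCS stripping is cancelled here when the FW allows it and
 * NETIF_F_RXFCS is requested.
 */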
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
		       MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	u8 flags;

	en_dbg(DRV, priv, "Configuring rss steering\n");

	flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, flags);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	if (priv->rx_ring_num == 1) {
		rss_map->indir_qp = &rss_map->qps[0];
		priv->base_qpn = rss_map->indir_qp->qpn;
		en_info(priv, "Optimized Non-RSS steering\n");
		return 0;
	}

	rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
	if (!rss_map->indir_qp) {
		err = -ENOMEM;
		goto rss_err;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp,
			    GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}

	rss_map->indir_qp->event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *)&context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
	kfree(rss_map->indir_qp);
	rss_map->indir_qp = NULL;
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	if (priv->rx_ring_num > 1) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
			       MLX4_QP_STATE_RST, NULL, 0, 0,
			       rss_map->indir_qp);
		mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
		mlx4_qp_free(mdev->dev, rss_map->indir_qp);
		kfree(rss_map->indir_qp);
		rss_map->indir_qp = NULL;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}