/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

static int mlx4_alloc_page(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_alloc *frag,
			   gfp_t gfp)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(gfp);
	if (unlikely(!page))
		return -ENOMEM;
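	/* priv->dma_dir is PCI_DMA_FROMDEVICE in the normal case and
	 * PCI_DMA_BIDIRECTIONAL when an XDP program is attached (see
	 * mlx4_en_calc_rx_buf()), so XDP_TX can transmit from the same mapping.
	 */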
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		__free_page(page);
		return -ENOMEM;
	}
	frag->page = page;
	frag->dma = dma;
	frag->page_offset = priv->rx_headroom;
	return 0;
}

static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_ring *ring,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       gfp_t gfp)
{
	int i;

	for (i = 0; i < priv->num_frags; i++, frags++) {
		if (!frags->page) {
			if (mlx4_alloc_page(priv, frags, gfp))
				return -ENOMEM;
			ring->rx_alloc_pages++;
		}
		rx_desc->data[i].addr = cpu_to_be64(frags->dma +
						    frags->page_offset);
	}
	return 0;
}

static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frag)
{
	if (frag->page) {
		dma_unmap_page(priv->ddev, frag->dma,
			       PAGE_SIZE, priv->dma_dir);
		__free_page(frag->page);
	}
	/* We need to clear all fields, otherwise a change of priv->log_rx_info
	 * could lead to seeing garbage later in frag->page.
	 */
	memset(frag, 0, sizeof(*frag));
}

static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf +
		(index << ring->log_stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
		(index << priv->log_rx_info);
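	/* ring->page_cache holds pages recycled from XDP_TX completions
	 * (see mlx4_en_rx_recycle()); reuse one of those before falling
	 * back to the regular fragment allocator.
	 */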
	if (likely(ring->page_cache.index > 0)) {
		/* XDP uses a single page per frame */
		if (!frags->page) {
			ring->page_cache.index--;
			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
			frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
		}
		frags->page_offset = XDP_PACKET_HEADROOM;
		rx_desc->data[0].addr = cpu_to_be64(frags->dma +
						    XDP_PACKET_HEADROOM);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}

static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

/* slow path */
static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags + nr);
	}
}

/* Function not in fast-path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
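	/* Shrink every ring to the same power-of-two count so that the
	 * size_mask computed later in mlx4_en_activate_rx_rings() remains a
	 * valid index mask.
	 */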
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	for (index = 0; index < ring->size; index++) {
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
	}
	ring->cons = 0;
	ring->prod = 0;
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs, num_online_cpus());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node, int queue_index)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
		goto err_ring;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->rx_info) {
		err = -ENOMEM;
		goto err_xdp_info;
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	kvfree(ring->rx_info);
	ring->rx_info = NULL;
err_xdp_info:
	xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
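		/* ring->buf_size was allocated with one extra TXBB in
		 * mlx4_en_create_rx_ring(); when the RX stride fits within a
		 * TXBB, that leading TXBB is stamped as an unused send WQE
		 * below and skipped, so RX descriptors start right after it.
		 */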
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
			local_bh_disable();
			napi_reschedule(&priv->rx_cq[ring]->napi);
			local_bh_enable();
		}
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index].page = frame->page;
	cache->buf[cache->index].dma = frame->dma;
	cache->index++;
	return true;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = rcu_dereference_protected(
					ring->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
	xdp_rxq_info_unreg(&ring->xdp_rxq);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	kvfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(ring->page_cache.buf[i].page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}


static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
	unsigned int truesize = 0;
	bool release = true;
	int nr, frag_size;
	struct page *page;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0;; frags++) {
		frag_size = min_t(int, length, frag_info->frag_size);

		page = frags->page;
		if (unlikely(!page))
			goto fail;

		dma = frags->dma;
		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
					      frag_size, priv->dma_dir);

		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
				     frag_size);

		truesize += frag_info->frag_stride;
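		/* Two frags share a page when frag_stride is half a page:
		 * flip page_offset to the other half and keep the page for
		 * reuse only if we are its sole user, it is not a pfmemalloc
		 * page and it lives on the local NUMA node; otherwise it is
		 * unmapped and released. The non-XDP multi-frag case below
		 * instead walks the page forward in sz_align steps.
		 */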
		if (frag_info->frag_stride == PAGE_SIZE / 2) {
			frags->page_offset ^= PAGE_SIZE / 2;
			release = page_count(page) != 1 ||
				  page_is_pfmemalloc(page) ||
				  page_to_nid(page) != numa_mem_id();
		} else if (!priv->rx_headroom) {
			/* rx_headroom for non XDP setup is always 0.
			 * When XDP is set, the above condition will
			 * guarantee page is always released.
			 */
			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);

			frags->page_offset += sz_align;
			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
		}
		if (release) {
			dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
			frags->page = NULL;
		} else {
			page_ref_inc(page);
		}

		nr++;
		length -= frag_size;
		if (!length)
			break;
		frag_info++;
	}
	skb->truesize += truesize;
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
	}
	return 0;
}

static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
	const unsigned char *data = va + ETH_HLEN;
	int i;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
		if (data[i] != (unsigned char)i)
			return;
	}
	/* Loopback found */
	priv->loopback_ok = 1;
}

static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	u32 missing = ring->actual_size - (ring->prod - ring->cons);

	/* Try to batch allocations, but not too much. */
	if (missing < 8)
		return;
	do {
		if (mlx4_en_prepare_rx_desc(priv, ring,
					    ring->prod & ring->size_mask,
					    GFP_ATOMIC | __GFP_MEMALLOC))
			break;
		ring->prod++;
	} while (likely(--missing));

	mlx4_en_update_rx_prod_db(ring);
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;
	__u8 ipproto = iph->protocol;

	if (unlikely(ipproto == IPPROTO_SCTP))
		return -1;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, ipproto, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, hw_checksum lacks 6 bytes of the IPv6 header:
 * the first 4 bytes (priority, version, flow_lbl)
 * and 2 additional bytes (nexthdr, hop_limit).
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__u8 nexthdr = ipv6h->nexthdr;
	__wsum temp;

	if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
		     nexthdr == IPPROTO_HOPOPTS ||
		     nexthdr == IPPROTO_SCTP))
		return -1;

	/* priority, version, flow_lbl */
	temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
	/* nexthdr and hop_limit */
	skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
	return 0;
}
#endif

/* We reach this function only after checking that any of
 * the (IPv4 | IPv6) bits are set in cqe->status.
 */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;

	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
	return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}

#if IS_ENABLED(CONFIG_IPV6)
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
#else
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif

int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int factor = priv->cqe_factor;
	struct mlx4_en_rx_ring *ring;
	struct bpf_prog *xdp_prog;
	int cq_ring = cq->ring;
	bool doorbell_pending;
	struct mlx4_cqe *cqe;
	struct xdp_buff xdp;
	int polled = 0;
	int index;

	if (unlikely(!priv->port_up || budget <= 0))
		return 0;

	ring = priv->rx_ring[cq_ring];

	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
	rcu_read_lock();
	xdp_prog = rcu_dereference(ring->xdp_prog);
	xdp.rxq = &ring->xdp_rxq;
	doorbell_pending = 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

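	/* A CQE belongs to software when its ownership bit matches the parity
	 * of cons_index with respect to the CQ size (the bit flips on each
	 * pass over the CQ ring); the XNOR below tests exactly that.
	 */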
	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
		struct mlx4_en_rx_alloc *frags;
		enum pkt_hash_types hash_type;
		struct sk_buff *skb;
		unsigned int length;
		int ip_summed;
		void *va;
		int nr;

		frags = ring->rx_info + (index << priv->log_rx_info);
		va = page_address(frags[0].page) + frags[0].page_offset;
		prefetchw(va);
		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check whether we need to drop the packet: RX filtering is
		 * needed when SRIOV is not enabled, we are not performing the
		 * selftest, and force loopback (flb) is disabled.
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			const struct ethhdr *ethh = va;
			dma_addr_t dma;
			/* Get a pointer to the first fragment (we don't have
			 * an skb yet) and cast it to an ethhdr struct.
			 */
			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW looped it back */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source))
						goto next;
				}
			}
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, va);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			dma_addr_t dma;
			void *orig_data;
			u32 act;

			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data_hard_start = va - frags[0].page_offset;
			xdp.data = va;
			xdp_set_data_meta_invalid(&xdp);
			xdp.data_end = xdp.data + length;
			orig_data = xdp.data;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			length = xdp.data_end - xdp.data;
			if (xdp.data != orig_data) {
				frags[0].page_offset = xdp.data -
					xdp.data_hard_start;
				va = xdp.data;
			}

			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
							       length, cq_ring,
							       &doorbell_pending))) {
					frags[0].page = NULL;
					goto next;
				}
				trace_xdp_exception(dev, xdp_prog, act);
				goto xdp_drop_no_cnt; /* Drop on xmit failure */
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
				trace_xdp_exception(dev, xdp_prog, act);
			case XDP_DROP:
				ring->xdp_drop++;
xdp_drop_no_cnt:
				goto next;
			}
		}

		ring->bytes += length;
		ring->packets++;

		skb = napi_get_frags(&cq->napi);
		if (unlikely(!skb))
			goto next;

		if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
			u64 timestamp = mlx4_en_get_cqe_ts(cqe);

			mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
					       timestamp);
		}
		skb_record_rx_queue(skb, cq_ring);

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						       MLX4_CQE_STATUS_UDP)) &&
			    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    cqe->checksum == cpu_to_be16(0xffff)) {
				bool l2_tunnel;

				l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
					(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
				ip_summed = CHECKSUM_UNNECESSARY;
				hash_type = PKT_HASH_TYPE_L4;
				if (l2_tunnel)
					skb->csum_level = 1;
				ring->csum_ok++;
			} else {
				if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
					goto csum_none;
				if (check_csum(cqe, skb, va, dev->features))
					goto csum_none;
				ip_summed = CHECKSUM_COMPLETE;
				hash_type = PKT_HASH_TYPE_L3;
				ring->csum_complete++;
			}
		} else {
csum_none:
			ip_summed = CHECKSUM_NONE;
			hash_type = PKT_HASH_TYPE_L3;
			ring->csum_none++;
		}
		skb->ip_summed = ip_summed;
		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     hash_type);

		if ((cqe->vlan_my_qpn &
		     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       be16_to_cpu(cqe->sl_vid));
		else if ((cqe->vlan_my_qpn &
			  cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
		if (likely(nr)) {
			skb_shinfo(skb)->nr_frags = nr;
			skb->len = length;
			skb->data_len = length;
			napi_gro_frags(&cq->napi);
		} else {
			skb->vlan_tci = 0;
			skb_clear_hash(skb);
		}
next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (unlikely(++polled == budget))
			break;
	}

	rcu_read_unlock();

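	/* XDP_TX frames queued during this poll are flushed with a single
	 * doorbell below instead of one doorbell per transmitted frame.
	 */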
	if (likely(polled)) {
		if (doorbell_pending) {
			priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
		}

		mlx4_cq_set_ci(&cq->mcq);
		wmb(); /* ensure HW sees CQ consumer before we post new buffers */
		ring->cons = cq->mcq.cons_index;
	}
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);

	mlx4_en_refill_rx_buffers(priv, ring);

	return polled;
}


void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *xdp_tx_cq = NULL;
	bool clean_complete = true;
	int done;

	if (priv->tx_ring_num[TX_XDP]) {
		xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
		if (xdp_tx_cq->xdp_busy) {
			clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
							       budget);
			xdp_tx_cq->xdp_busy = !clean_complete;
		}
	}

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget || !clean_complete) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		/* in case we got here because of !clean_complete */
		done = budget;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* The current CPU is not in the IRQ affinity mask -
		 * the affinity probably changed. Stop this NAPI poll
		 * and let it be rescheduled on the right CPU.
		 * Avoid returning a too-small value (like 0) so we do
		 * not fool net_rx_action() and its netdev_budget.
		 */
		if (done)
			done--;
	}
	/* Done for now */
	if (likely(napi_complete_done(napi, done)))
		mlx4_en_arm_cq(priv, cq);
	return done;
}

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->tx_ring_num[TX_XDP]) {
		priv->frag_info[0].frag_size = eff_mtu;
		/* This will gain efficient xdp frame recycling at the
		 * expense of more costly truesize accounting
		 */
		priv->frag_info[0].frag_stride = PAGE_SIZE;
		priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
		priv->rx_headroom = XDP_PACKET_HEADROOM;
		i = 1;
	} else {
		int frag_size_max = 2048, buf_size = 0;

		/* should not happen, right ? */
		if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
			frag_size_max = PAGE_SIZE;

		while (buf_size < eff_mtu) {
			int frag_stride, frag_size = eff_mtu - buf_size;
			int pad, nb;

			if (i < MLX4_EN_MAX_RX_FRAGS - 1)
				frag_size = min(frag_size, frag_size_max);

			priv->frag_info[i].frag_size = frag_size;
			frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
			/* We can only pack two 1536-byte frames in one 4K page.
			 * Each frame therefore consumes more bytes (truesize).
			 */
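			/* Example: frag_size 1536 on a 4K page gives
			 * frag_stride 1536, nb = 2 and pad = 512, so each
			 * frame is accounted with a 2048-byte truesize.
			 */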
			nb = PAGE_SIZE / frag_stride;
			pad = (PAGE_SIZE - nb * frag_stride) / nb;
			pad &= ~(SMP_CACHE_BYTES - 1);
			priv->frag_info[i].frag_stride = frag_stride + pad;

			buf_size += frag_size;
			i++;
		}
		priv->dma_dir = PCI_DMA_FROMDEVICE;
		priv->rx_headroom = 0;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_dbg(DRV,
		       priv,
		       " frag:%d - size:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP,
				    MLX4_RES_USAGE_DRIVER);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate RX QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
		       MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	u8 flags;

	en_dbg(DRV, priv, "Configuring rss steering\n");

	flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, flags,
				    MLX4_RES_USAGE_DRIVER);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	if (priv->rx_ring_num == 1) {
		rss_map->indir_qp = &rss_map->qps[0];
		priv->base_qpn = rss_map->indir_qp->qpn;
		en_info(priv, "Optimized Non-RSS steering\n");
		return 0;
	}

	rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
	if (!rss_map->indir_qp) {
		err = -ENOMEM;
		goto rss_err;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}

	rss_map->indir_qp->event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
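	/* The high byte of base_qpn carries ilog2(rss_rings), i.e. the log2
	 * of the number of RSS rings; the ring count is therefore kept a
	 * power of two (see mlx4_en_set_num_rx_rings()).
	 */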
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
	kfree(rss_map->indir_qp);
	rss_map->indir_qp = NULL;
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	if (priv->rx_ring_num > 1) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
			       MLX4_QP_STATE_RST, NULL, 0, 0,
			       rss_map->indir_qp);
		mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
		mlx4_qp_free(mdev->dev, rss_map->indir_qp);
		kfree(rss_map->indir_qp);
		rss_map->indir_qp = NULL;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}