2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <net/tc_act/tc_gact.h>
34 #include <linux/mlx5/fs.h>
35 #include <net/vxlan.h>
36 #include <net/geneve.h>
37 #include <linux/bpf.h>
38 #include <linux/debugfs.h>
39 #include <linux/if_bridge.h>
40 #include <linux/filter.h>
41 #include <net/page_pool/types.h>
42 #include <net/pkt_sched.h>
43 #include <net/xdp_sock_drv.h>
49 #include "en_accel/ipsec.h"
50 #include "en_accel/macsec.h"
51 #include "en_accel/en_accel.h"
52 #include "en_accel/ktls.h"
53 #include "lib/vxlan.h"
54 #include "lib/clock.h"
58 #include "en/monitor_stats.h"
59 #include "en/health.h"
60 #include "en/params.h"
61 #include "en/xsk/pool.h"
62 #include "en/xsk/setup.h"
63 #include "en/xsk/rx.h"
64 #include "en/xsk/tx.h"
65 #include "en/hv_vhca_stats.h"
66 #include "en/devlink.h"
73 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev
*mdev
, u8 page_shift
,
74 enum mlx5e_mpwrq_umr_mode umr_mode
)
76 u16 umr_wqebbs
, max_wqebbs
;
79 striding_rq_umr
= MLX5_CAP_GEN(mdev
, striding_rq
) && MLX5_CAP_GEN(mdev
, umr_ptr_rlky
) &&
80 MLX5_CAP_ETH(mdev
, reg_umr_sq
);
84 umr_wqebbs
= mlx5e_mpwrq_umr_wqebbs(mdev
, page_shift
, umr_mode
);
85 max_wqebbs
= mlx5e_get_max_sq_aligned_wqebbs(mdev
);
86 /* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
87 * calculated from mlx5e_get_max_sq_aligned_wqebbs.
89 if (WARN_ON(umr_wqebbs
> max_wqebbs
))
95 void mlx5e_update_carrier(struct mlx5e_priv
*priv
)
97 struct mlx5_core_dev
*mdev
= priv
->mdev
;
101 port_state
= mlx5_query_vport_state(mdev
,
102 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT
,
105 up
= port_state
== VPORT_STATE_UP
;
106 if (up
== netif_carrier_ok(priv
->netdev
))
107 netif_carrier_event(priv
->netdev
);
109 netdev_info(priv
->netdev
, "Link up\n");
110 netif_carrier_on(priv
->netdev
);
112 netdev_info(priv
->netdev
, "Link down\n");
113 netif_carrier_off(priv
->netdev
);
117 static void mlx5e_update_carrier_work(struct work_struct
*work
)
119 struct mlx5e_priv
*priv
= container_of(work
, struct mlx5e_priv
,
120 update_carrier_work
);
122 mutex_lock(&priv
->state_lock
);
123 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
124 if (priv
->profile
->update_carrier
)
125 priv
->profile
->update_carrier(priv
);
126 mutex_unlock(&priv
->state_lock
);
129 static void mlx5e_update_stats_work(struct work_struct
*work
)
131 struct mlx5e_priv
*priv
= container_of(work
, struct mlx5e_priv
,
134 mutex_lock(&priv
->state_lock
);
135 priv
->profile
->update_stats(priv
);
136 mutex_unlock(&priv
->state_lock
);
139 void mlx5e_queue_update_stats(struct mlx5e_priv
*priv
)
141 if (!priv
->profile
->update_stats
)
144 if (unlikely(test_bit(MLX5E_STATE_DESTROYING
, &priv
->state
)))
147 queue_work(priv
->wq
, &priv
->update_stats_work
);
150 static int async_event(struct notifier_block
*nb
, unsigned long event
, void *data
)
152 struct mlx5e_priv
*priv
= container_of(nb
, struct mlx5e_priv
, events_nb
);
153 struct mlx5_eqe
*eqe
= data
;
155 if (event
!= MLX5_EVENT_TYPE_PORT_CHANGE
)
158 switch (eqe
->sub_type
) {
159 case MLX5_PORT_CHANGE_SUBTYPE_DOWN
:
160 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE
:
161 queue_work(priv
->wq
, &priv
->update_carrier_work
);
170 static void mlx5e_enable_async_events(struct mlx5e_priv
*priv
)
172 priv
->events_nb
.notifier_call
= async_event
;
173 mlx5_notifier_register(priv
->mdev
, &priv
->events_nb
);
176 static void mlx5e_disable_async_events(struct mlx5e_priv
*priv
)
178 mlx5_notifier_unregister(priv
->mdev
, &priv
->events_nb
);
181 static int blocking_event(struct notifier_block
*nb
, unsigned long event
, void *data
)
183 struct mlx5e_priv
*priv
= container_of(nb
, struct mlx5e_priv
, blocking_events_nb
);
184 struct mlx5_devlink_trap_event_ctx
*trap_event_ctx
= data
;
188 case MLX5_DRIVER_EVENT_TYPE_TRAP
:
189 err
= mlx5e_handle_trap_event(priv
, trap_event_ctx
->trap
);
191 trap_event_ctx
->err
= err
;
201 static void mlx5e_enable_blocking_events(struct mlx5e_priv
*priv
)
203 priv
->blocking_events_nb
.notifier_call
= blocking_event
;
204 mlx5_blocking_notifier_register(priv
->mdev
, &priv
->blocking_events_nb
);
207 static void mlx5e_disable_blocking_events(struct mlx5e_priv
*priv
)
209 mlx5_blocking_notifier_unregister(priv
->mdev
, &priv
->blocking_events_nb
);
212 static u16
mlx5e_mpwrq_umr_octowords(u32 entries
, enum mlx5e_mpwrq_umr_mode umr_mode
)
214 u8 umr_entry_size
= mlx5e_mpwrq_umr_entry_size(umr_mode
);
217 sz
= ALIGN(entries
* umr_entry_size
, MLX5_UMR_FLEX_ALIGNMENT
);
219 return sz
/ MLX5_OCTWORD
;
222 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq
*rq
,
223 struct mlx5e_icosq
*sq
,
224 struct mlx5e_umr_wqe
*wqe
)
226 struct mlx5_wqe_ctrl_seg
*cseg
= &wqe
->ctrl
;
227 struct mlx5_wqe_umr_ctrl_seg
*ucseg
= &wqe
->uctrl
;
231 ds_cnt
= DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq
->mdev
, rq
->mpwqe
.page_shift
,
235 cseg
->qpn_ds
= cpu_to_be32((sq
->sqn
<< MLX5_WQE_CTRL_QPN_SHIFT
) |
237 cseg
->umr_mkey
= rq
->mpwqe
.umr_mkey_be
;
239 ucseg
->flags
= MLX5_UMR_TRANSLATION_OFFSET_EN
| MLX5_UMR_INLINE
;
240 octowords
= mlx5e_mpwrq_umr_octowords(rq
->mpwqe
.pages_per_wqe
, rq
->mpwqe
.umr_mode
);
241 ucseg
->xlt_octowords
= cpu_to_be16(octowords
);
242 ucseg
->mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
245 static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq
*rq
, int node
)
247 rq
->mpwqe
.shampo
= kvzalloc_node(sizeof(*rq
->mpwqe
.shampo
),
249 if (!rq
->mpwqe
.shampo
)
254 static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq
*rq
)
256 kvfree(rq
->mpwqe
.shampo
);
259 static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq
*rq
, int node
)
261 struct mlx5e_shampo_hd
*shampo
= rq
->mpwqe
.shampo
;
263 shampo
->bitmap
= bitmap_zalloc_node(shampo
->hd_per_wq
, GFP_KERNEL
,
265 shampo
->info
= kvzalloc_node(array_size(shampo
->hd_per_wq
,
266 sizeof(*shampo
->info
)),
268 shampo
->pages
= kvzalloc_node(array_size(shampo
->hd_per_wq
,
269 sizeof(*shampo
->pages
)),
271 if (!shampo
->bitmap
|| !shampo
->info
|| !shampo
->pages
)
277 kvfree(shampo
->info
);
278 kvfree(shampo
->bitmap
);
279 kvfree(shampo
->pages
);
284 static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq
*rq
)
286 kvfree(rq
->mpwqe
.shampo
->bitmap
);
287 kvfree(rq
->mpwqe
.shampo
->info
);
288 kvfree(rq
->mpwqe
.shampo
->pages
);
291 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq
*rq
, int node
)
293 int wq_sz
= mlx5_wq_ll_get_size(&rq
->mpwqe
.wq
);
296 alloc_size
= array_size(wq_sz
, struct_size(rq
->mpwqe
.info
,
297 alloc_units
.frag_pages
,
298 rq
->mpwqe
.pages_per_wqe
));
300 rq
->mpwqe
.info
= kvzalloc_node(alloc_size
, GFP_KERNEL
, node
);
304 /* For deferred page release (release right before alloc), make sure
305 * that on first round release is not called.
307 for (int i
= 0; i
< wq_sz
; i
++) {
308 struct mlx5e_mpw_info
*wi
= mlx5e_get_mpw_info(rq
, i
);
310 bitmap_fill(wi
->skip_release_bitmap
, rq
->mpwqe
.pages_per_wqe
);
313 mlx5e_build_umr_wqe(rq
, rq
->icosq
, &rq
->mpwqe
.umr_wqe
);
319 static u8
mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode
)
322 case MLX5E_MPWRQ_UMR_MODE_ALIGNED
:
323 return MLX5_MKC_ACCESS_MODE_MTT
;
324 case MLX5E_MPWRQ_UMR_MODE_UNALIGNED
:
325 return MLX5_MKC_ACCESS_MODE_KSM
;
326 case MLX5E_MPWRQ_UMR_MODE_OVERSIZED
:
327 return MLX5_MKC_ACCESS_MODE_KLMS
;
328 case MLX5E_MPWRQ_UMR_MODE_TRIPLE
:
329 return MLX5_MKC_ACCESS_MODE_KSM
;
331 WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode
);
335 static int mlx5e_create_umr_mkey(struct mlx5_core_dev
*mdev
,
336 u32 npages
, u8 page_shift
, u32
*umr_mkey
,
337 dma_addr_t filler_addr
,
338 enum mlx5e_mpwrq_umr_mode umr_mode
,
341 struct mlx5_mtt
*mtt
;
342 struct mlx5_ksm
*ksm
;
343 struct mlx5_klm
*klm
;
351 if ((umr_mode
== MLX5E_MPWRQ_UMR_MODE_UNALIGNED
||
352 umr_mode
== MLX5E_MPWRQ_UMR_MODE_TRIPLE
) &&
353 !MLX5_CAP_GEN(mdev
, fixed_buffer_size
)) {
354 mlx5_core_warn(mdev
, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
358 octwords
= mlx5e_mpwrq_umr_octowords(npages
, umr_mode
);
360 inlen
= MLX5_FLEXIBLE_INLEN(mdev
, MLX5_ST_SZ_BYTES(create_mkey_in
),
361 MLX5_OCTWORD
, octwords
);
365 in
= kvzalloc(inlen
, GFP_KERNEL
);
369 mkc
= MLX5_ADDR_OF(create_mkey_in
, in
, memory_key_mkey_entry
);
371 MLX5_SET(mkc
, mkc
, free
, 1);
372 MLX5_SET(mkc
, mkc
, umr_en
, 1);
373 MLX5_SET(mkc
, mkc
, lw
, 1);
374 MLX5_SET(mkc
, mkc
, lr
, 1);
375 MLX5_SET(mkc
, mkc
, access_mode_1_0
, mlx5e_mpwrq_access_mode(umr_mode
));
376 mlx5e_mkey_set_relaxed_ordering(mdev
, mkc
);
377 MLX5_SET(mkc
, mkc
, qpn
, 0xffffff);
378 MLX5_SET(mkc
, mkc
, pd
, mdev
->mlx5e_res
.hw_objs
.pdn
);
379 MLX5_SET64(mkc
, mkc
, len
, npages
<< page_shift
);
380 MLX5_SET(mkc
, mkc
, translations_octword_size
, octwords
);
381 if (umr_mode
== MLX5E_MPWRQ_UMR_MODE_TRIPLE
)
382 MLX5_SET(mkc
, mkc
, log_page_size
, page_shift
- 2);
383 else if (umr_mode
!= MLX5E_MPWRQ_UMR_MODE_OVERSIZED
)
384 MLX5_SET(mkc
, mkc
, log_page_size
, page_shift
);
385 MLX5_SET(create_mkey_in
, in
, translations_octword_actual_size
, octwords
);
387 /* Initialize the mkey with all MTTs pointing to a default
388 * page (filler_addr). When the channels are activated, UMR
389 * WQEs will redirect the RX WQEs to the actual memory from
390 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
391 * to the default page.
394 case MLX5E_MPWRQ_UMR_MODE_OVERSIZED
:
395 klm
= MLX5_ADDR_OF(create_mkey_in
, in
, klm_pas_mtt
);
396 for (i
= 0; i
< npages
; i
++) {
397 klm
[i
<< 1] = (struct mlx5_klm
) {
398 .va
= cpu_to_be64(filler_addr
),
399 .bcount
= cpu_to_be32(xsk_chunk_size
),
400 .key
= cpu_to_be32(mdev
->mlx5e_res
.hw_objs
.mkey
),
402 klm
[(i
<< 1) + 1] = (struct mlx5_klm
) {
403 .va
= cpu_to_be64(filler_addr
),
404 .bcount
= cpu_to_be32((1 << page_shift
) - xsk_chunk_size
),
405 .key
= cpu_to_be32(mdev
->mlx5e_res
.hw_objs
.mkey
),
409 case MLX5E_MPWRQ_UMR_MODE_UNALIGNED
:
410 ksm
= MLX5_ADDR_OF(create_mkey_in
, in
, klm_pas_mtt
);
411 for (i
= 0; i
< npages
; i
++)
412 ksm
[i
] = (struct mlx5_ksm
) {
413 .key
= cpu_to_be32(mdev
->mlx5e_res
.hw_objs
.mkey
),
414 .va
= cpu_to_be64(filler_addr
),
417 case MLX5E_MPWRQ_UMR_MODE_ALIGNED
:
418 mtt
= MLX5_ADDR_OF(create_mkey_in
, in
, klm_pas_mtt
);
419 for (i
= 0; i
< npages
; i
++)
420 mtt
[i
] = (struct mlx5_mtt
) {
421 .ptag
= cpu_to_be64(filler_addr
),
424 case MLX5E_MPWRQ_UMR_MODE_TRIPLE
:
425 ksm
= MLX5_ADDR_OF(create_mkey_in
, in
, klm_pas_mtt
);
426 for (i
= 0; i
< npages
* 4; i
++) {
427 ksm
[i
] = (struct mlx5_ksm
) {
428 .key
= cpu_to_be32(mdev
->mlx5e_res
.hw_objs
.mkey
),
429 .va
= cpu_to_be64(filler_addr
),
435 err
= mlx5_core_create_mkey(mdev
, umr_mkey
, in
, inlen
);
441 static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev
*mdev
,
450 inlen
= MLX5_ST_SZ_BYTES(create_mkey_in
);
452 in
= kvzalloc(inlen
, GFP_KERNEL
);
456 mkc
= MLX5_ADDR_OF(create_mkey_in
, in
, memory_key_mkey_entry
);
458 MLX5_SET(mkc
, mkc
, free
, 1);
459 MLX5_SET(mkc
, mkc
, umr_en
, 1);
460 MLX5_SET(mkc
, mkc
, lw
, 1);
461 MLX5_SET(mkc
, mkc
, lr
, 1);
462 MLX5_SET(mkc
, mkc
, access_mode_1_0
, MLX5_MKC_ACCESS_MODE_KLMS
);
463 mlx5e_mkey_set_relaxed_ordering(mdev
, mkc
);
464 MLX5_SET(mkc
, mkc
, qpn
, 0xffffff);
465 MLX5_SET(mkc
, mkc
, pd
, mdev
->mlx5e_res
.hw_objs
.pdn
);
466 MLX5_SET(mkc
, mkc
, translations_octword_size
, nentries
);
467 MLX5_SET(mkc
, mkc
, length64
, 1);
468 err
= mlx5_core_create_mkey(mdev
, umr_mkey
, in
, inlen
);
474 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev
*mdev
, struct mlx5e_rq
*rq
)
476 u32 xsk_chunk_size
= rq
->xsk_pool
? rq
->xsk_pool
->chunk_size
: 0;
477 u32 wq_size
= mlx5_wq_ll_get_size(&rq
->mpwqe
.wq
);
478 u32 num_entries
, max_num_entries
;
482 max_num_entries
= mlx5e_mpwrq_max_num_entries(mdev
, rq
->mpwqe
.umr_mode
);
484 /* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
485 if (WARN_ON_ONCE(check_mul_overflow(wq_size
, (u32
)rq
->mpwqe
.mtts_per_wqe
,
487 num_entries
> max_num_entries
))
488 mlx5_core_err(mdev
, "%s: multiplication overflow: %u * %u > %u\n",
489 __func__
, wq_size
, rq
->mpwqe
.mtts_per_wqe
,
492 err
= mlx5e_create_umr_mkey(mdev
, num_entries
, rq
->mpwqe
.page_shift
,
493 &umr_mkey
, rq
->wqe_overflow
.addr
,
494 rq
->mpwqe
.umr_mode
, xsk_chunk_size
);
495 rq
->mpwqe
.umr_mkey_be
= cpu_to_be32(umr_mkey
);
499 static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev
*mdev
,
502 u32 max_klm_size
= BIT(MLX5_CAP_GEN(mdev
, log_max_klm_list_size
));
504 if (max_klm_size
< rq
->mpwqe
.shampo
->hd_per_wq
) {
505 mlx5_core_err(mdev
, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
506 max_klm_size
, rq
->mpwqe
.shampo
->hd_per_wq
);
509 return mlx5e_create_umr_klm_mkey(mdev
, rq
->mpwqe
.shampo
->hd_per_wq
,
510 &rq
->mpwqe
.shampo
->mkey
);
513 static void mlx5e_init_frags_partition(struct mlx5e_rq
*rq
)
515 struct mlx5e_wqe_frag_info next_frag
= {};
516 struct mlx5e_wqe_frag_info
*prev
= NULL
;
519 WARN_ON(rq
->xsk_pool
);
521 next_frag
.frag_page
= &rq
->wqe
.alloc_units
->frag_pages
[0];
523 /* Skip first release due to deferred release. */
524 next_frag
.flags
= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE
);
526 for (i
= 0; i
< mlx5_wq_cyc_get_size(&rq
->wqe
.wq
); i
++) {
527 struct mlx5e_rq_frag_info
*frag_info
= &rq
->wqe
.info
.arr
[0];
528 struct mlx5e_wqe_frag_info
*frag
=
529 &rq
->wqe
.frags
[i
<< rq
->wqe
.info
.log_num_frags
];
532 for (f
= 0; f
< rq
->wqe
.info
.num_frags
; f
++, frag
++) {
533 if (next_frag
.offset
+ frag_info
[f
].frag_stride
> PAGE_SIZE
) {
534 /* Pages are assigned at runtime. */
535 next_frag
.frag_page
++;
536 next_frag
.offset
= 0;
538 prev
->flags
|= BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE
);
543 next_frag
.offset
+= frag_info
[f
].frag_stride
;
549 prev
->flags
|= BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE
);
552 static void mlx5e_init_xsk_buffs(struct mlx5e_rq
*rq
)
556 /* Assumptions used by XSK batched allocator. */
557 WARN_ON(rq
->wqe
.info
.num_frags
!= 1);
558 WARN_ON(rq
->wqe
.info
.log_num_frags
!= 0);
559 WARN_ON(rq
->wqe
.info
.arr
[0].frag_stride
!= PAGE_SIZE
);
561 /* Considering the above assumptions a fragment maps to a single
564 for (i
= 0; i
< mlx5_wq_cyc_get_size(&rq
->wqe
.wq
); i
++) {
565 rq
->wqe
.frags
[i
].xskp
= &rq
->wqe
.alloc_units
->xsk_buffs
[i
];
567 /* Skip first release due to deferred release as WQES are
570 rq
->wqe
.frags
[i
].flags
|= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE
);
574 static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq
*rq
, int node
)
576 int wq_sz
= mlx5_wq_cyc_get_size(&rq
->wqe
.wq
);
577 int len
= wq_sz
<< rq
->wqe
.info
.log_num_frags
;
578 struct mlx5e_wqe_frag_info
*frags
;
579 union mlx5e_alloc_units
*aus
;
583 aus_sz
= sizeof(*aus
->xsk_buffs
);
585 aus_sz
= sizeof(*aus
->frag_pages
);
587 aus
= kvzalloc_node(array_size(len
, aus_sz
), GFP_KERNEL
, node
);
591 frags
= kvzalloc_node(array_size(len
, sizeof(*frags
)), GFP_KERNEL
, node
);
597 rq
->wqe
.alloc_units
= aus
;
598 rq
->wqe
.frags
= frags
;
601 mlx5e_init_xsk_buffs(rq
);
603 mlx5e_init_frags_partition(rq
);
608 static void mlx5e_free_wqe_alloc_info(struct mlx5e_rq
*rq
)
610 kvfree(rq
->wqe
.frags
);
611 kvfree(rq
->wqe
.alloc_units
);
614 static void mlx5e_rq_err_cqe_work(struct work_struct
*recover_work
)
616 struct mlx5e_rq
*rq
= container_of(recover_work
, struct mlx5e_rq
, recover_work
);
618 mlx5e_reporter_rq_cqe_err(rq
);
621 static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq
*rq
)
623 rq
->wqe_overflow
.page
= alloc_page(GFP_KERNEL
);
624 if (!rq
->wqe_overflow
.page
)
627 rq
->wqe_overflow
.addr
= dma_map_page(rq
->pdev
, rq
->wqe_overflow
.page
, 0,
628 PAGE_SIZE
, rq
->buff
.map_dir
);
629 if (dma_mapping_error(rq
->pdev
, rq
->wqe_overflow
.addr
)) {
630 __free_page(rq
->wqe_overflow
.page
);
636 static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq
*rq
)
638 dma_unmap_page(rq
->pdev
, rq
->wqe_overflow
.addr
, PAGE_SIZE
,
640 __free_page(rq
->wqe_overflow
.page
);
643 static int mlx5e_init_rxq_rq(struct mlx5e_channel
*c
, struct mlx5e_params
*params
,
644 u32 xdp_frag_size
, struct mlx5e_rq
*rq
)
646 struct mlx5_core_dev
*mdev
= c
->mdev
;
649 rq
->wq_type
= params
->rq_wq_type
;
651 rq
->netdev
= c
->netdev
;
653 rq
->tstamp
= c
->tstamp
;
654 rq
->clock
= &mdev
->clock
;
655 rq
->icosq
= &c
->icosq
;
660 MLX5E_SW2HW_MTU(params
, params
->sw_mtu
) - ETH_FCS_LEN
* !params
->scatter_fcs_en
;
661 rq
->xdpsq
= &c
->rq_xdpsq
;
662 rq
->stats
= &c
->priv
->channel_stats
[c
->ix
]->rq
;
663 rq
->ptp_cyc2time
= mlx5_rq_ts_translator(mdev
);
664 err
= mlx5e_rq_set_handlers(rq
, params
, NULL
);
668 return __xdp_rxq_info_reg(&rq
->xdp_rxq
, rq
->netdev
, rq
->ix
, c
->napi
.napi_id
,
672 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev
*mdev
,
673 struct mlx5e_params
*params
,
674 struct mlx5e_rq_param
*rqp
,
679 void *wqc
= MLX5_ADDR_OF(rqc
, rqp
->rqc
, wq
);
683 if (!test_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
))
685 err
= mlx5e_rq_shampo_hd_alloc(rq
, node
);
688 rq
->mpwqe
.shampo
->hd_per_wq
=
689 mlx5e_shampo_hd_per_wq(mdev
, params
, rqp
);
690 err
= mlx5e_create_rq_hd_umr_mkey(mdev
, rq
);
693 err
= mlx5e_rq_shampo_hd_info_alloc(rq
, node
);
695 goto err_shampo_info
;
696 rq
->hw_gro_data
= kvzalloc_node(sizeof(*rq
->hw_gro_data
), GFP_KERNEL
, node
);
697 if (!rq
->hw_gro_data
) {
699 goto err_hw_gro_data
;
701 rq
->mpwqe
.shampo
->key
=
702 cpu_to_be32(rq
->mpwqe
.shampo
->mkey
);
703 rq
->mpwqe
.shampo
->hd_per_wqe
=
704 mlx5e_shampo_hd_per_wqe(mdev
, params
, rqp
);
705 wq_size
= BIT(MLX5_GET(wq
, wqc
, log_wq_sz
));
706 *pool_size
+= (rq
->mpwqe
.shampo
->hd_per_wqe
* wq_size
) /
707 MLX5E_SHAMPO_WQ_HEADER_PER_PAGE
;
711 mlx5e_rq_shampo_hd_info_free(rq
);
713 mlx5_core_destroy_mkey(mdev
, rq
->mpwqe
.shampo
->mkey
);
715 mlx5e_rq_shampo_hd_free(rq
);
720 static void mlx5e_rq_free_shampo(struct mlx5e_rq
*rq
)
722 if (!test_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
))
725 kvfree(rq
->hw_gro_data
);
726 mlx5e_rq_shampo_hd_info_free(rq
);
727 mlx5_core_destroy_mkey(rq
->mdev
, rq
->mpwqe
.shampo
->mkey
);
728 mlx5e_rq_shampo_hd_free(rq
);
731 static int mlx5e_alloc_rq(struct mlx5e_params
*params
,
732 struct mlx5e_xsk_param
*xsk
,
733 struct mlx5e_rq_param
*rqp
,
734 int node
, struct mlx5e_rq
*rq
)
736 struct mlx5_core_dev
*mdev
= rq
->mdev
;
737 void *rqc
= rqp
->rqc
;
738 void *rqc_wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
744 rqp
->wq
.db_numa_node
= node
;
745 INIT_WORK(&rq
->recover_work
, mlx5e_rq_err_cqe_work
);
747 if (params
->xdp_prog
)
748 bpf_prog_inc(params
->xdp_prog
);
749 RCU_INIT_POINTER(rq
->xdp_prog
, params
->xdp_prog
);
751 rq
->buff
.map_dir
= params
->xdp_prog
? DMA_BIDIRECTIONAL
: DMA_FROM_DEVICE
;
752 rq
->buff
.headroom
= mlx5e_get_rq_headroom(mdev
, params
, xsk
);
753 pool_size
= 1 << params
->log_rq_mtu_frames
;
755 rq
->mkey_be
= cpu_to_be32(mdev
->mlx5e_res
.hw_objs
.mkey
);
757 switch (rq
->wq_type
) {
758 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
:
759 err
= mlx5_wq_ll_create(mdev
, &rqp
->wq
, rqc_wq
, &rq
->mpwqe
.wq
,
762 goto err_rq_xdp_prog
;
764 err
= mlx5e_alloc_mpwqe_rq_drop_page(rq
);
766 goto err_rq_wq_destroy
;
768 rq
->mpwqe
.wq
.db
= &rq
->mpwqe
.wq
.db
[MLX5_RCV_DBR
];
770 wq_sz
= mlx5_wq_ll_get_size(&rq
->mpwqe
.wq
);
772 rq
->mpwqe
.page_shift
= mlx5e_mpwrq_page_shift(mdev
, xsk
);
773 rq
->mpwqe
.umr_mode
= mlx5e_mpwrq_umr_mode(mdev
, xsk
);
774 rq
->mpwqe
.pages_per_wqe
=
775 mlx5e_mpwrq_pages_per_wqe(mdev
, rq
->mpwqe
.page_shift
,
777 rq
->mpwqe
.umr_wqebbs
=
778 mlx5e_mpwrq_umr_wqebbs(mdev
, rq
->mpwqe
.page_shift
,
780 rq
->mpwqe
.mtts_per_wqe
=
781 mlx5e_mpwrq_mtts_per_wqe(mdev
, rq
->mpwqe
.page_shift
,
784 pool_size
= rq
->mpwqe
.pages_per_wqe
<<
785 mlx5e_mpwqe_get_log_rq_size(mdev
, params
, xsk
);
787 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev
, params
, xsk
) && params
->xdp_prog
)
788 pool_size
*= 2; /* additional page per packet for the linear part */
790 rq
->mpwqe
.log_stride_sz
= mlx5e_mpwqe_get_log_stride_size(mdev
, params
, xsk
);
791 rq
->mpwqe
.num_strides
=
792 BIT(mlx5e_mpwqe_get_log_num_strides(mdev
, params
, xsk
));
793 rq
->mpwqe
.min_wqe_bulk
= mlx5e_mpwqe_get_min_wqe_bulk(wq_sz
);
795 rq
->buff
.frame0_sz
= (1 << rq
->mpwqe
.log_stride_sz
);
797 err
= mlx5e_create_rq_umr_mkey(mdev
, rq
);
799 goto err_rq_drop_page
;
801 err
= mlx5e_rq_alloc_mpwqe_info(rq
, node
);
805 err
= mlx5_rq_shampo_alloc(mdev
, params
, rqp
, rq
, &pool_size
, node
);
807 goto err_free_mpwqe_info
;
810 default: /* MLX5_WQ_TYPE_CYCLIC */
811 err
= mlx5_wq_cyc_create(mdev
, &rqp
->wq
, rqc_wq
, &rq
->wqe
.wq
,
814 goto err_rq_xdp_prog
;
816 rq
->wqe
.wq
.db
= &rq
->wqe
.wq
.db
[MLX5_RCV_DBR
];
818 wq_sz
= mlx5_wq_cyc_get_size(&rq
->wqe
.wq
);
820 rq
->wqe
.info
= rqp
->frags_info
;
821 rq
->buff
.frame0_sz
= rq
->wqe
.info
.arr
[0].frag_stride
;
823 err
= mlx5e_init_wqe_alloc_info(rq
, node
);
825 goto err_rq_wq_destroy
;
829 err
= xdp_rxq_info_reg_mem_model(&rq
->xdp_rxq
,
830 MEM_TYPE_XSK_BUFF_POOL
, NULL
);
831 xsk_pool_set_rxq_info(rq
->xsk_pool
, &rq
->xdp_rxq
);
833 /* Create a page_pool and register it with rxq */
834 struct page_pool_params pp_params
= { 0 };
837 pp_params
.flags
= PP_FLAG_DMA_MAP
| PP_FLAG_DMA_SYNC_DEV
| PP_FLAG_PAGE_FRAG
;
838 pp_params
.pool_size
= pool_size
;
839 pp_params
.nid
= node
;
840 pp_params
.dev
= rq
->pdev
;
841 pp_params
.napi
= rq
->cq
.napi
;
842 pp_params
.dma_dir
= rq
->buff
.map_dir
;
843 pp_params
.max_len
= PAGE_SIZE
;
845 /* page_pool can be used even when there is no rq->xdp_prog,
846 * given page_pool does not handle DMA mapping there is no
847 * required state to clear. And page_pool gracefully handle
850 rq
->page_pool
= page_pool_create(&pp_params
);
851 if (IS_ERR(rq
->page_pool
)) {
852 err
= PTR_ERR(rq
->page_pool
);
853 rq
->page_pool
= NULL
;
854 goto err_free_by_rq_type
;
856 if (xdp_rxq_info_is_reg(&rq
->xdp_rxq
))
857 err
= xdp_rxq_info_reg_mem_model(&rq
->xdp_rxq
,
858 MEM_TYPE_PAGE_POOL
, rq
->page_pool
);
861 goto err_destroy_page_pool
;
863 for (i
= 0; i
< wq_sz
; i
++) {
864 if (rq
->wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
) {
865 struct mlx5e_rx_wqe_ll
*wqe
=
866 mlx5_wq_ll_get_wqe(&rq
->mpwqe
.wq
, i
);
868 rq
->mpwqe
.num_strides
<< rq
->mpwqe
.log_stride_sz
;
869 u64 dma_offset
= mul_u32_u32(i
, rq
->mpwqe
.mtts_per_wqe
) <<
870 rq
->mpwqe
.page_shift
;
871 u16 headroom
= test_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
) ?
872 0 : rq
->buff
.headroom
;
874 wqe
->data
[0].addr
= cpu_to_be64(dma_offset
+ headroom
);
875 wqe
->data
[0].byte_count
= cpu_to_be32(byte_count
);
876 wqe
->data
[0].lkey
= rq
->mpwqe
.umr_mkey_be
;
878 struct mlx5e_rx_wqe_cyc
*wqe
=
879 mlx5_wq_cyc_get_wqe(&rq
->wqe
.wq
, i
);
882 for (f
= 0; f
< rq
->wqe
.info
.num_frags
; f
++) {
883 u32 frag_size
= rq
->wqe
.info
.arr
[f
].frag_size
|
884 MLX5_HW_START_PADDING
;
886 wqe
->data
[f
].byte_count
= cpu_to_be32(frag_size
);
887 wqe
->data
[f
].lkey
= rq
->mkey_be
;
889 /* check if num_frags is not a pow of two */
890 if (rq
->wqe
.info
.num_frags
< (1 << rq
->wqe
.info
.log_num_frags
)) {
891 wqe
->data
[f
].byte_count
= 0;
892 wqe
->data
[f
].lkey
= params
->terminate_lkey_be
;
893 wqe
->data
[f
].addr
= 0;
898 INIT_WORK(&rq
->dim
.work
, mlx5e_rx_dim_work
);
900 switch (params
->rx_cq_moderation
.cq_period_mode
) {
901 case MLX5_CQ_PERIOD_MODE_START_FROM_CQE
:
902 rq
->dim
.mode
= DIM_CQ_PERIOD_MODE_START_FROM_CQE
;
904 case MLX5_CQ_PERIOD_MODE_START_FROM_EQE
:
906 rq
->dim
.mode
= DIM_CQ_PERIOD_MODE_START_FROM_EQE
;
911 err_destroy_page_pool
:
912 page_pool_destroy(rq
->page_pool
);
914 switch (rq
->wq_type
) {
915 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
:
916 mlx5e_rq_free_shampo(rq
);
918 kvfree(rq
->mpwqe
.info
);
920 mlx5_core_destroy_mkey(mdev
, be32_to_cpu(rq
->mpwqe
.umr_mkey_be
));
922 mlx5e_free_mpwqe_rq_drop_page(rq
);
924 default: /* MLX5_WQ_TYPE_CYCLIC */
925 mlx5e_free_wqe_alloc_info(rq
);
928 mlx5_wq_destroy(&rq
->wq_ctrl
);
930 if (params
->xdp_prog
)
931 bpf_prog_put(params
->xdp_prog
);
936 static void mlx5e_free_rq(struct mlx5e_rq
*rq
)
938 struct bpf_prog
*old_prog
;
940 if (xdp_rxq_info_is_reg(&rq
->xdp_rxq
)) {
941 old_prog
= rcu_dereference_protected(rq
->xdp_prog
,
942 lockdep_is_held(&rq
->priv
->state_lock
));
944 bpf_prog_put(old_prog
);
947 switch (rq
->wq_type
) {
948 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
:
949 kvfree(rq
->mpwqe
.info
);
950 mlx5_core_destroy_mkey(rq
->mdev
, be32_to_cpu(rq
->mpwqe
.umr_mkey_be
));
951 mlx5e_free_mpwqe_rq_drop_page(rq
);
952 mlx5e_rq_free_shampo(rq
);
954 default: /* MLX5_WQ_TYPE_CYCLIC */
955 mlx5e_free_wqe_alloc_info(rq
);
958 xdp_rxq_info_unreg(&rq
->xdp_rxq
);
959 page_pool_destroy(rq
->page_pool
);
960 mlx5_wq_destroy(&rq
->wq_ctrl
);
963 int mlx5e_create_rq(struct mlx5e_rq
*rq
, struct mlx5e_rq_param
*param
)
965 struct mlx5_core_dev
*mdev
= rq
->mdev
;
973 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) +
974 sizeof(u64
) * rq
->wq_ctrl
.buf
.npages
;
975 in
= kvzalloc(inlen
, GFP_KERNEL
);
979 ts_format
= mlx5_is_real_time_rq(mdev
) ?
980 MLX5_TIMESTAMP_FORMAT_REAL_TIME
:
981 MLX5_TIMESTAMP_FORMAT_FREE_RUNNING
;
982 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
983 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
985 memcpy(rqc
, param
->rqc
, sizeof(param
->rqc
));
987 MLX5_SET(rqc
, rqc
, cqn
, rq
->cq
.mcq
.cqn
);
988 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
989 MLX5_SET(rqc
, rqc
, ts_format
, ts_format
);
990 MLX5_SET(wq
, wq
, log_wq_pg_sz
, rq
->wq_ctrl
.buf
.page_shift
-
991 MLX5_ADAPTER_PAGE_SHIFT
);
992 MLX5_SET64(wq
, wq
, dbr_addr
, rq
->wq_ctrl
.db
.dma
);
994 if (test_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
)) {
995 MLX5_SET(wq
, wq
, log_headers_buffer_entry_num
,
996 order_base_2(rq
->mpwqe
.shampo
->hd_per_wq
));
997 MLX5_SET(wq
, wq
, headers_mkey
, rq
->mpwqe
.shampo
->mkey
);
1000 mlx5_fill_page_frag_array(&rq
->wq_ctrl
.buf
,
1001 (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
));
1003 err
= mlx5_core_create_rq(mdev
, in
, inlen
, &rq
->rqn
);
1010 static int mlx5e_modify_rq_state(struct mlx5e_rq
*rq
, int curr_state
, int next_state
)
1012 struct mlx5_core_dev
*mdev
= rq
->mdev
;
1019 inlen
= MLX5_ST_SZ_BYTES(modify_rq_in
);
1020 in
= kvzalloc(inlen
, GFP_KERNEL
);
1024 if (curr_state
== MLX5_RQC_STATE_RST
&& next_state
== MLX5_RQC_STATE_RDY
)
1025 mlx5e_rqwq_reset(rq
);
1027 rqc
= MLX5_ADDR_OF(modify_rq_in
, in
, ctx
);
1029 MLX5_SET(modify_rq_in
, in
, rq_state
, curr_state
);
1030 MLX5_SET(rqc
, rqc
, state
, next_state
);
1032 err
= mlx5_core_modify_rq(mdev
, rq
->rqn
, in
);
1039 static void mlx5e_flush_rq_cq(struct mlx5e_rq
*rq
)
1041 struct mlx5_cqwq
*cqwq
= &rq
->cq
.wq
;
1042 struct mlx5_cqe64
*cqe
;
1044 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED
, &rq
->state
)) {
1045 while ((cqe
= mlx5_cqwq_get_cqe_enahnced_comp(cqwq
)))
1046 mlx5_cqwq_pop(cqwq
);
1048 while ((cqe
= mlx5_cqwq_get_cqe(cqwq
)))
1049 mlx5_cqwq_pop(cqwq
);
1052 mlx5_cqwq_update_db_record(cqwq
);
1055 int mlx5e_flush_rq(struct mlx5e_rq
*rq
, int curr_state
)
1057 struct net_device
*dev
= rq
->netdev
;
1060 err
= mlx5e_modify_rq_state(rq
, curr_state
, MLX5_RQC_STATE_RST
);
1062 netdev_err(dev
, "Failed to move rq 0x%x to reset\n", rq
->rqn
);
1066 mlx5e_free_rx_descs(rq
);
1067 mlx5e_flush_rq_cq(rq
);
1069 err
= mlx5e_modify_rq_state(rq
, MLX5_RQC_STATE_RST
, MLX5_RQC_STATE_RDY
);
1071 netdev_err(dev
, "Failed to move rq 0x%x to ready\n", rq
->rqn
);
1078 static int mlx5e_modify_rq_vsd(struct mlx5e_rq
*rq
, bool vsd
)
1080 struct mlx5_core_dev
*mdev
= rq
->mdev
;
1086 inlen
= MLX5_ST_SZ_BYTES(modify_rq_in
);
1087 in
= kvzalloc(inlen
, GFP_KERNEL
);
1091 rqc
= MLX5_ADDR_OF(modify_rq_in
, in
, ctx
);
1093 MLX5_SET(modify_rq_in
, in
, rq_state
, MLX5_RQC_STATE_RDY
);
1094 MLX5_SET64(modify_rq_in
, in
, modify_bitmask
,
1095 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD
);
1096 MLX5_SET(rqc
, rqc
, vsd
, vsd
);
1097 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RDY
);
1099 err
= mlx5_core_modify_rq(mdev
, rq
->rqn
, in
);
1106 void mlx5e_destroy_rq(struct mlx5e_rq
*rq
)
1108 mlx5_core_destroy_rq(rq
->mdev
, rq
->rqn
);
1111 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq
*rq
, int wait_time
)
1113 unsigned long exp_time
= jiffies
+ msecs_to_jiffies(wait_time
);
1115 u16 min_wqes
= mlx5_min_rx_wqes(rq
->wq_type
, mlx5e_rqwq_get_size(rq
));
1118 if (mlx5e_rqwq_get_cur_sz(rq
) >= min_wqes
)
1122 } while (time_before(jiffies
, exp_time
));
1124 netdev_warn(rq
->netdev
, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
1125 rq
->ix
, rq
->rqn
, mlx5e_rqwq_get_cur_sz(rq
), min_wqes
);
1127 mlx5e_reporter_rx_timeout(rq
);
1131 void mlx5e_free_rx_missing_descs(struct mlx5e_rq
*rq
)
1133 struct mlx5_wq_ll
*wq
;
1137 if (rq
->wq_type
!= MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
)
1143 /* Release WQEs that are in missing state: they have been
1144 * popped from the list after completion but were not freed
1145 * due to deferred release.
1146 * Also free the linked-list reserved entry, hence the "+ 1".
1148 for (i
= 0; i
< mlx5_wq_ll_missing(wq
) + 1; i
++) {
1149 rq
->dealloc_wqe(rq
, head
);
1150 head
= mlx5_wq_ll_get_wqe_next_ix(wq
, head
);
1153 if (test_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
)) {
1156 len
= (rq
->mpwqe
.shampo
->pi
- rq
->mpwqe
.shampo
->ci
) &
1157 (rq
->mpwqe
.shampo
->hd_per_wq
- 1);
1158 mlx5e_shampo_dealloc_hd(rq
, len
, rq
->mpwqe
.shampo
->ci
, false);
1159 rq
->mpwqe
.shampo
->pi
= rq
->mpwqe
.shampo
->ci
;
1162 rq
->mpwqe
.actual_wq_head
= wq
->head
;
1163 rq
->mpwqe
.umr_in_progress
= 0;
1164 rq
->mpwqe
.umr_completed
= 0;
1167 void mlx5e_free_rx_descs(struct mlx5e_rq
*rq
)
1172 if (rq
->wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
) {
1173 struct mlx5_wq_ll
*wq
= &rq
->mpwqe
.wq
;
1175 mlx5e_free_rx_missing_descs(rq
);
1177 while (!mlx5_wq_ll_is_empty(wq
)) {
1178 struct mlx5e_rx_wqe_ll
*wqe
;
1180 wqe_ix_be
= *wq
->tail_next
;
1181 wqe_ix
= be16_to_cpu(wqe_ix_be
);
1182 wqe
= mlx5_wq_ll_get_wqe(wq
, wqe_ix
);
1183 rq
->dealloc_wqe(rq
, wqe_ix
);
1184 mlx5_wq_ll_pop(wq
, wqe_ix_be
,
1185 &wqe
->next
.next_wqe_index
);
1188 if (test_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
))
1189 mlx5e_shampo_dealloc_hd(rq
, rq
->mpwqe
.shampo
->hd_per_wq
,
1192 struct mlx5_wq_cyc
*wq
= &rq
->wqe
.wq
;
1193 u16 missing
= mlx5_wq_cyc_missing(wq
);
1194 u16 head
= mlx5_wq_cyc_get_head(wq
);
1196 while (!mlx5_wq_cyc_is_empty(wq
)) {
1197 wqe_ix
= mlx5_wq_cyc_get_tail(wq
);
1198 rq
->dealloc_wqe(rq
, wqe_ix
);
1199 mlx5_wq_cyc_pop(wq
);
1201 /* Missing slots might also contain unreleased pages due to
1205 wqe_ix
= mlx5_wq_cyc_ctr2ix(wq
, head
++);
1206 rq
->dealloc_wqe(rq
, wqe_ix
);
1212 int mlx5e_open_rq(struct mlx5e_params
*params
, struct mlx5e_rq_param
*param
,
1213 struct mlx5e_xsk_param
*xsk
, int node
,
1214 struct mlx5e_rq
*rq
)
1216 struct mlx5_core_dev
*mdev
= rq
->mdev
;
1219 if (params
->packet_merge
.type
== MLX5E_PACKET_MERGE_SHAMPO
)
1220 __set_bit(MLX5E_RQ_STATE_SHAMPO
, &rq
->state
);
1222 err
= mlx5e_alloc_rq(params
, xsk
, param
, node
, rq
);
1226 err
= mlx5e_create_rq(rq
, param
);
1230 err
= mlx5e_modify_rq_state(rq
, MLX5_RQC_STATE_RST
, MLX5_RQC_STATE_RDY
);
1232 goto err_destroy_rq
;
1234 if (MLX5_CAP_ETH(mdev
, cqe_checksum_full
))
1235 __set_bit(MLX5E_RQ_STATE_CSUM_FULL
, &rq
->state
);
1237 if (params
->rx_dim_enabled
)
1238 __set_bit(MLX5E_RQ_STATE_DIM
, &rq
->state
);
1240 /* We disable csum_complete when XDP is enabled since
1241 * XDP programs might manipulate packets which will render
1242 * skb->checksum incorrect.
1244 if (MLX5E_GET_PFLAG(params
, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE
) || params
->xdp_prog
)
1245 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE
, &rq
->state
);
1247 /* For CQE compression on striding RQ, use stride index provided by
1248 * HW if capability is supported.
1250 if (MLX5E_GET_PFLAG(params
, MLX5E_PFLAG_RX_STRIDING_RQ
) &&
1251 MLX5_CAP_GEN(mdev
, mini_cqe_resp_stride_index
))
1252 __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX
, &rq
->state
);
1254 /* For enhanced CQE compression packet processing. decompress
1255 * session according to the enhanced layout.
1257 if (MLX5E_GET_PFLAG(params
, MLX5E_PFLAG_RX_CQE_COMPRESS
) &&
1258 MLX5_CAP_GEN(mdev
, enhanced_cqe_compression
))
1259 __set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED
, &rq
->state
);
1264 mlx5e_destroy_rq(rq
);
1271 void mlx5e_activate_rq(struct mlx5e_rq
*rq
)
1273 set_bit(MLX5E_RQ_STATE_ENABLED
, &rq
->state
);
1276 void mlx5e_deactivate_rq(struct mlx5e_rq
*rq
)
1278 clear_bit(MLX5E_RQ_STATE_ENABLED
, &rq
->state
);
1279 synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
1282 void mlx5e_close_rq(struct mlx5e_rq
*rq
)
1284 cancel_work_sync(&rq
->dim
.work
);
1285 cancel_work_sync(&rq
->recover_work
);
1286 mlx5e_destroy_rq(rq
);
1287 mlx5e_free_rx_descs(rq
);
1291 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq
*sq
)
1293 kvfree(sq
->db
.xdpi_fifo
.xi
);
1294 kvfree(sq
->db
.wqe_info
);
1297 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq
*sq
, int numa
)
1299 struct mlx5e_xdp_info_fifo
*xdpi_fifo
= &sq
->db
.xdpi_fifo
;
1300 int wq_sz
= mlx5_wq_cyc_get_size(&sq
->wq
);
1304 /* upper bound for maximum num of entries of all xmit_modes. */
1305 entries
= roundup_pow_of_two(wq_sz
* MLX5_SEND_WQEBB_NUM_DS
*
1306 MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO
);
1308 size
= array_size(sizeof(*xdpi_fifo
->xi
), entries
);
1309 xdpi_fifo
->xi
= kvzalloc_node(size
, GFP_KERNEL
, numa
);
1313 xdpi_fifo
->pc
= &sq
->xdpi_fifo_pc
;
1314 xdpi_fifo
->cc
= &sq
->xdpi_fifo_cc
;
1315 xdpi_fifo
->mask
= entries
- 1;
1320 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq
*sq
, int numa
)
1322 int wq_sz
= mlx5_wq_cyc_get_size(&sq
->wq
);
1326 size
= array_size(sizeof(*sq
->db
.wqe_info
), wq_sz
);
1327 sq
->db
.wqe_info
= kvzalloc_node(size
, GFP_KERNEL
, numa
);
1328 if (!sq
->db
.wqe_info
)
1331 err
= mlx5e_alloc_xdpsq_fifo(sq
, numa
);
1333 mlx5e_free_xdpsq_db(sq
);
1340 static int mlx5e_alloc_xdpsq(struct mlx5e_channel
*c
,
1341 struct mlx5e_params
*params
,
1342 struct xsk_buff_pool
*xsk_pool
,
1343 struct mlx5e_sq_param
*param
,
1344 struct mlx5e_xdpsq
*sq
,
1347 void *sqc_wq
= MLX5_ADDR_OF(sqc
, param
->sqc
, wq
);
1348 struct mlx5_core_dev
*mdev
= c
->mdev
;
1349 struct mlx5_wq_cyc
*wq
= &sq
->wq
;
1353 sq
->mkey_be
= c
->mkey_be
;
1355 sq
->uar_map
= mdev
->mlx5e_res
.hw_objs
.bfreg
.map
;
1356 sq
->min_inline_mode
= params
->tx_min_inline_mode
;
1357 sq
->hw_mtu
= MLX5E_SW2HW_MTU(params
, params
->sw_mtu
) - ETH_FCS_LEN
;
1358 sq
->xsk_pool
= xsk_pool
;
1360 sq
->stats
= sq
->xsk_pool
?
1361 &c
->priv
->channel_stats
[c
->ix
]->xsksq
:
1363 &c
->priv
->channel_stats
[c
->ix
]->xdpsq
:
1364 &c
->priv
->channel_stats
[c
->ix
]->rq_xdpsq
;
1365 sq
->stop_room
= param
->is_mpw
? mlx5e_stop_room_for_mpwqe(mdev
) :
1366 mlx5e_stop_room_for_max_wqe(mdev
);
1367 sq
->max_sq_mpw_wqebbs
= mlx5e_get_max_sq_aligned_wqebbs(mdev
);
1369 param
->wq
.db_numa_node
= cpu_to_node(c
->cpu
);
1370 err
= mlx5_wq_cyc_create(mdev
, ¶m
->wq
, sqc_wq
, wq
, &sq
->wq_ctrl
);
1373 wq
->db
= &wq
->db
[MLX5_SND_DBR
];
1375 err
= mlx5e_alloc_xdpsq_db(sq
, cpu_to_node(c
->cpu
));
1377 goto err_sq_wq_destroy
;
1382 mlx5_wq_destroy(&sq
->wq_ctrl
);
1387 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq
*sq
)
1389 mlx5e_free_xdpsq_db(sq
);
1390 mlx5_wq_destroy(&sq
->wq_ctrl
);
1393 static void mlx5e_free_icosq_db(struct mlx5e_icosq
*sq
)
1395 kvfree(sq
->db
.wqe_info
);
1398 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq
*sq
, int numa
)
1400 int wq_sz
= mlx5_wq_cyc_get_size(&sq
->wq
);
1403 size
= array_size(wq_sz
, sizeof(*sq
->db
.wqe_info
));
1404 sq
->db
.wqe_info
= kvzalloc_node(size
, GFP_KERNEL
, numa
);
1405 if (!sq
->db
.wqe_info
)
1411 static void mlx5e_icosq_err_cqe_work(struct work_struct
*recover_work
)
1413 struct mlx5e_icosq
*sq
= container_of(recover_work
, struct mlx5e_icosq
,
1416 mlx5e_reporter_icosq_cqe_err(sq
);
1419 static void mlx5e_async_icosq_err_cqe_work(struct work_struct
*recover_work
)
1421 struct mlx5e_icosq
*sq
= container_of(recover_work
, struct mlx5e_icosq
,
1424 /* Not implemented yet. */
1426 netdev_warn(sq
->channel
->netdev
, "async_icosq recovery is not implemented\n");
1429 static int mlx5e_alloc_icosq(struct mlx5e_channel
*c
,
1430 struct mlx5e_sq_param
*param
,
1431 struct mlx5e_icosq
*sq
,
1432 work_func_t recover_work_func
)
1434 void *sqc_wq
= MLX5_ADDR_OF(sqc
, param
->sqc
, wq
);
1435 struct mlx5_core_dev
*mdev
= c
->mdev
;
1436 struct mlx5_wq_cyc
*wq
= &sq
->wq
;
1440 sq
->uar_map
= mdev
->mlx5e_res
.hw_objs
.bfreg
.map
;
1441 sq
->reserved_room
= param
->stop_room
;
1443 param
->wq
.db_numa_node
= cpu_to_node(c
->cpu
);
1444 err
= mlx5_wq_cyc_create(mdev
, ¶m
->wq
, sqc_wq
, wq
, &sq
->wq_ctrl
);
1447 wq
->db
= &wq
->db
[MLX5_SND_DBR
];
1449 err
= mlx5e_alloc_icosq_db(sq
, cpu_to_node(c
->cpu
));
1451 goto err_sq_wq_destroy
;
1453 INIT_WORK(&sq
->recover_work
, recover_work_func
);
1458 mlx5_wq_destroy(&sq
->wq_ctrl
);
1463 static void mlx5e_free_icosq(struct mlx5e_icosq
*sq
)
1465 mlx5e_free_icosq_db(sq
);
1466 mlx5_wq_destroy(&sq
->wq_ctrl
);
1469 void mlx5e_free_txqsq_db(struct mlx5e_txqsq
*sq
)
1471 kvfree(sq
->db
.wqe_info
);
1472 kvfree(sq
->db
.skb_fifo
.fifo
);
1473 kvfree(sq
->db
.dma_fifo
);
1476 int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq
*sq
, int numa
)
1478 int wq_sz
= mlx5_wq_cyc_get_size(&sq
->wq
);
1479 int df_sz
= wq_sz
* MLX5_SEND_WQEBB_NUM_DS
;
1481 sq
->db
.dma_fifo
= kvzalloc_node(array_size(df_sz
,
1482 sizeof(*sq
->db
.dma_fifo
)),
1484 sq
->db
.skb_fifo
.fifo
= kvzalloc_node(array_size(df_sz
,
1485 sizeof(*sq
->db
.skb_fifo
.fifo
)),
1487 sq
->db
.wqe_info
= kvzalloc_node(array_size(wq_sz
,
1488 sizeof(*sq
->db
.wqe_info
)),
1490 if (!sq
->db
.dma_fifo
|| !sq
->db
.skb_fifo
.fifo
|| !sq
->db
.wqe_info
) {
1491 mlx5e_free_txqsq_db(sq
);
1495 sq
->dma_fifo_mask
= df_sz
- 1;
1497 sq
->db
.skb_fifo
.pc
= &sq
->skb_fifo_pc
;
1498 sq
->db
.skb_fifo
.cc
= &sq
->skb_fifo_cc
;
1499 sq
->db
.skb_fifo
.mask
= df_sz
- 1;
1504 static int mlx5e_alloc_txqsq(struct mlx5e_channel
*c
,
1506 struct mlx5e_params
*params
,
1507 struct mlx5e_sq_param
*param
,
1508 struct mlx5e_txqsq
*sq
,
1511 void *sqc_wq
= MLX5_ADDR_OF(sqc
, param
->sqc
, wq
);
1512 struct mlx5_core_dev
*mdev
= c
->mdev
;
1513 struct mlx5_wq_cyc
*wq
= &sq
->wq
;
1517 sq
->clock
= &mdev
->clock
;
1518 sq
->mkey_be
= c
->mkey_be
;
1519 sq
->netdev
= c
->netdev
;
1524 sq
->txq_ix
= txq_ix
;
1525 sq
->uar_map
= mdev
->mlx5e_res
.hw_objs
.bfreg
.map
;
1526 sq
->min_inline_mode
= params
->tx_min_inline_mode
;
1527 sq
->hw_mtu
= MLX5E_SW2HW_MTU(params
, params
->sw_mtu
);
1528 sq
->max_sq_mpw_wqebbs
= mlx5e_get_max_sq_aligned_wqebbs(mdev
);
1529 INIT_WORK(&sq
->recover_work
, mlx5e_tx_err_cqe_work
);
1530 if (!MLX5_CAP_ETH(mdev
, wqe_vlan_insert
))
1531 set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE
, &sq
->state
);
1532 if (mlx5_ipsec_device_caps(c
->priv
->mdev
))
1533 set_bit(MLX5E_SQ_STATE_IPSEC
, &sq
->state
);
1535 set_bit(MLX5E_SQ_STATE_MPWQE
, &sq
->state
);
1536 sq
->stop_room
= param
->stop_room
;
1537 sq
->ptp_cyc2time
= mlx5_sq_ts_translator(mdev
);
1539 param
->wq
.db_numa_node
= cpu_to_node(c
->cpu
);
1540 err
= mlx5_wq_cyc_create(mdev
, ¶m
->wq
, sqc_wq
, wq
, &sq
->wq_ctrl
);
1543 wq
->db
= &wq
->db
[MLX5_SND_DBR
];
1545 err
= mlx5e_alloc_txqsq_db(sq
, cpu_to_node(c
->cpu
));
1547 goto err_sq_wq_destroy
;
1549 INIT_WORK(&sq
->dim
.work
, mlx5e_tx_dim_work
);
1550 sq
->dim
.mode
= params
->tx_cq_moderation
.cq_period_mode
;
1555 mlx5_wq_destroy(&sq
->wq_ctrl
);
1560 void mlx5e_free_txqsq(struct mlx5e_txqsq
*sq
)
1562 mlx5e_free_txqsq_db(sq
);
1563 mlx5_wq_destroy(&sq
->wq_ctrl
);
1566 static int mlx5e_create_sq(struct mlx5_core_dev
*mdev
,
1567 struct mlx5e_sq_param
*param
,
1568 struct mlx5e_create_sq_param
*csp
,
1578 inlen
= MLX5_ST_SZ_BYTES(create_sq_in
) +
1579 sizeof(u64
) * csp
->wq_ctrl
->buf
.npages
;
1580 in
= kvzalloc(inlen
, GFP_KERNEL
);
1584 ts_format
= mlx5_is_real_time_sq(mdev
) ?
1585 MLX5_TIMESTAMP_FORMAT_REAL_TIME
:
1586 MLX5_TIMESTAMP_FORMAT_FREE_RUNNING
;
1587 sqc
= MLX5_ADDR_OF(create_sq_in
, in
, ctx
);
1588 wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
1590 memcpy(sqc
, param
->sqc
, sizeof(param
->sqc
));
1591 MLX5_SET(sqc
, sqc
, tis_lst_sz
, csp
->tis_lst_sz
);
1592 MLX5_SET(sqc
, sqc
, tis_num_0
, csp
->tisn
);
1593 MLX5_SET(sqc
, sqc
, cqn
, csp
->cqn
);
1594 MLX5_SET(sqc
, sqc
, ts_cqe_to_dest_cqn
, csp
->ts_cqe_to_dest_cqn
);
1595 MLX5_SET(sqc
, sqc
, ts_format
, ts_format
);
1598 if (MLX5_CAP_ETH(mdev
, wqe_inline_mode
) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT
)
1599 MLX5_SET(sqc
, sqc
, min_wqe_inline_mode
, csp
->min_inline_mode
);
1601 MLX5_SET(sqc
, sqc
, state
, MLX5_SQC_STATE_RST
);
1602 MLX5_SET(sqc
, sqc
, flush_in_error_en
, 1);
1604 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
1605 MLX5_SET(wq
, wq
, uar_page
, mdev
->mlx5e_res
.hw_objs
.bfreg
.index
);
1606 MLX5_SET(wq
, wq
, log_wq_pg_sz
, csp
->wq_ctrl
->buf
.page_shift
-
1607 MLX5_ADAPTER_PAGE_SHIFT
);
1608 MLX5_SET64(wq
, wq
, dbr_addr
, csp
->wq_ctrl
->db
.dma
);
1610 mlx5_fill_page_frag_array(&csp
->wq_ctrl
->buf
,
1611 (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
));
1613 err
= mlx5_core_create_sq(mdev
, in
, inlen
, sqn
);
1620 int mlx5e_modify_sq(struct mlx5_core_dev
*mdev
, u32 sqn
,
1621 struct mlx5e_modify_sq_param
*p
)
1629 inlen
= MLX5_ST_SZ_BYTES(modify_sq_in
);
1630 in
= kvzalloc(inlen
, GFP_KERNEL
);
1634 sqc
= MLX5_ADDR_OF(modify_sq_in
, in
, ctx
);
1636 MLX5_SET(modify_sq_in
, in
, sq_state
, p
->curr_state
);
1637 MLX5_SET(sqc
, sqc
, state
, p
->next_state
);
1638 if (p
->rl_update
&& p
->next_state
== MLX5_SQC_STATE_RDY
) {
1640 MLX5_SET(sqc
, sqc
, packet_pacing_rate_limit_index
, p
->rl_index
);
1642 if (p
->qos_update
&& p
->next_state
== MLX5_SQC_STATE_RDY
) {
1644 MLX5_SET(sqc
, sqc
, qos_queue_group_id
, p
->qos_queue_group_id
);
1646 MLX5_SET64(modify_sq_in
, in
, modify_bitmask
, bitmask
);
1648 err
= mlx5_core_modify_sq(mdev
, sqn
, in
);
1655 static void mlx5e_destroy_sq(struct mlx5_core_dev
*mdev
, u32 sqn
)
1657 mlx5_core_destroy_sq(mdev
, sqn
);
1660 int mlx5e_create_sq_rdy(struct mlx5_core_dev
*mdev
,
1661 struct mlx5e_sq_param
*param
,
1662 struct mlx5e_create_sq_param
*csp
,
1663 u16 qos_queue_group_id
,
1666 struct mlx5e_modify_sq_param msp
= {0};
1669 err
= mlx5e_create_sq(mdev
, param
, csp
, sqn
);
1673 msp
.curr_state
= MLX5_SQC_STATE_RST
;
1674 msp
.next_state
= MLX5_SQC_STATE_RDY
;
1675 if (qos_queue_group_id
) {
1676 msp
.qos_update
= true;
1677 msp
.qos_queue_group_id
= qos_queue_group_id
;
1679 err
= mlx5e_modify_sq(mdev
, *sqn
, &msp
);
1681 mlx5e_destroy_sq(mdev
, *sqn
);
1686 static int mlx5e_set_sq_maxrate(struct net_device
*dev
,
1687 struct mlx5e_txqsq
*sq
, u32 rate
);
1689 int mlx5e_open_txqsq(struct mlx5e_channel
*c
, u32 tisn
, int txq_ix
,
1690 struct mlx5e_params
*params
, struct mlx5e_sq_param
*param
,
1691 struct mlx5e_txqsq
*sq
, int tc
, u16 qos_queue_group_id
,
1692 struct mlx5e_sq_stats
*sq_stats
)
1694 struct mlx5e_create_sq_param csp
= {};
1698 err
= mlx5e_alloc_txqsq(c
, txq_ix
, params
, param
, sq
, tc
);
1702 sq
->stats
= sq_stats
;
1706 csp
.cqn
= sq
->cq
.mcq
.cqn
;
1707 csp
.wq_ctrl
= &sq
->wq_ctrl
;
1708 csp
.min_inline_mode
= sq
->min_inline_mode
;
1709 err
= mlx5e_create_sq_rdy(c
->mdev
, param
, &csp
, qos_queue_group_id
, &sq
->sqn
);
1711 goto err_free_txqsq
;
1713 tx_rate
= c
->priv
->tx_rates
[sq
->txq_ix
];
1715 mlx5e_set_sq_maxrate(c
->netdev
, sq
, tx_rate
);
1717 if (params
->tx_dim_enabled
)
1718 sq
->state
|= BIT(MLX5E_SQ_STATE_DIM
);
1723 mlx5e_free_txqsq(sq
);
1728 void mlx5e_activate_txqsq(struct mlx5e_txqsq
*sq
)
1730 sq
->txq
= netdev_get_tx_queue(sq
->netdev
, sq
->txq_ix
);
1731 set_bit(MLX5E_SQ_STATE_ENABLED
, &sq
->state
);
1732 netdev_tx_reset_queue(sq
->txq
);
1733 netif_tx_start_queue(sq
->txq
);
1736 void mlx5e_tx_disable_queue(struct netdev_queue
*txq
)
1738 __netif_tx_lock_bh(txq
);
1739 netif_tx_stop_queue(txq
);
1740 __netif_tx_unlock_bh(txq
);
1743 void mlx5e_deactivate_txqsq(struct mlx5e_txqsq
*sq
)
1745 struct mlx5_wq_cyc
*wq
= &sq
->wq
;
1747 clear_bit(MLX5E_SQ_STATE_ENABLED
, &sq
->state
);
1748 synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
1750 mlx5e_tx_disable_queue(sq
->txq
);
1752 /* last doorbell out, godspeed .. */
1753 if (mlx5e_wqc_has_room_for(wq
, sq
->cc
, sq
->pc
, 1)) {
1754 u16 pi
= mlx5_wq_cyc_ctr2ix(wq
, sq
->pc
);
1755 struct mlx5e_tx_wqe
*nop
;
1757 sq
->db
.wqe_info
[pi
] = (struct mlx5e_tx_wqe_info
) {
1761 nop
= mlx5e_post_nop(wq
, sq
->sqn
, &sq
->pc
);
1762 mlx5e_notify_hw(wq
, sq
->pc
, sq
->uar_map
, &nop
->ctrl
);
1766 void mlx5e_close_txqsq(struct mlx5e_txqsq
*sq
)
1768 struct mlx5_core_dev
*mdev
= sq
->mdev
;
1769 struct mlx5_rate_limit rl
= {0};
1771 cancel_work_sync(&sq
->dim
.work
);
1772 cancel_work_sync(&sq
->recover_work
);
1773 mlx5e_destroy_sq(mdev
, sq
->sqn
);
1774 if (sq
->rate_limit
) {
1775 rl
.rate
= sq
->rate_limit
;
1776 mlx5_rl_remove_rate(mdev
, &rl
);
1778 mlx5e_free_txqsq_descs(sq
);
1779 mlx5e_free_txqsq(sq
);
1782 void mlx5e_tx_err_cqe_work(struct work_struct
*recover_work
)
1784 struct mlx5e_txqsq
*sq
= container_of(recover_work
, struct mlx5e_txqsq
,
1787 mlx5e_reporter_tx_err_cqe(sq
);
1790 static int mlx5e_open_icosq(struct mlx5e_channel
*c
, struct mlx5e_params
*params
,
1791 struct mlx5e_sq_param
*param
, struct mlx5e_icosq
*sq
,
1792 work_func_t recover_work_func
)
1794 struct mlx5e_create_sq_param csp
= {};
1797 err
= mlx5e_alloc_icosq(c
, param
, sq
, recover_work_func
);
1801 csp
.cqn
= sq
->cq
.mcq
.cqn
;
1802 csp
.wq_ctrl
= &sq
->wq_ctrl
;
1803 csp
.min_inline_mode
= params
->tx_min_inline_mode
;
1804 err
= mlx5e_create_sq_rdy(c
->mdev
, param
, &csp
, 0, &sq
->sqn
);
1806 goto err_free_icosq
;
1808 if (param
->is_tls
) {
1809 sq
->ktls_resync
= mlx5e_ktls_rx_resync_create_resp_list();
1810 if (IS_ERR(sq
->ktls_resync
)) {
1811 err
= PTR_ERR(sq
->ktls_resync
);
1812 goto err_destroy_icosq
;
1818 mlx5e_destroy_sq(c
->mdev
, sq
->sqn
);
1820 mlx5e_free_icosq(sq
);
1825 void mlx5e_activate_icosq(struct mlx5e_icosq
*icosq
)
1827 set_bit(MLX5E_SQ_STATE_ENABLED
, &icosq
->state
);
1830 void mlx5e_deactivate_icosq(struct mlx5e_icosq
*icosq
)
1832 clear_bit(MLX5E_SQ_STATE_ENABLED
, &icosq
->state
);
1833 synchronize_net(); /* Sync with NAPI. */
1836 static void mlx5e_close_icosq(struct mlx5e_icosq
*sq
)
1838 struct mlx5e_channel
*c
= sq
->channel
;
1840 if (sq
->ktls_resync
)
1841 mlx5e_ktls_rx_resync_destroy_resp_list(sq
->ktls_resync
);
1842 mlx5e_destroy_sq(c
->mdev
, sq
->sqn
);
1843 mlx5e_free_icosq_descs(sq
);
1844 mlx5e_free_icosq(sq
);
1847 int mlx5e_open_xdpsq(struct mlx5e_channel
*c
, struct mlx5e_params
*params
,
1848 struct mlx5e_sq_param
*param
, struct xsk_buff_pool
*xsk_pool
,
1849 struct mlx5e_xdpsq
*sq
, bool is_redirect
)
1851 struct mlx5e_create_sq_param csp
= {};
1854 err
= mlx5e_alloc_xdpsq(c
, params
, xsk_pool
, param
, sq
, is_redirect
);
1859 csp
.tisn
= c
->priv
->tisn
[c
->lag_port
][0]; /* tc = 0 */
1860 csp
.cqn
= sq
->cq
.mcq
.cqn
;
1861 csp
.wq_ctrl
= &sq
->wq_ctrl
;
1862 csp
.min_inline_mode
= sq
->min_inline_mode
;
1863 set_bit(MLX5E_SQ_STATE_ENABLED
, &sq
->state
);
1865 if (param
->is_xdp_mb
)
1866 set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF
, &sq
->state
);
1868 err
= mlx5e_create_sq_rdy(c
->mdev
, param
, &csp
, 0, &sq
->sqn
);
1870 goto err_free_xdpsq
;
1872 mlx5e_set_xmit_fp(sq
, param
->is_mpw
);
1874 if (!param
->is_mpw
&& !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF
, &sq
->state
)) {
1875 unsigned int ds_cnt
= MLX5E_TX_WQE_EMPTY_DS_COUNT
+ 1;
1876 unsigned int inline_hdr_sz
= 0;
1879 if (sq
->min_inline_mode
!= MLX5_INLINE_MODE_NONE
) {
1880 inline_hdr_sz
= MLX5E_XDP_MIN_INLINE
;
1884 /* Pre initialize fixed WQE fields */
1885 for (i
= 0; i
< mlx5_wq_cyc_get_size(&sq
->wq
); i
++) {
1886 struct mlx5e_tx_wqe
*wqe
= mlx5_wq_cyc_get_wqe(&sq
->wq
, i
);
1887 struct mlx5_wqe_ctrl_seg
*cseg
= &wqe
->ctrl
;
1888 struct mlx5_wqe_eth_seg
*eseg
= &wqe
->eth
;
1890 sq
->db
.wqe_info
[i
] = (struct mlx5e_xdp_wqe_info
) {
1895 cseg
->qpn_ds
= cpu_to_be32((sq
->sqn
<< 8) | ds_cnt
);
1896 eseg
->inline_hdr
.sz
= cpu_to_be16(inline_hdr_sz
);
1903 clear_bit(MLX5E_SQ_STATE_ENABLED
, &sq
->state
);
1904 mlx5e_free_xdpsq(sq
);
1909 void mlx5e_close_xdpsq(struct mlx5e_xdpsq
*sq
)
1911 struct mlx5e_channel
*c
= sq
->channel
;
1913 clear_bit(MLX5E_SQ_STATE_ENABLED
, &sq
->state
);
1914 synchronize_net(); /* Sync with NAPI. */
1916 mlx5e_destroy_sq(c
->mdev
, sq
->sqn
);
1917 mlx5e_free_xdpsq_descs(sq
);
1918 mlx5e_free_xdpsq(sq
);
1921 static int mlx5e_alloc_cq_common(struct mlx5e_priv
*priv
,
1922 struct mlx5e_cq_param
*param
,
1923 struct mlx5e_cq
*cq
)
1925 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1926 struct mlx5_core_cq
*mcq
= &cq
->mcq
;
1930 err
= mlx5_cqwq_create(mdev
, ¶m
->wq
, param
->cqc
, &cq
->wq
,
1936 mcq
->set_ci_db
= cq
->wq_ctrl
.db
.db
;
1937 mcq
->arm_db
= cq
->wq_ctrl
.db
.db
+ 1;
1938 *mcq
->set_ci_db
= 0;
1940 mcq
->vector
= param
->eq_ix
;
1941 mcq
->comp
= mlx5e_completion_event
;
1942 mcq
->event
= mlx5e_cq_error_event
;
1944 for (i
= 0; i
< mlx5_cqwq_get_size(&cq
->wq
); i
++) {
1945 struct mlx5_cqe64
*cqe
= mlx5_cqwq_get_wqe(&cq
->wq
, i
);
1948 cqe
->validity_iteration_count
= 0xff;
1952 cq
->netdev
= priv
->netdev
;
1958 static int mlx5e_alloc_cq(struct mlx5e_priv
*priv
,
1959 struct mlx5e_cq_param
*param
,
1960 struct mlx5e_create_cq_param
*ccp
,
1961 struct mlx5e_cq
*cq
)
1965 param
->wq
.buf_numa_node
= ccp
->node
;
1966 param
->wq
.db_numa_node
= ccp
->node
;
1967 param
->eq_ix
= ccp
->ix
;
1969 err
= mlx5e_alloc_cq_common(priv
, param
, cq
);
1971 cq
->napi
= ccp
->napi
;
1972 cq
->ch_stats
= ccp
->ch_stats
;
1977 static void mlx5e_free_cq(struct mlx5e_cq
*cq
)
1979 mlx5_wq_destroy(&cq
->wq_ctrl
);
1982 static int mlx5e_create_cq(struct mlx5e_cq
*cq
, struct mlx5e_cq_param
*param
)
1984 u32 out
[MLX5_ST_SZ_DW(create_cq_out
)];
1985 struct mlx5_core_dev
*mdev
= cq
->mdev
;
1986 struct mlx5_core_cq
*mcq
= &cq
->mcq
;
1994 err
= mlx5_comp_eqn_get(mdev
, param
->eq_ix
, &eqn
);
1998 inlen
= MLX5_ST_SZ_BYTES(create_cq_in
) +
1999 sizeof(u64
) * cq
->wq_ctrl
.buf
.npages
;
2000 in
= kvzalloc(inlen
, GFP_KERNEL
);
2004 cqc
= MLX5_ADDR_OF(create_cq_in
, in
, cq_context
);
2006 memcpy(cqc
, param
->cqc
, sizeof(param
->cqc
));
2008 mlx5_fill_page_frag_array(&cq
->wq_ctrl
.buf
,
2009 (__be64
*)MLX5_ADDR_OF(create_cq_in
, in
, pas
));
2011 MLX5_SET(cqc
, cqc
, cq_period_mode
, param
->cq_period_mode
);
2012 MLX5_SET(cqc
, cqc
, c_eqn_or_apu_element
, eqn
);
2013 MLX5_SET(cqc
, cqc
, uar_page
, mdev
->priv
.uar
->index
);
2014 MLX5_SET(cqc
, cqc
, log_page_size
, cq
->wq_ctrl
.buf
.page_shift
-
2015 MLX5_ADAPTER_PAGE_SHIFT
);
2016 MLX5_SET64(cqc
, cqc
, dbr_addr
, cq
->wq_ctrl
.db
.dma
);
2018 err
= mlx5_core_create_cq(mdev
, mcq
, in
, inlen
, out
, sizeof(out
));
2030 static void mlx5e_destroy_cq(struct mlx5e_cq
*cq
)
2032 mlx5_core_destroy_cq(cq
->mdev
, &cq
->mcq
);
2035 int mlx5e_open_cq(struct mlx5e_priv
*priv
, struct dim_cq_moder moder
,
2036 struct mlx5e_cq_param
*param
, struct mlx5e_create_cq_param
*ccp
,
2037 struct mlx5e_cq
*cq
)
2039 struct mlx5_core_dev
*mdev
= priv
->mdev
;
2042 err
= mlx5e_alloc_cq(priv
, param
, ccp
, cq
);
2046 err
= mlx5e_create_cq(cq
, param
);
2050 if (MLX5_CAP_GEN(mdev
, cq_moderation
))
2051 mlx5_core_modify_cq_moderation(mdev
, &cq
->mcq
, moder
.usec
, moder
.pkts
);
2060 void mlx5e_close_cq(struct mlx5e_cq
*cq
)
2062 mlx5e_destroy_cq(cq
);
2066 static int mlx5e_open_tx_cqs(struct mlx5e_channel
*c
,
2067 struct mlx5e_params
*params
,
2068 struct mlx5e_create_cq_param
*ccp
,
2069 struct mlx5e_channel_param
*cparam
)
2074 for (tc
= 0; tc
< c
->num_tc
; tc
++) {
2075 err
= mlx5e_open_cq(c
->priv
, params
->tx_cq_moderation
, &cparam
->txq_sq
.cqp
,
2076 ccp
, &c
->sq
[tc
].cq
);
2078 goto err_close_tx_cqs
;
2084 for (tc
--; tc
>= 0; tc
--)
2085 mlx5e_close_cq(&c
->sq
[tc
].cq
);
2090 static void mlx5e_close_tx_cqs(struct mlx5e_channel
*c
)
2094 for (tc
= 0; tc
< c
->num_tc
; tc
++)
2095 mlx5e_close_cq(&c
->sq
[tc
].cq
);
2098 static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq
*tc_to_txq
, unsigned int txq
)
2102 for (tc
= 0; tc
< TC_MAX_QUEUE
; tc
++)
2103 if (txq
- tc_to_txq
[tc
].offset
< tc_to_txq
[tc
].count
)
2106 WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq
);
2110 static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params
*params
, int txq_ix
,
2115 if (params
->mqprio
.mode
!= TC_MQPRIO_MODE_CHANNEL
) {
2120 tc
= mlx5e_mqprio_txq_to_tc(params
->mqprio
.tc_to_txq
, txq_ix
);
2124 if (tc
>= params
->mqprio
.num_tc
) {
2125 WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
2126 tc
, params
->mqprio
.num_tc
);
2130 *hw_id
= params
->mqprio
.channel
.hw_id
[tc
];
2134 static int mlx5e_open_sqs(struct mlx5e_channel
*c
,
2135 struct mlx5e_params
*params
,
2136 struct mlx5e_channel_param
*cparam
)
2140 for (tc
= 0; tc
< mlx5e_get_dcb_num_tc(params
); tc
++) {
2141 int txq_ix
= c
->ix
+ tc
* params
->num_channels
;
2142 u32 qos_queue_group_id
;
2144 err
= mlx5e_txq_get_qos_node_hw_id(params
, txq_ix
, &qos_queue_group_id
);
2148 err
= mlx5e_open_txqsq(c
, c
->priv
->tisn
[c
->lag_port
][tc
], txq_ix
,
2149 params
, &cparam
->txq_sq
, &c
->sq
[tc
], tc
,
2151 &c
->priv
->channel_stats
[c
->ix
]->sq
[tc
]);
2159 for (tc
--; tc
>= 0; tc
--)
2160 mlx5e_close_txqsq(&c
->sq
[tc
]);
2165 static void mlx5e_close_sqs(struct mlx5e_channel
*c
)
2169 for (tc
= 0; tc
< c
->num_tc
; tc
++)
2170 mlx5e_close_txqsq(&c
->sq
[tc
]);
2173 static int mlx5e_set_sq_maxrate(struct net_device
*dev
,
2174 struct mlx5e_txqsq
*sq
, u32 rate
)
2176 struct mlx5e_priv
*priv
= netdev_priv(dev
);
2177 struct mlx5_core_dev
*mdev
= priv
->mdev
;
2178 struct mlx5e_modify_sq_param msp
= {0};
2179 struct mlx5_rate_limit rl
= {0};
2183 if (rate
== sq
->rate_limit
)
2187 if (sq
->rate_limit
) {
2188 rl
.rate
= sq
->rate_limit
;
2189 /* remove current rl index to free space to next ones */
2190 mlx5_rl_remove_rate(mdev
, &rl
);
2197 err
= mlx5_rl_add_rate(mdev
, &rl_index
, &rl
);
2199 netdev_err(dev
, "Failed configuring rate %u: %d\n",
2205 msp
.curr_state
= MLX5_SQC_STATE_RDY
;
2206 msp
.next_state
= MLX5_SQC_STATE_RDY
;
2207 msp
.rl_index
= rl_index
;
2208 msp
.rl_update
= true;
2209 err
= mlx5e_modify_sq(mdev
, sq
->sqn
, &msp
);
2211 netdev_err(dev
, "Failed configuring rate %u: %d\n",
2213 /* remove the rate from the table */
2215 mlx5_rl_remove_rate(mdev
, &rl
);
2219 sq
->rate_limit
= rate
;
2223 static int mlx5e_set_tx_maxrate(struct net_device
*dev
, int index
, u32 rate
)
2225 struct mlx5e_priv
*priv
= netdev_priv(dev
);
2226 struct mlx5_core_dev
*mdev
= priv
->mdev
;
2227 struct mlx5e_txqsq
*sq
= priv
->txq2sq
[index
];
2230 if (!mlx5_rl_is_supported(mdev
)) {
2231 netdev_err(dev
, "Rate limiting is not supported on this device\n");
2235 /* rate is given in Mb/sec, HW config is in Kb/sec */
2238 /* Check whether rate in valid range, 0 is always valid */
2239 if (rate
&& !mlx5_rl_is_in_range(mdev
, rate
)) {
2240 netdev_err(dev
, "TX rate %u, is not in range\n", rate
);
2244 mutex_lock(&priv
->state_lock
);
2245 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
2246 err
= mlx5e_set_sq_maxrate(dev
, sq
, rate
);
2248 priv
->tx_rates
[index
] = rate
;
2249 mutex_unlock(&priv
->state_lock
);
2254 static int mlx5e_open_rxq_rq(struct mlx5e_channel
*c
, struct mlx5e_params
*params
,
2255 struct mlx5e_rq_param
*rq_params
)
2259 err
= mlx5e_init_rxq_rq(c
, params
, rq_params
->xdp_frag_size
, &c
->rq
);
2263 return mlx5e_open_rq(params
, rq_params
, NULL
, cpu_to_node(c
->cpu
), &c
->rq
);
static int mlx5e_open_queues(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	struct dim_cq_moder icocq_moder = {0, 0};
	struct mlx5e_create_cq_param ccp;
	int err;

	mlx5e_build_create_cq_param(&ccp, c);

	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
			    &c->async_icosq.cq);
	if (err)
		return err;

	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
			    &c->icosq.cq);
	if (err)
		goto err_close_async_icosq_cq;

	err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
			    &c->xdpsq.cq);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
			    &c->rq.cq);
	if (err)
		goto err_close_xdp_tx_cqs;

	err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
				     &ccp, &c->rq_xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	spin_lock_init(&c->async_icosq_lock);

	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
			       mlx5e_async_icosq_err_cqe_work);
	if (err)
		goto err_close_xdpsq_cq;

	mutex_init(&c->icosq_recovery_lock);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
			       mlx5e_icosq_err_cqe_work);
	if (err)
		goto err_close_async_icosq;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
	if (err)
		goto err_close_sqs;

	if (c->xdp) {
		err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
				       &c->rq_xdpsq, false);
		if (err)
			goto err_close_rq;
	}

	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
	if (err)
		goto err_close_xdp_sq;

	return 0;

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);

err_close_rq:
	mlx5e_close_rq(&c->rq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_close_async_icosq:
	mlx5e_close_icosq(&c->async_icosq);

err_close_xdpsq_cq:
	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_xdp_tx_cqs:
	mlx5e_close_cq(&c->xdpsq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_close_async_icosq_cq:
	mlx5e_close_cq(&c->async_icosq.cq);

	return err;
}
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);
	/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
	cancel_work_sync(&c->icosq.recover_work);
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	mutex_destroy(&c->icosq_recovery_lock);
	mlx5e_close_icosq(&c->async_icosq);
	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_cq(&c->xdpsq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	mlx5e_close_cq(&c->async_icosq.cq);
}
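/* Spread channels across the LAG ports. Non-PF functions bias the mapping by
 * their vhca_id so that different functions don't all start from port 0.
 */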
static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
{
	u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);

	return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}

static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
{
	if (ix > priv->stats_nch) {
		netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
			    priv->stats_nch);
		return -EINVAL;
	}

	if (priv->channel_stats[ix])
		return 0;

	/* Asymmetric dynamic memory allocation.
	 * Freed in mlx5e_priv_arrays_free, not on channel closure.
	 */
	netdev_dbg(priv->netdev, "Creating channel stats %d\n", ix);
	priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
						GFP_KERNEL, cpu_to_node(cpu));
	if (!priv->channel_stats[ix])
		return -ENOMEM;
	priv->stats_nch++;

	return 0;
}

void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
{
	spin_lock_bh(&c->async_icosq_lock);
	mlx5e_trigger_irq(&c->async_icosq);
	spin_unlock_bh(&c->async_icosq_lock);
}

void mlx5e_trigger_napi_sched(struct napi_struct *napi)
{
	local_bh_disable();
	napi_schedule(napi);
	local_bh_enable();
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct xsk_buff_pool *xsk_pool,
			      struct mlx5e_channel **cp)
{
	int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
	struct net_device *netdev = priv->netdev;
	struct mlx5e_xsk_param xsk;
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;

	err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
	if (err)
		return err;

	err = mlx5e_channel_stats_alloc(priv, ix, cpu);
	if (err)
		return err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = mlx5_core_dma_dev(priv->mdev);
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc   = mlx5e_get_dcb_num_tc(params);
	c->xdp      = !!params->xdp_prog;
	c->stats    = &priv->channel_stats[ix]->ch;
	c->aff_mask = irq_get_effective_affinity_mask(irq);
	c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);

	err = mlx5e_open_queues(c, params, cparam);
	if (unlikely(err))
		goto err_napi_del;

	if (xsk_pool) {
		mlx5e_build_xsk_param(xsk_pool, &xsk);
		err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
		if (unlikely(err))
			goto err_close_queues;
	}

	*cp = c;

	return 0;

err_close_queues:
	mlx5e_close_queues(c);

err_napi_del:
	netif_napi_del(&c->napi);
	kvfree(c);

	return err;
}
static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	napi_enable(&c->napi);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_icosq(&c->icosq);
	mlx5e_activate_icosq(&c->async_icosq);

	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_activate_xsk(c);
	else
		mlx5e_activate_rq(&c->rq);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_deactivate_xsk(c);
	else
		mlx5e_deactivate_rq(&c->rq);

	mlx5e_deactivate_icosq(&c->async_icosq);
	mlx5e_deactivate_icosq(&c->icosq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
	mlx5e_qos_deactivate_queues(c);

	napi_disable(&c->napi);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_close_xsk(c);
	mlx5e_close_queues(c);
	mlx5e_qos_close_queues(c);
	netif_napi_del(&c->napi);
	kvfree(c);
}
int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
	if (err)
		goto err_free;

	for (i = 0; i < chs->num; i++) {
		struct xsk_buff_pool *xsk_pool = NULL;

		if (chs->params.xdp_prog)
			xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);

		err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) {
		err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
		if (err)
			goto err_close_channels;
	}

	err = mlx5e_qos_open_queues(priv, chs);
	if (err)
		goto err_close_ptp;

	mlx5e_health_channels_update(priv);
	kvfree(cparam);
	return 0;

err_close_ptp:
	if (chs->ptp)
		mlx5e_ptp_close(chs->ptp);

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kvfree(cparam);
	chs->num = 0;
	return err;
}
static void mlx5e_activate_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);

	if (priv->htb)
		mlx5e_qos_activate_queues(priv);

	for (i = 0; i < chs->num; i++)
		mlx5e_trigger_napi_icosq(chs->c[i]);

	if (chs->ptp)
		mlx5e_ptp_activate_channel(chs->ptp);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
		struct mlx5e_channel *c = chs->c[i];

		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;

		err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);

		/* Don't wait on the XSK RQ, because the newer xdpsock sample
		 * doesn't provide any Fill Ring entries at the setup stage.
		 */
	}

	return err ? -ETIMEDOUT : 0;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	if (chs->ptp)
		mlx5e_ptp_deactivate_channel(chs->ptp);

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	if (chs->ptp) {
		mlx5e_ptp_close(chs->ptp);
		chs->ptp = NULL;
	}
	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}
static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
{
	struct mlx5e_rx_res *res = priv->rx_res;

	return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
}

static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);

static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params, u16 mtu)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params, u16 *mtu)
{
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
	if (err)
		return err;

	mlx5e_query_mtu(mdev, params, &mtu);
	if (mtu != params->sw_mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, params->sw_mtu);

	params->sw_mtu = mtu;
	return 0;
}

MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);

void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
				ETH_MAX_MTU);
}
static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
				struct netdev_tc_txq *tc_to_txq)
{
	int tc, err;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return 0;

	err = netdev_set_num_tc(netdev, ntc);
	if (err) {
		netdev_WARN(netdev, "netdev_set_num_tc failed (%d), ntc = %d\n", err, ntc);
		return err;
	}

	for (tc = 0; tc < ntc; tc++) {
		u16 count, offset;

		count = tc_to_txq[tc].count;
		offset = tc_to_txq[tc].offset;
		netdev_set_tc_queue(netdev, tc, count, offset);
	}

	return 0;
}

int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
{
	int nch, ntc, num_txqs, err;
	int qos_queues;

	qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);

	nch = priv->channels.params.num_channels;
	ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
	num_txqs = nch * ntc + qos_queues;
	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
		num_txqs += ntc;

	netdev_dbg(priv->netdev, "Setting num_txqs %d\n", num_txqs);
	err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
	if (err)
		netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);

	return err;
}
static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
	struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
	struct net_device *netdev = priv->netdev;
	int old_num_txqs, old_ntc;
	int nch, ntc;
	int err;
	int i;

	old_num_txqs = netdev->real_num_tx_queues;
	old_ntc = netdev->num_tc ? : 1;
	for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
		old_tc_to_txq[i] = netdev->tc_to_txq[i];

	nch = priv->channels.params.num_channels;
	ntc = priv->channels.params.mqprio.num_tc;
	tc_to_txq = priv->channels.params.mqprio.tc_to_txq;

	err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
	if (err)
		goto err_out;
	err = mlx5e_update_tx_netdev_queues(priv);
	if (err)
		goto err_tcs;
	err = netif_set_real_num_rx_queues(netdev, nch);
	if (err) {
		netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
		goto err_txqs;
	}

	return 0;

err_txqs:
	/* netif_set_real_num_rx_queues could fail only when nch increased. Only
	 * one of nch and ntc is changed in this function. That means, the call
	 * to netif_set_real_num_tx_queues below should not fail, because it
	 * decreases the number of TX queues.
	 */
	WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));

err_tcs:
	WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
					  old_tc_to_txq));
err_out:
	return err;
}

static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
					   struct mlx5e_params *params)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_comp_vectors, ix, irq;

	num_comp_vectors = mlx5_comp_vectors_max(mdev);

	for (ix = 0; ix < params->num_channels; ix++) {
		cpumask_clear(priv->scratchpad.cpumask);

		for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
			int cpu = mlx5_comp_vector_get_cpu(mdev, irq);

			cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
		}

		netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
	}
}

static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
	u16 count = priv->channels.params.num_channels;
	int err;

	err = mlx5e_update_netdev_queues(priv);
	if (err)
		return err;

	mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);

	/* This function may be called on attach, before priv->rx_res is created. */
	if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
		mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);

	return 0;
}

MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
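/* Rebuild the txq_ix -> SQ lookup table used by the TX path: regular channel
 * SQs first, then the PTP port-timestamping SQs when they are enabled.
 */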
static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
{
	int i, ch, tc, num_tc;

	ch = priv->channels.num;
	num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);

	for (i = 0; i < ch; i++) {
		for (tc = 0; tc < num_tc; tc++) {
			struct mlx5e_channel *c = priv->channels.c[i];
			struct mlx5e_txqsq *sq = &c->sq[tc];

			priv->txq2sq[sq->txq_ix] = sq;
		}
	}

	if (!priv->channels.ptp)
		goto out;

	if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
		goto out;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_ptp *c = priv->channels.ptp;
		struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;

		priv->txq2sq[sq->txq_ix] = sq;
	}

out:
	/* Make the change to txq2sq visible before the queue is started.
	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
	 * which pairs with this barrier.
	 */
	smp_wmb();
}

void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_build_txq_maps(priv);
	mlx5e_activate_channels(priv, &priv->channels);
	mlx5e_xdp_tx_enable(priv);

	/* dev_watchdog() wants all TX queues to be started when the carrier is
	 * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
	 * Make it happy to avoid TX timeout false alarms.
	 */
	netif_tx_start_all_queues(priv->netdev);

	if (mlx5e_is_vport_rep(priv))
		mlx5e_rep_activate_channels(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);

	if (priv->rx_res)
		mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	if (priv->rx_res)
		mlx5e_rx_res_channels_deactivate(priv->rx_res);

	if (mlx5e_is_vport_rep(priv))
		mlx5e_rep_deactivate_channels(priv);

	/* The results of ndo_select_queue are unreliable, while netdev config
	 * is being changed (real_num_tx_queues, num_tc). Stop all queues to
	 * prevent ndo_start_xmit from being called, so that it can assume that
	 * the selected queue is always valid.
	 */
	netif_tx_disable(priv->netdev);

	mlx5e_xdp_tx_disable(priv);
	mlx5e_deactivate_channels(&priv->channels);
}

static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
				    struct mlx5e_params *new_params,
				    mlx5e_fp_preactivate preactivate,
				    void *context)
{
	struct mlx5e_params old_params;

	old_params = priv->channels.params;
	priv->channels.params = *new_params;

	if (preactivate) {
		int err;

		err = preactivate(priv, context);
		if (err) {
			priv->channels.params = old_params;
			return err;
		}
	}

	return 0;
}
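/* Swap a fully opened set of channels into priv->channels: traffic is stopped
 * on the old channels, the preactivate hook may adjust HW/netdev state, and
 * only then are the old channels closed and the new ones activated.
 */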
static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				      struct mlx5e_channels *new_chs,
				      mlx5e_fp_preactivate preactivate,
				      void *context)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5e_channels old_chs;
	int carrier_ok;
	int err = 0;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	mlx5e_deactivate_priv_channels(priv);

	old_chs = priv->channels;
	priv->channels = *new_chs;

	/* New channels are ready to roll, call the preactivate hook if needed
	 * to modify HW settings or update kernel parameters.
	 */
	if (preactivate) {
		err = preactivate(priv, context);
		if (err) {
			priv->channels = old_chs;
			goto out;
		}
	}

	mlx5e_close_channels(&old_chs);
	priv->profile->update_rx(priv);

	mlx5e_selq_apply(&priv->selq);
out:
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);

	return err;
}

int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
			     struct mlx5e_params *params,
			     mlx5e_fp_preactivate preactivate,
			     void *context, bool reset)
{
	struct mlx5e_channels *new_chs;
	int err;

	reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (!reset)
		return mlx5e_switch_priv_params(priv, params, preactivate, context);

	new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL);
	if (!new_chs)
		return -ENOMEM;
	new_chs->params = *params;

	mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);

	err = mlx5e_open_channels(priv, new_chs);
	if (err)
		goto err_cancel_selq;

	err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
	if (err)
		goto err_close;

	kfree(new_chs);
	return 0;

err_close:
	mlx5e_close_channels(new_chs);

err_cancel_selq:
	mlx5e_selq_cancel(&priv->selq);
	kfree(new_chs);
	return err;
}

int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
{
	return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true);
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}

static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
				     enum mlx5_port_status state)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int vport_admin_state;

	mlx5_set_port_admin_status(mdev, state);

	if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
	    !MLX5_CAP_GEN(mdev, uplink_follow))
		return;

	if (state == MLX5_PORT_UP)
		vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	else
		vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
}
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	err = priv->profile->update_rx(priv);
	if (err)
		goto err_close_channels;

	mlx5e_selq_apply(&priv->selq);
	mlx5e_activate_priv_channels(priv);
	mlx5e_apply_traps(priv, true);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);

	mlx5e_queue_update_stats(priv);
	return 0;

err_close_channels:
	mlx5e_close_channels(&priv->channels);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	mlx5e_selq_cancel(&priv->selq);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	mlx5e_apply_traps(priv, false);
	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
{
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
				 &rq->wq_ctrl);
	if (err)
		return err;

	/* Mark as unused given "Drop-RQ" packets never reach XDP */
	xdp_rxq_info_unused(&rq->xdp_rxq);

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	param->wq.db_numa_node  = dev_to_node(mlx5_core_dma_dev(mdev));

	return mlx5e_alloc_cq_common(priv, param, cq);
}
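/* The drop RQ is a minimal RQ that never posts receive WQEs. It backs the
 * TIRs while the real channels are closed, so packets arriving in that
 * window are dropped by HW and accounted in rx_if_down_packets.
 */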
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);

	err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);

	return 0;

err_free_rq:
	mlx5e_free_drop_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_drop_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);

	if (MLX5_GET(tisc, tisc, tls_en))
		MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
	int tc, i;

	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
}

static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
}
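/* One TIS is created per (LAG port, TC) pair. When the device exposes more
 * than one LAG port and supports TX port affinity, each TIS is pinned to its
 * port so traffic sent through it stays on the corresponding physical link.
 */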
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int tc, i;
	int err;

	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
		for (tc = 0; tc < priv->profile->max_tc; tc++) {
			u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
			void *tisc;

			tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

			MLX5_SET(tisc, tisc, prio, tc << 1);

			if (mlx5e_lag_should_assign_affinity(priv->mdev))
				MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);

			err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
			if (err)
				goto err_close_tises;
		}
	}

	return 0;

err_close_tises:
	for (; i >= 0; i--) {
		for (tc--; tc >= 0; tc--)
			mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
		tc = priv->profile->max_tc;
	}

	return err;
}

static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	if (priv->mqprio_rl) {
		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
		mlx5e_mqprio_rl_free(priv->mqprio_rl);
		priv->mqprio_rl = NULL;
	}
	mlx5e_accel_cleanup_tx(priv);
	mlx5e_destroy_tises(priv);
}
3366 static int mlx5e_modify_channels_vsd(struct mlx5e_channels
*chs
, bool vsd
)
3371 for (i
= 0; i
< chs
->num
; i
++) {
3372 err
= mlx5e_modify_rq_vsd(&chs
->c
[i
]->rq
, vsd
);
3376 if (chs
->ptp
&& test_bit(MLX5E_PTP_STATE_RX
, chs
->ptp
->state
))
3377 return mlx5e_modify_rq_vsd(&chs
->ptp
->rq
, vsd
);
3382 static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq
*tc_to_txq
,
3387 memset(tc_to_txq
, 0, sizeof(*tc_to_txq
) * TC_MAX_QUEUE
);
3389 /* Map netdev TCs to offset 0.
3390 * We have our own UP to TXQ mapping for DCB mode of QoS
3392 for (tc
= 0; tc
< ntc
; tc
++) {
3393 tc_to_txq
[tc
] = (struct netdev_tc_txq
) {
3400 static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq
*tc_to_txq
,
3401 struct tc_mqprio_qopt
*qopt
)
3405 for (tc
= 0; tc
< TC_MAX_QUEUE
; tc
++) {
3406 tc_to_txq
[tc
] = (struct netdev_tc_txq
) {
3407 .count
= qopt
->count
[tc
],
3408 .offset
= qopt
->offset
[tc
],
3413 static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params
*params
, u8 num_tc
)
3415 params
->mqprio
.mode
= TC_MQPRIO_MODE_DCB
;
3416 params
->mqprio
.num_tc
= num_tc
;
3417 mlx5e_mqprio_build_default_tc_to_txq(params
->mqprio
.tc_to_txq
, num_tc
,
3418 params
->num_channels
);
3421 static void mlx5e_mqprio_rl_update_params(struct mlx5e_params
*params
,
3422 struct mlx5e_mqprio_rl
*rl
)
3426 for (tc
= 0; tc
< TC_MAX_QUEUE
; tc
++) {
3430 mlx5e_mqprio_rl_get_node_hw_id(rl
, tc
, &hw_id
);
3431 params
->mqprio
.channel
.hw_id
[tc
] = hw_id
;
3435 static void mlx5e_params_mqprio_channel_set(struct mlx5e_params
*params
,
3436 struct tc_mqprio_qopt_offload
*mqprio
,
3437 struct mlx5e_mqprio_rl
*rl
)
3441 params
->mqprio
.mode
= TC_MQPRIO_MODE_CHANNEL
;
3442 params
->mqprio
.num_tc
= mqprio
->qopt
.num_tc
;
3444 for (tc
= 0; tc
< TC_MAX_QUEUE
; tc
++)
3445 params
->mqprio
.channel
.max_rate
[tc
] = mqprio
->max_rate
[tc
];
3447 mlx5e_mqprio_rl_update_params(params
, rl
);
3448 mlx5e_mqprio_build_tc_to_txq(params
->mqprio
.tc_to_txq
, &mqprio
->qopt
);
3451 static void mlx5e_params_mqprio_reset(struct mlx5e_params
*params
)
3453 mlx5e_params_mqprio_dcb_set(params
, 1);
3456 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv
*priv
,
3457 struct tc_mqprio_qopt
*mqprio
)
3459 struct mlx5e_params new_params
;
3460 u8 tc
= mqprio
->num_tc
;
3463 mqprio
->hw
= TC_MQPRIO_HW_OFFLOAD_TCS
;
3465 if (tc
&& tc
!= MLX5E_MAX_NUM_TC
)
3468 new_params
= priv
->channels
.params
;
3469 mlx5e_params_mqprio_dcb_set(&new_params
, tc
? tc
: 1);
3471 err
= mlx5e_safe_switch_params(priv
, &new_params
,
3472 mlx5e_num_channels_changed_ctx
, NULL
, true);
3474 if (!err
&& priv
->mqprio_rl
) {
3475 mlx5e_mqprio_rl_cleanup(priv
->mqprio_rl
);
3476 mlx5e_mqprio_rl_free(priv
->mqprio_rl
);
3477 priv
->mqprio_rl
= NULL
;
3480 priv
->max_opened_tc
= max_t(u8
, priv
->max_opened_tc
,
3481 mlx5e_get_dcb_num_tc(&priv
->channels
.params
));
3485 static int mlx5e_mqprio_channel_validate(struct mlx5e_priv
*priv
,
3486 struct tc_mqprio_qopt_offload
*mqprio
)
3488 struct net_device
*netdev
= priv
->netdev
;
3489 struct mlx5e_ptp
*ptp_channel
;
3493 ptp_channel
= priv
->channels
.ptp
;
3494 if (ptp_channel
&& test_bit(MLX5E_PTP_STATE_TX
, ptp_channel
->state
)) {
3496 "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
3500 if (mqprio
->qopt
.offset
[0] != 0 || mqprio
->qopt
.num_tc
< 1 ||
3501 mqprio
->qopt
.num_tc
> MLX5E_MAX_NUM_MQPRIO_CH_TC
)
3504 for (i
= 0; i
< mqprio
->qopt
.num_tc
; i
++) {
3505 if (!mqprio
->qopt
.count
[i
]) {
3506 netdev_err(netdev
, "Zero size for queue-group (%d) is not supported\n", i
);
3509 if (mqprio
->min_rate
[i
]) {
3510 netdev_err(netdev
, "Min tx rate is not supported\n");
3514 if (mqprio
->max_rate
[i
]) {
3517 err
= mlx5e_qos_bytes_rate_check(priv
->mdev
, mqprio
->max_rate
[i
]);
3522 if (mqprio
->qopt
.offset
[i
] != agg_count
) {
3523 netdev_err(netdev
, "Discontinuous queues config is not supported\n");
3526 agg_count
+= mqprio
->qopt
.count
[i
];
3529 if (priv
->channels
.params
.num_channels
!= agg_count
) {
3530 netdev_err(netdev
, "Num of queues (%d) does not match available (%d)\n",
3531 agg_count
, priv
->channels
.params
.num_channels
);
3538 static bool mlx5e_mqprio_rate_limit(u8 num_tc
, u64 max_rate
[])
3542 for (tc
= 0; tc
< num_tc
; tc
++)
3548 static struct mlx5e_mqprio_rl
*mlx5e_mqprio_rl_create(struct mlx5_core_dev
*mdev
,
3549 u8 num_tc
, u64 max_rate
[])
3551 struct mlx5e_mqprio_rl
*rl
;
3554 if (!mlx5e_mqprio_rate_limit(num_tc
, max_rate
))
3557 rl
= mlx5e_mqprio_rl_alloc();
3559 return ERR_PTR(-ENOMEM
);
3561 err
= mlx5e_mqprio_rl_init(rl
, mdev
, num_tc
, max_rate
);
3563 mlx5e_mqprio_rl_free(rl
);
3564 return ERR_PTR(err
);
3570 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv
*priv
,
3571 struct tc_mqprio_qopt_offload
*mqprio
)
3573 mlx5e_fp_preactivate preactivate
;
3574 struct mlx5e_params new_params
;
3575 struct mlx5e_mqprio_rl
*rl
;
3579 err
= mlx5e_mqprio_channel_validate(priv
, mqprio
);
3583 rl
= mlx5e_mqprio_rl_create(priv
->mdev
, mqprio
->qopt
.num_tc
, mqprio
->max_rate
);
3587 new_params
= priv
->channels
.params
;
3588 mlx5e_params_mqprio_channel_set(&new_params
, mqprio
, rl
);
3590 nch_changed
= mlx5e_get_dcb_num_tc(&priv
->channels
.params
) > 1;
3591 preactivate
= nch_changed
? mlx5e_num_channels_changed_ctx
:
3592 mlx5e_update_netdev_queues_ctx
;
3593 err
= mlx5e_safe_switch_params(priv
, &new_params
, preactivate
, NULL
, true);
3596 mlx5e_mqprio_rl_cleanup(rl
);
3597 mlx5e_mqprio_rl_free(rl
);
3602 if (priv
->mqprio_rl
) {
3603 mlx5e_mqprio_rl_cleanup(priv
->mqprio_rl
);
3604 mlx5e_mqprio_rl_free(priv
->mqprio_rl
);
3606 priv
->mqprio_rl
= rl
;
3611 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv
*priv
,
3612 struct tc_mqprio_qopt_offload
*mqprio
)
3614 /* MQPRIO is another toplevel qdisc that can't be attached
3615 * simultaneously with the offloaded HTB.
3617 if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv
->selq
)))
3620 switch (mqprio
->mode
) {
3621 case TC_MQPRIO_MODE_DCB
:
3622 return mlx5e_setup_tc_mqprio_dcb(priv
, &mqprio
->qopt
);
3623 case TC_MQPRIO_MODE_CHANNEL
:
3624 return mlx5e_setup_tc_mqprio_channel(priv
, mqprio
);
3630 static LIST_HEAD(mlx5e_block_cb_list
);
3632 static int mlx5e_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
3635 struct mlx5e_priv
*priv
= netdev_priv(dev
);
3636 bool tc_unbind
= false;
3639 if (type
== TC_SETUP_BLOCK
&&
3640 ((struct flow_block_offload
*)type_data
)->command
== FLOW_BLOCK_UNBIND
)
3643 if (!netif_device_present(dev
) && !tc_unbind
)
3647 case TC_SETUP_BLOCK
: {
3648 struct flow_block_offload
*f
= type_data
;
3650 f
->unlocked_driver_cb
= true;
3651 return flow_block_cb_setup_simple(type_data
,
3652 &mlx5e_block_cb_list
,
3653 mlx5e_setup_tc_block_cb
,
3656 case TC_SETUP_QDISC_MQPRIO
:
3657 mutex_lock(&priv
->state_lock
);
3658 err
= mlx5e_setup_tc_mqprio(priv
, type_data
);
3659 mutex_unlock(&priv
->state_lock
);
3661 case TC_SETUP_QDISC_HTB
:
3662 mutex_lock(&priv
->state_lock
);
3663 err
= mlx5e_htb_setup_tc(priv
, type_data
);
3664 mutex_unlock(&priv
->state_lock
);
3671 void mlx5e_fold_sw_stats64(struct mlx5e_priv
*priv
, struct rtnl_link_stats64
*s
)
3675 for (i
= 0; i
< priv
->stats_nch
; i
++) {
3676 struct mlx5e_channel_stats
*channel_stats
= priv
->channel_stats
[i
];
3677 struct mlx5e_rq_stats
*xskrq_stats
= &channel_stats
->xskrq
;
3678 struct mlx5e_rq_stats
*rq_stats
= &channel_stats
->rq
;
3681 s
->rx_packets
+= rq_stats
->packets
+ xskrq_stats
->packets
;
3682 s
->rx_bytes
+= rq_stats
->bytes
+ xskrq_stats
->bytes
;
3683 s
->multicast
+= rq_stats
->mcast_packets
+ xskrq_stats
->mcast_packets
;
3685 for (j
= 0; j
< priv
->max_opened_tc
; j
++) {
3686 struct mlx5e_sq_stats
*sq_stats
= &channel_stats
->sq
[j
];
3688 s
->tx_packets
+= sq_stats
->packets
;
3689 s
->tx_bytes
+= sq_stats
->bytes
;
3690 s
->tx_dropped
+= sq_stats
->dropped
;
3693 if (priv
->tx_ptp_opened
) {
3694 for (i
= 0; i
< priv
->max_opened_tc
; i
++) {
3695 struct mlx5e_sq_stats
*sq_stats
= &priv
->ptp_stats
.sq
[i
];
3697 s
->tx_packets
+= sq_stats
->packets
;
3698 s
->tx_bytes
+= sq_stats
->bytes
;
3699 s
->tx_dropped
+= sq_stats
->dropped
;
3702 if (priv
->rx_ptp_opened
) {
3703 struct mlx5e_rq_stats
*rq_stats
= &priv
->ptp_stats
.rq
;
3705 s
->rx_packets
+= rq_stats
->packets
;
3706 s
->rx_bytes
+= rq_stats
->bytes
;
3707 s
->multicast
+= rq_stats
->mcast_packets
;
3712 mlx5e_get_stats(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
3714 struct mlx5e_priv
*priv
= netdev_priv(dev
);
3715 struct mlx5e_pport_stats
*pstats
= &priv
->stats
.pport
;
3717 if (!netif_device_present(dev
))
3720 /* In switchdev mode, monitor counters doesn't monitor
3721 * rx/tx stats of 802_3. The update stats mechanism
3722 * should keep the 802_3 layout counters updated
3724 if (!mlx5e_monitor_counter_supported(priv
) ||
3725 mlx5e_is_uplink_rep(priv
)) {
3726 /* update HW stats in background for next time */
3727 mlx5e_queue_update_stats(priv
);
3730 if (mlx5e_is_uplink_rep(priv
)) {
3731 struct mlx5e_vport_stats
*vstats
= &priv
->stats
.vport
;
3733 stats
->rx_packets
= PPORT_802_3_GET(pstats
, a_frames_received_ok
);
3734 stats
->rx_bytes
= PPORT_802_3_GET(pstats
, a_octets_received_ok
);
3735 stats
->tx_packets
= PPORT_802_3_GET(pstats
, a_frames_transmitted_ok
);
3736 stats
->tx_bytes
= PPORT_802_3_GET(pstats
, a_octets_transmitted_ok
);
3738 /* vport multicast also counts packets that are dropped due to steering
3739 * or rx out of buffer
3741 stats
->multicast
= VPORT_COUNTER_GET(vstats
, received_eth_multicast
.packets
);
3743 mlx5e_fold_sw_stats64(priv
, stats
);
3746 stats
->rx_dropped
= priv
->stats
.qcnt
.rx_out_of_buffer
;
3748 stats
->rx_length_errors
=
3749 PPORT_802_3_GET(pstats
, a_in_range_length_errors
) +
3750 PPORT_802_3_GET(pstats
, a_out_of_range_length_field
) +
3751 PPORT_802_3_GET(pstats
, a_frame_too_long_errors
) +
3752 VNIC_ENV_GET(&priv
->stats
.vnic
, eth_wqe_too_small
);
3753 stats
->rx_crc_errors
=
3754 PPORT_802_3_GET(pstats
, a_frame_check_sequence_errors
);
3755 stats
->rx_frame_errors
= PPORT_802_3_GET(pstats
, a_alignment_errors
);
3756 stats
->tx_aborted_errors
= PPORT_2863_GET(pstats
, if_out_discards
);
3757 stats
->rx_errors
= stats
->rx_length_errors
+ stats
->rx_crc_errors
+
3758 stats
->rx_frame_errors
;
3759 stats
->tx_errors
= stats
->tx_aborted_errors
+ stats
->tx_carrier_errors
;
3762 static void mlx5e_nic_set_rx_mode(struct mlx5e_priv
*priv
)
3764 if (mlx5e_is_uplink_rep(priv
))
3765 return; /* no rx mode for uplink rep */
3767 queue_work(priv
->wq
, &priv
->set_rx_mode_work
);
3770 static void mlx5e_set_rx_mode(struct net_device
*dev
)
3772 struct mlx5e_priv
*priv
= netdev_priv(dev
);
3774 mlx5e_nic_set_rx_mode(priv
);
3777 static int mlx5e_set_mac(struct net_device
*netdev
, void *addr
)
3779 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
3780 struct sockaddr
*saddr
= addr
;
3782 if (!is_valid_ether_addr(saddr
->sa_data
))
3783 return -EADDRNOTAVAIL
;
3785 netif_addr_lock_bh(netdev
);
3786 eth_hw_addr_set(netdev
, saddr
->sa_data
);
3787 netif_addr_unlock_bh(netdev
);
3789 mlx5e_nic_set_rx_mode(priv
);
3794 #define MLX5E_SET_FEATURE(features, feature, enable) \
3797 *features |= feature; \
3799 *features &= ~feature; \
3802 typedef int (*mlx5e_feature_handler
)(struct net_device
*netdev
, bool enable
);
3804 static int set_feature_lro(struct net_device
*netdev
, bool enable
)
3806 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
3807 struct mlx5_core_dev
*mdev
= priv
->mdev
;
3808 struct mlx5e_params
*cur_params
;
3809 struct mlx5e_params new_params
;
3813 mutex_lock(&priv
->state_lock
);
3815 cur_params
= &priv
->channels
.params
;
3816 new_params
= *cur_params
;
3819 new_params
.packet_merge
.type
= MLX5E_PACKET_MERGE_LRO
;
3820 else if (new_params
.packet_merge
.type
== MLX5E_PACKET_MERGE_LRO
)
3821 new_params
.packet_merge
.type
= MLX5E_PACKET_MERGE_NONE
;
3825 if (!(cur_params
->packet_merge
.type
== MLX5E_PACKET_MERGE_SHAMPO
&&
3826 new_params
.packet_merge
.type
== MLX5E_PACKET_MERGE_LRO
)) {
3827 if (cur_params
->rq_wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
) {
3828 if (mlx5e_rx_mpwqe_is_linear_skb(mdev
, cur_params
, NULL
) ==
3829 mlx5e_rx_mpwqe_is_linear_skb(mdev
, &new_params
, NULL
))
3834 err
= mlx5e_safe_switch_params(priv
, &new_params
,
3835 mlx5e_modify_tirs_packet_merge_ctx
, NULL
, reset
);
3837 mutex_unlock(&priv
->state_lock
);
3841 static int set_feature_hw_gro(struct net_device
*netdev
, bool enable
)
3843 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
3844 struct mlx5e_params new_params
;
3848 mutex_lock(&priv
->state_lock
);
3849 new_params
= priv
->channels
.params
;
3852 new_params
.packet_merge
.type
= MLX5E_PACKET_MERGE_SHAMPO
;
3853 new_params
.packet_merge
.shampo
.match_criteria_type
=
3854 MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED
;
3855 new_params
.packet_merge
.shampo
.alignment_granularity
=
3856 MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE
;
3857 } else if (new_params
.packet_merge
.type
== MLX5E_PACKET_MERGE_SHAMPO
) {
3858 new_params
.packet_merge
.type
= MLX5E_PACKET_MERGE_NONE
;
3863 err
= mlx5e_safe_switch_params(priv
, &new_params
, NULL
, NULL
, reset
);
3865 mutex_unlock(&priv
->state_lock
);
3869 static int set_feature_cvlan_filter(struct net_device
*netdev
, bool enable
)
3871 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
3874 mlx5e_enable_cvlan_filter(priv
->fs
,
3875 !!(priv
->netdev
->flags
& IFF_PROMISC
));
3877 mlx5e_disable_cvlan_filter(priv
->fs
,
3878 !!(priv
->netdev
->flags
& IFF_PROMISC
));
3883 static int set_feature_hw_tc(struct net_device
*netdev
, bool enable
)
3885 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
3888 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
3889 int tc_flag
= mlx5e_is_uplink_rep(priv
) ? MLX5_TC_FLAG(ESW_OFFLOAD
) :
3890 MLX5_TC_FLAG(NIC_OFFLOAD
);
3891 if (!enable
&& mlx5e_tc_num_filters(priv
, tc_flag
)) {
3893 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3898 mutex_lock(&priv
->state_lock
);
3899 if (!enable
&& mlx5e_selq_is_htb_enabled(&priv
->selq
)) {
3900 netdev_err(netdev
, "Active HTB offload, can't turn hw_tc_offload off\n");
3903 mutex_unlock(&priv
->state_lock
);
3908 static int set_feature_rx_all(struct net_device
*netdev
, bool enable
)
3910 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
3911 struct mlx5_core_dev
*mdev
= priv
->mdev
;
3913 return mlx5_set_port_fcs(mdev
, !enable
);
3916 static int mlx5e_set_rx_port_ts(struct mlx5_core_dev
*mdev
, bool enable
)
3918 u32 in
[MLX5_ST_SZ_DW(pcmr_reg
)] = {};
3919 bool supported
, curr_state
;
3922 if (!MLX5_CAP_GEN(mdev
, ports_check
))
3925 err
= mlx5_query_ports_check(mdev
, in
, sizeof(in
));
3929 supported
= MLX5_GET(pcmr_reg
, in
, rx_ts_over_crc_cap
);
3930 curr_state
= MLX5_GET(pcmr_reg
, in
, rx_ts_over_crc
);
3932 if (!supported
|| enable
== curr_state
)
3935 MLX5_SET(pcmr_reg
, in
, local_port
, 1);
3936 MLX5_SET(pcmr_reg
, in
, rx_ts_over_crc
, enable
);
3938 return mlx5_set_ports_check(mdev
, in
, sizeof(in
));
3941 static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv
*priv
, void *ctx
)
3943 struct mlx5_core_dev
*mdev
= priv
->mdev
;
3944 bool enable
= *(bool *)ctx
;
3946 return mlx5e_set_rx_port_ts(mdev
, enable
);
3949 static int set_feature_rx_fcs(struct net_device
*netdev
, bool enable
)
3951 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
3952 struct mlx5e_channels
*chs
= &priv
->channels
;
3953 struct mlx5e_params new_params
;
3955 bool rx_ts_over_crc
= !enable
;
3957 mutex_lock(&priv
->state_lock
);
3959 new_params
= chs
->params
;
3960 new_params
.scatter_fcs_en
= enable
;
3961 err
= mlx5e_safe_switch_params(priv
, &new_params
, mlx5e_set_rx_port_ts_wrap
,
3962 &rx_ts_over_crc
, true);
3963 mutex_unlock(&priv
->state_lock
);
3967 static int set_feature_rx_vlan(struct net_device
*netdev
, bool enable
)
3969 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
3972 mutex_lock(&priv
->state_lock
);
3974 mlx5e_fs_set_vlan_strip_disable(priv
->fs
, !enable
);
3975 priv
->channels
.params
.vlan_strip_disable
= !enable
;
3977 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
3980 err
= mlx5e_modify_channels_vsd(&priv
->channels
, !enable
);
3982 mlx5e_fs_set_vlan_strip_disable(priv
->fs
, enable
);
3983 priv
->channels
.params
.vlan_strip_disable
= enable
;
3986 mutex_unlock(&priv
->state_lock
);
3991 int mlx5e_vlan_rx_add_vid(struct net_device
*dev
, __be16 proto
, u16 vid
)
3993 struct mlx5e_priv
*priv
= netdev_priv(dev
);
3994 struct mlx5e_flow_steering
*fs
= priv
->fs
;
3996 if (mlx5e_is_uplink_rep(priv
))
3997 return 0; /* no vlan table for uplink rep */
3999 return mlx5e_fs_vlan_rx_add_vid(fs
, dev
, proto
, vid
);
4002 int mlx5e_vlan_rx_kill_vid(struct net_device
*dev
, __be16 proto
, u16 vid
)
4004 struct mlx5e_priv
*priv
= netdev_priv(dev
);
4005 struct mlx5e_flow_steering
*fs
= priv
->fs
;
4007 if (mlx5e_is_uplink_rep(priv
))
4008 return 0; /* no vlan table for uplink rep */
4010 return mlx5e_fs_vlan_rx_kill_vid(fs
, dev
, proto
, vid
);
4013 #ifdef CONFIG_MLX5_EN_ARFS
4014 static int set_feature_arfs(struct net_device
*netdev
, bool enable
)
4016 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
4020 err
= mlx5e_arfs_enable(priv
->fs
);
4022 err
= mlx5e_arfs_disable(priv
->fs
);
4028 static int mlx5e_handle_feature(struct net_device
*netdev
,
4029 netdev_features_t
*features
,
4030 netdev_features_t feature
,
4031 mlx5e_feature_handler feature_handler
)
4033 netdev_features_t changes
= *features
^ netdev
->features
;
4034 bool enable
= !!(*features
& feature
);
4037 if (!(changes
& feature
))
4040 err
= feature_handler(netdev
, enable
);
4042 MLX5E_SET_FEATURE(features
, feature
, !enable
);
4043 netdev_err(netdev
, "%s feature %pNF failed, err %d\n",
4044 enable
? "Enable" : "Disable", &feature
, err
);
4051 void mlx5e_set_xdp_feature(struct net_device
*netdev
)
4053 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
4054 struct mlx5e_params
*params
= &priv
->channels
.params
;
4057 if (params
->packet_merge
.type
!= MLX5E_PACKET_MERGE_NONE
) {
4058 xdp_clear_features_flag(netdev
);
4062 val
= NETDEV_XDP_ACT_BASIC
| NETDEV_XDP_ACT_REDIRECT
|
4063 NETDEV_XDP_ACT_XSK_ZEROCOPY
|
4064 NETDEV_XDP_ACT_RX_SG
|
4065 NETDEV_XDP_ACT_NDO_XMIT
|
4066 NETDEV_XDP_ACT_NDO_XMIT_SG
;
4067 xdp_set_features_flag(netdev
, val
);
4070 int mlx5e_set_features(struct net_device
*netdev
, netdev_features_t features
)
4072 netdev_features_t oper_features
= features
;
4075 #define MLX5E_HANDLE_FEATURE(feature, handler) \
4076 mlx5e_handle_feature(netdev, &oper_features, feature, handler)
4078 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_LRO
, set_feature_lro
);
4079 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW
, set_feature_hw_gro
);
4080 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER
,
4081 set_feature_cvlan_filter
);
4082 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC
, set_feature_hw_tc
);
4083 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL
, set_feature_rx_all
);
4084 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS
, set_feature_rx_fcs
);
4085 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX
, set_feature_rx_vlan
);
4086 #ifdef CONFIG_MLX5_EN_ARFS
4087 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE
, set_feature_arfs
);
4089 err
|= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX
, mlx5e_ktls_set_feature_rx
);
4092 netdev
->features
= oper_features
;
4096 /* update XDP supported features */
4097 mlx5e_set_xdp_feature(netdev
);
4102 static netdev_features_t
mlx5e_fix_uplink_rep_features(struct net_device
*netdev
,
4103 netdev_features_t features
)
4105 features
&= ~NETIF_F_HW_TLS_RX
;
4106 if (netdev
->features
& NETIF_F_HW_TLS_RX
)
4107 netdev_warn(netdev
, "Disabling hw_tls_rx, not supported in switchdev mode\n");
4109 features
&= ~NETIF_F_HW_TLS_TX
;
4110 if (netdev
->features
& NETIF_F_HW_TLS_TX
)
4111 netdev_warn(netdev
, "Disabling hw_tls_tx, not supported in switchdev mode\n");
4113 features
&= ~NETIF_F_NTUPLE
;
4114 if (netdev
->features
& NETIF_F_NTUPLE
)
4115 netdev_warn(netdev
, "Disabling ntuple, not supported in switchdev mode\n");
4117 features
&= ~NETIF_F_GRO_HW
;
4118 if (netdev
->features
& NETIF_F_GRO_HW
)
4119 netdev_warn(netdev
, "Disabling HW_GRO, not supported in switchdev mode\n");
4121 features
&= ~NETIF_F_HW_VLAN_CTAG_FILTER
;
4122 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
4123 netdev_warn(netdev
, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
4128 static netdev_features_t
mlx5e_fix_features(struct net_device
*netdev
,
4129 netdev_features_t features
)
4131 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
4132 struct mlx5e_vlan_table
*vlan
;
4133 struct mlx5e_params
*params
;
4135 if (!netif_device_present(netdev
))
4138 vlan
= mlx5e_fs_get_vlan(priv
->fs
);
4139 mutex_lock(&priv
->state_lock
);
4140 params
= &priv
->channels
.params
;
4142 !bitmap_empty(mlx5e_vlan_get_active_svlans(vlan
), VLAN_N_VID
)) {
4143 /* HW strips the outer C-tag header, this is a problem
4144 * for S-tag traffic.
4146 features
&= ~NETIF_F_HW_VLAN_CTAG_RX
;
4147 if (!params
->vlan_strip_disable
)
4148 netdev_warn(netdev
, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
4151 if (!MLX5E_GET_PFLAG(params
, MLX5E_PFLAG_RX_STRIDING_RQ
)) {
4152 if (features
& NETIF_F_LRO
) {
4153 netdev_warn(netdev
, "Disabling LRO, not supported in legacy RQ\n");
4154 features
&= ~NETIF_F_LRO
;
4156 if (features
& NETIF_F_GRO_HW
) {
4157 netdev_warn(netdev
, "Disabling HW-GRO, not supported in legacy RQ\n");
4158 features
&= ~NETIF_F_GRO_HW
;
4162 if (params
->xdp_prog
) {
4163 if (features
& NETIF_F_LRO
) {
4164 netdev_warn(netdev
, "LRO is incompatible with XDP\n");
4165 features
&= ~NETIF_F_LRO
;
4167 if (features
& NETIF_F_GRO_HW
) {
4168 netdev_warn(netdev
, "HW GRO is incompatible with XDP\n");
4169 features
&= ~NETIF_F_GRO_HW
;
4173 if (priv
->xsk
.refcnt
) {
4174 if (features
& NETIF_F_LRO
) {
4175 netdev_warn(netdev
, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
4177 features
&= ~NETIF_F_LRO
;
4179 if (features
& NETIF_F_GRO_HW
) {
4180 netdev_warn(netdev
, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
4182 features
&= ~NETIF_F_GRO_HW
;
4186 if (MLX5E_GET_PFLAG(params
, MLX5E_PFLAG_RX_CQE_COMPRESS
)) {
4187 features
&= ~NETIF_F_RXHASH
;
4188 if (netdev
->features
& NETIF_F_RXHASH
)
4189 netdev_warn(netdev
, "Disabling rxhash, not supported when CQE compress is active\n");
4191 if (features
& NETIF_F_GRO_HW
) {
4192 netdev_warn(netdev
, "Disabling HW-GRO, not supported when CQE compress is active\n");
4193 features
&= ~NETIF_F_GRO_HW
;
4197 if (mlx5e_is_uplink_rep(priv
)) {
4198 features
= mlx5e_fix_uplink_rep_features(netdev
, features
);
4199 features
|= NETIF_F_NETNS_LOCAL
;
4201 features
&= ~NETIF_F_NETNS_LOCAL
;
4204 mutex_unlock(&priv
->state_lock
);
4209 static bool mlx5e_xsk_validate_mtu(struct net_device
*netdev
,
4210 struct mlx5e_channels
*chs
,
4211 struct mlx5e_params
*new_params
,
4212 struct mlx5_core_dev
*mdev
)
4216 for (ix
= 0; ix
< chs
->params
.num_channels
; ix
++) {
4217 struct xsk_buff_pool
*xsk_pool
=
4218 mlx5e_xsk_get_pool(&chs
->params
, chs
->params
.xsk
, ix
);
4219 struct mlx5e_xsk_param xsk
;
4225 mlx5e_build_xsk_param(xsk_pool
, &xsk
);
4226 max_xdp_mtu
= mlx5e_xdp_max_mtu(new_params
, &xsk
);
4228 /* Validate XSK params and XDP MTU in advance */
4229 if (!mlx5e_validate_xsk_param(new_params
, &xsk
, mdev
) ||
4230 new_params
->sw_mtu
> max_xdp_mtu
) {
4231 u32 hr
= mlx5e_get_linear_rq_headroom(new_params
, &xsk
);
4232 int max_mtu_frame
, max_mtu_page
, max_mtu
;
4234 /* Two criteria must be met:
4235 * 1. HW MTU + all headrooms <= XSK frame size.
4236 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
4238 max_mtu_frame
= MLX5E_HW2SW_MTU(new_params
, xsk
.chunk_size
- hr
);
4239 max_mtu_page
= MLX5E_HW2SW_MTU(new_params
, SKB_MAX_HEAD(0));
4240 max_mtu
= min3(max_mtu_frame
, max_mtu_page
, max_xdp_mtu
);
4242 netdev_err(netdev
, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
4243 new_params
->sw_mtu
, ix
, max_mtu
);
4251 static bool mlx5e_params_validate_xdp(struct net_device
*netdev
,
4252 struct mlx5_core_dev
*mdev
,
4253 struct mlx5e_params
*params
)
4257 /* No XSK params: AF_XDP can't be enabled yet at the point of setting
4260 is_linear
= params
->rq_wq_type
== MLX5_WQ_TYPE_CYCLIC
?
4261 mlx5e_rx_is_linear_skb(mdev
, params
, NULL
) :
4262 mlx5e_rx_mpwqe_is_linear_skb(mdev
, params
, NULL
);
4265 if (!params
->xdp_prog
->aux
->xdp_has_frags
) {
4266 netdev_warn(netdev
, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
4268 mlx5e_xdp_max_mtu(params
, NULL
));
4271 if (params
->rq_wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
&&
4272 !mlx5e_verify_params_rx_mpwqe_strides(mdev
, params
, NULL
)) {
4273 netdev_warn(netdev
, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
4275 mlx5e_xdp_max_mtu(params
, NULL
));
4283 int mlx5e_change_mtu(struct net_device
*netdev
, int new_mtu
,
4284 mlx5e_fp_preactivate preactivate
)
4286 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
4287 struct mlx5e_params new_params
;
4288 struct mlx5e_params
*params
;
4292 mutex_lock(&priv
->state_lock
);
4294 params
= &priv
->channels
.params
;
4296 new_params
= *params
;
4297 new_params
.sw_mtu
= new_mtu
;
4298 err
= mlx5e_validate_params(priv
->mdev
, &new_params
);
4302 if (new_params
.xdp_prog
&& !mlx5e_params_validate_xdp(netdev
, priv
->mdev
,
4308 if (priv
->xsk
.refcnt
&&
4309 !mlx5e_xsk_validate_mtu(netdev
, &priv
->channels
,
4310 &new_params
, priv
->mdev
)) {
4315 if (params
->packet_merge
.type
== MLX5E_PACKET_MERGE_LRO
)
4318 if (params
->rq_wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
&&
4319 params
->packet_merge
.type
!= MLX5E_PACKET_MERGE_SHAMPO
) {
4320 bool is_linear_old
= mlx5e_rx_mpwqe_is_linear_skb(priv
->mdev
, params
, NULL
);
4321 bool is_linear_new
= mlx5e_rx_mpwqe_is_linear_skb(priv
->mdev
,
4323 u8 sz_old
= mlx5e_mpwqe_get_log_rq_size(priv
->mdev
, params
, NULL
);
4324 u8 sz_new
= mlx5e_mpwqe_get_log_rq_size(priv
->mdev
, &new_params
, NULL
);
4326 /* Always reset in linear mode - hw_mtu is used in data path.
4327 * Check that the mode was non-linear and didn't change.
4328 * If XSK is active, XSK RQs are linear.
4329 * Reset if the RQ size changed, even if it's non-linear.
4331 if (!is_linear_old
&& !is_linear_new
&& !priv
->xsk
.refcnt
&&
4336 err
= mlx5e_safe_switch_params(priv
, &new_params
, preactivate
, NULL
, reset
);
4339 netdev
->mtu
= params
->sw_mtu
;
4340 mutex_unlock(&priv
->state_lock
);
4344 static int mlx5e_change_nic_mtu(struct net_device
*netdev
, int new_mtu
)
4346 return mlx5e_change_mtu(netdev
, new_mtu
, mlx5e_set_dev_port_mtu_ctx
);
4349 int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv
*priv
, void *ctx
)
4351 bool set
= *(bool *)ctx
;
4353 return mlx5e_ptp_rx_manage_fs(priv
, set
);
4356 static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv
*priv
, bool rx_filter
)
4358 bool rx_cqe_compress_def
= priv
->channels
.params
.rx_cqe_compress_def
;
4362 /* Reset CQE compression to Admin default */
4363 return mlx5e_modify_rx_cqe_compression_locked(priv
, rx_cqe_compress_def
, false);
4365 if (!MLX5E_GET_PFLAG(&priv
->channels
.params
, MLX5E_PFLAG_RX_CQE_COMPRESS
))
4368 /* Disable CQE compression */
4369 netdev_warn(priv
->netdev
, "Disabling RX cqe compression\n");
4370 err
= mlx5e_modify_rx_cqe_compression_locked(priv
, false, true);
4372 netdev_err(priv
->netdev
, "Failed disabling cqe compression err=%d\n", err
);
4377 static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv
*priv
, bool ptp_rx
)
4379 struct mlx5e_params new_params
;
4381 if (ptp_rx
== priv
->channels
.params
.ptp_rx
)
4384 new_params
= priv
->channels
.params
;
4385 new_params
.ptp_rx
= ptp_rx
;
4386 return mlx5e_safe_switch_params(priv
, &new_params
, mlx5e_ptp_rx_manage_fs_ctx
,
4387 &new_params
.ptp_rx
, true);
4390 int mlx5e_hwstamp_set(struct mlx5e_priv
*priv
, struct ifreq
*ifr
)
4392 struct hwtstamp_config config
;
4393 bool rx_cqe_compress_def
;
4397 if (!MLX5_CAP_GEN(priv
->mdev
, device_frequency_khz
) ||
4398 (mlx5_clock_get_ptp_index(priv
->mdev
) == -1))
4401 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
4404 /* TX HW timestamp */
4405 switch (config
.tx_type
) {
4406 case HWTSTAMP_TX_OFF
:
4407 case HWTSTAMP_TX_ON
:
4413 mutex_lock(&priv
->state_lock
);
4414 rx_cqe_compress_def
= priv
->channels
.params
.rx_cqe_compress_def
;
4416 /* RX HW timestamp */
4417 switch (config
.rx_filter
) {
4418 case HWTSTAMP_FILTER_NONE
:
4421 case HWTSTAMP_FILTER_ALL
:
4422 case HWTSTAMP_FILTER_SOME
:
4423 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
4424 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
4425 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
4426 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
4427 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
4428 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
4429 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
4430 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
4431 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
4432 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
4433 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
4434 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
4435 case HWTSTAMP_FILTER_NTP_ALL
:
4436 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
4437 /* ptp_rx is set if both HW TS is set and CQE
4438 * compression is set
4440 ptp_rx
= rx_cqe_compress_def
;
4447 if (!mlx5e_profile_feature_cap(priv
->profile
, PTP_RX
))
4448 err
= mlx5e_hwstamp_config_no_ptp_rx(priv
,
4449 config
.rx_filter
!= HWTSTAMP_FILTER_NONE
);
4451 err
= mlx5e_hwstamp_config_ptp_rx(priv
, ptp_rx
);
4455 memcpy(&priv
->tstamp
, &config
, sizeof(config
));
4456 mutex_unlock(&priv
->state_lock
);
4458 /* might need to fix some features */
4459 netdev_update_features(priv
->netdev
);
4461 return copy_to_user(ifr
->ifr_data
, &config
,
4462 sizeof(config
)) ? -EFAULT
: 0;
4464 mutex_unlock(&priv
->state_lock
);
4468 int mlx5e_hwstamp_get(struct mlx5e_priv
*priv
, struct ifreq
*ifr
)
4470 struct hwtstamp_config
*cfg
= &priv
->tstamp
;
4472 if (!MLX5_CAP_GEN(priv
->mdev
, device_frequency_khz
))
4475 return copy_to_user(ifr
->ifr_data
, cfg
, sizeof(*cfg
)) ? -EFAULT
: 0;
4478 static int mlx5e_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
4480 struct mlx5e_priv
*priv
= netdev_priv(dev
);
4484 return mlx5e_hwstamp_set(priv
, ifr
);
4486 return mlx5e_hwstamp_get(priv
, ifr
);
4492 #ifdef CONFIG_MLX5_ESWITCH
4493 int mlx5e_set_vf_mac(struct net_device
*dev
, int vf
, u8
*mac
)
4495 struct mlx5e_priv
*priv
= netdev_priv(dev
);
4496 struct mlx5_core_dev
*mdev
= priv
->mdev
;
4498 return mlx5_eswitch_set_vport_mac(mdev
->priv
.eswitch
, vf
+ 1, mac
);
4501 static int mlx5e_set_vf_vlan(struct net_device
*dev
, int vf
, u16 vlan
, u8 qos
,
4504 struct mlx5e_priv
*priv
= netdev_priv(dev
);
4505 struct mlx5_core_dev
*mdev
= priv
->mdev
;
4507 if (vlan_proto
!= htons(ETH_P_8021Q
))
4508 return -EPROTONOSUPPORT
;
4510 return mlx5_eswitch_set_vport_vlan(mdev
->priv
.eswitch
, vf
+ 1,
4514 static int mlx5e_set_vf_spoofchk(struct net_device
*dev
, int vf
, bool setting
)
4516 struct mlx5e_priv
*priv
= netdev_priv(dev
);
4517 struct mlx5_core_dev
*mdev
= priv
->mdev
;
4519 return mlx5_eswitch_set_vport_spoofchk(mdev
->priv
.eswitch
, vf
+ 1, setting
);
4522 static int mlx5e_set_vf_trust(struct net_device
*dev
, int vf
, bool setting
)
4524 struct mlx5e_priv
*priv
= netdev_priv(dev
);
4525 struct mlx5_core_dev
*mdev
= priv
->mdev
;
4527 return mlx5_eswitch_set_vport_trust(mdev
->priv
.eswitch
, vf
+ 1, setting
);
4530 int mlx5e_set_vf_rate(struct net_device
*dev
, int vf
, int min_tx_rate
,
4533 struct mlx5e_priv
*priv
= netdev_priv(dev
);
4534 struct mlx5_core_dev
*mdev
= priv
->mdev
;
4536 return mlx5_eswitch_set_vport_rate(mdev
->priv
.eswitch
, vf
+ 1,
4537 max_tx_rate
, min_tx_rate
);
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_is_uplink_rep(priv))
		return -EOPNOTSUPP;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}
int mlx5e_get_vf_config(struct net_device *dev,
			int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	if (!netif_device_present(dev))
		return -EOPNOTSUPP;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

int mlx5e_get_vf_stats(struct net_device *dev,
		       int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
static bool
mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (!netif_device_present(dev))
		return false;

	if (!mlx5e_is_uplink_rep(priv))
		return false;

	return mlx5e_rep_has_offload_stats(dev, attr_id);
}

static int
mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
			void *sp)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (!mlx5e_is_uplink_rep(priv))
		return -EOPNOTSUPP;

	return mlx5e_rep_get_offload_stats(attr_id, dev, sp);
}
#endif
static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
	default:
		return false;
	}
}

static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
							    struct sk_buff *skb)
{
	switch (skb->inner_protocol) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TEB):
		return true;
	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
	}
	return false;
}
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	unsigned int offset = 0;
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}

	switch (proto) {
	case IPPROTO_GRE:
		if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
			return features;
		break;
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
			return features;
		break;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);

		/* Verify if UDP port is being offloaded by HW */
		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
			return features;

#if IS_ENABLED(CONFIG_GENEVE)
		/* Support Geneve offload for default UDP port */
		if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
			return features;
#endif
		break;
#ifdef CONFIG_MLX5_EN_IPSEC
	case IPPROTO_ESP:
		return mlx5e_ipsec_feature_check(skb, features);
#endif
	}

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	struct net_device *netdev = priv->netdev;
	int i;

	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		struct netdev_queue *dev_queue =
			netdev_get_tx_queue(netdev, i);
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(dev_queue))
			continue;

		if (mlx5e_reporter_tx_timeout(sq))
			/* break if we tried to reopen the channels */
			break;
	}

unlock:
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX timeout detected\n");
	queue_work(priv->wq, &priv->tx_timeout_work);
}
static int mlx5e_xdp_allowed(struct net_device *netdev, struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params)
{
	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
		netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
		return -EINVAL;
	}

	if (!mlx5e_params_validate_xdp(netdev, mdev, params))
		return -EINVAL;

	return 0;
}

static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;

	old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
				       lockdep_is_held(&rq->priv->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
}
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_params new_params;
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset;
	int i;

	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;
	new_params.xdp_prog = prog;

	if (prog) {
		err = mlx5e_xdp_allowed(netdev, priv->mdev, &new_params);
		if (err)
			goto unlock;
	}

	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	old_prog = priv->channels.params.xdp_prog;

	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
	if (err)
		goto unlock;

	if (old_prog)
		bpf_prog_put(old_prog);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	bpf_prog_add(prog, priv->channels.num);
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		mlx5e_rq_replace_xdp_prog(&c->rq, prog);
		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
			bpf_prog_inc(prog);
			mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
		}
	}

unlock:
	mutex_unlock(&priv->state_lock);

	/* Need to fix some features. */
	if (!err)
		netdev_update_features(netdev);

	return err;
}
static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_SETUP_XSK_POOL:
		return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
					    xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}
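/*
 * Illustrative sketch (not part of the driver): loading and attaching an XDP
 * program from user space with libbpf, which reaches mlx5e_xdp_set() above
 * through the XDP_SETUP_PROG command. The object and program names
 * ("prog.bpf.o", "xdp_main") are examples only; error handling is minimal.
 *
 *	#include <bpf/libbpf.h>
 *	#include <bpf/bpf.h>
 *	#include <net/if.h>
 *
 *	int attach_xdp(const char *ifname)
 *	{
 *		struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *		struct bpf_program *prog;
 *		int ifindex = if_nametoindex(ifname);
 *
 *		if (!obj || !ifindex || bpf_object__load(obj))
 *			return -1;
 *		prog = bpf_object__find_program_by_name(obj, "xdp_main");
 *		if (!prog)
 *			return -1;
 *		// Swapping one program for another on a live interface does not
 *		// require a full channel reset in mlx5e (see "reset" above).
 *		return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
 *	}
 */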
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				struct net_device *dev, u32 filter_mask,
				int nlflags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 mode, setting;
	int err;

	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
	if (err)
		return err;
	mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       mode,
				       0, 0, nlflags, filter_mask, NULL);
}

static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				u16 flags, struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct nlattr *attr, *br_spec;
	u16 mode = BRIDGE_MODE_UNDEF;
	u8 setting;
	int rem;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode > BRIDGE_MODE_VEPA)
			return -EINVAL;

		break;
	}

	if (mode == BRIDGE_MODE_UNDEF)
		return -EINVAL;

	setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
	return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
}
#endif
const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_fix_features        = mlx5e_fix_features,
	.ndo_change_mtu          = mlx5e_change_nic_mtu,
	.ndo_eth_ioctl           = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_bpf                 = mlx5e_xdp,
	.ndo_xdp_xmit            = mlx5e_xdp_xmit,
	.ndo_xsk_wakeup          = mlx5e_xsk_wakeup,
#ifdef CONFIG_MLX5_EN_ARFS
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	.ndo_bridge_setlink      = mlx5e_bridge_setlink,
	.ndo_bridge_getlink      = mlx5e_bridge_getlink,

	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
#endif
};
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
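/*
 * Example of the selection above (illustrative capability values only): if the
 * device reports lro_timer_supported_periods[] = {8, 16, 32, 1024} usec and
 * the wanted timeout is 32, the loop stops at index 2 and 32 is returned;
 * asking for 64 falls through to the last (largest) supported period, 1024.
 */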
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 rx_cq_period_mode;

	params->sw_mtu = mtu;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
				     priv->max_nch);
	mlx5e_params_mqprio_reset(params);

	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);

	mlx5e_build_rq_params(mdev, params);

	params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev);

	params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* Do not update netdev->features directly in here;
	 * on mlx5e_attach_netdev() we will call mlx5e_update_features().
	 * To update netdev->features, please modify mlx5e_fix_features().
	 */
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u8 addr[ETH_ALEN];

	mlx5_query_mac_address(priv->mdev, addr);
	if (is_zero_ether_addr(addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
		return;
	}

	eth_hw_addr_set(netdev, addr);
}
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
				unsigned int entry, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
}

static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
				  unsigned int entry, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
}
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
{
	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
		return;

	priv->nic_info.set_port = mlx5e_vxlan_set_port;
	priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
	priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			       UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
	priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
	/* Don't count the space hard-coded to the IANA port */
	priv->nic_info.tables[0].n_entries =
		mlx5_vxlan_max_udp_ports(priv->mdev) - 1;

	priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
}
static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt)))
			return true;
	}
	return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
}
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, mdev->device);

	netdev->netdev_ops = &mlx5e_netdev_ops;
	netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;

	mlx5e_dcbnl_build_netdev(netdev);

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_HW_CSUM;
	netdev->vlan_features    |= NETIF_F_HW_MACSEC;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;
	netdev->vlan_features    |= NETIF_F_GSO_PARTIAL;

	netdev->mpls_features    |= NETIF_F_SG;
	netdev->mpls_features    |= NETIF_F_HW_CSUM;
	netdev->mpls_features    |= NETIF_F_TSO;
	netdev->mpls_features    |= NETIF_F_TSO6;

	netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_RX;

	/* Tunneled LRO is not supported in the driver, and the same RQs are
	 * shared between inner and outer TIRs, so the driver can't disable LRO
	 * for inner TIRs while having it enabled for outer TIRs. Due to this,
	 * block LRO altogether if the firmware declares tunneled LRO support.
	 */
	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
	    !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
	    !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
	    mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
						   MLX5E_MPWRQ_UMR_MODE_ALIGNED))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;

	if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->vlan_features   |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
		netdev->hw_features     |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
		netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
				       NETIF_F_GSO_IPXIP6;
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
					   NETIF_F_GSO_IPXIP6;
		netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
						NETIF_F_GSO_IPXIP6;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
	netdev->hw_features          |= NETIF_F_GSO_UDP_L4;
	netdev->features             |= NETIF_F_GSO_UDP_L4;

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	if (mlx5_qos_is_supported(mdev))
		netdev->hw_features |= NETIF_F_HW_TC;

	netdev->features          = netdev->hw_features;

	/* Defaults */
	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;
	netdev->features  &= ~NETIF_F_LRO;
	netdev->features  &= ~NETIF_F_GRO_HW;
	netdev->features  &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
		netdev->hw_features      |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
		netdev->hw_features      |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;
	netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
	mlx5e_set_xdp_feature(netdev);
	mlx5e_set_netdev_dev_addr(netdev);
	mlx5e_macsec_build_netdev(priv);
	mlx5e_ipsec_build_netdev(priv);
	mlx5e_ktls_build_netdev(priv);
}
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
	if (!err)
		priv->q_counter =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);

	err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
	if (!err)
		priv->drop_rq_q_counter =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}

void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	if (priv->q_counter) {
		MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
	}
}
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_flow_steering *fs;
	int err;

	mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
	mlx5e_vxlan_set_netdev_info(priv);

	mlx5e_timestamp_init(priv);

	priv->dfs_root = debugfs_create_dir("nic",
					    mlx5_debugfs_get_dev_root(mdev));

	fs = mlx5e_fs_init(priv->profile, mdev,
			   !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
			   priv->dfs_root);
	if (!fs) {
		err = -ENOMEM;
		mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
		debugfs_remove_recursive(priv->dfs_root);
		return err;
	}
	priv->fs = fs;

	err = mlx5e_ktls_init(priv);
	if (err)
		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);

	mlx5e_health_create_reporters(priv);

	/* If netdev is already registered (e.g. move from uplink to nic profile),
	 * RTNL lock must be held before triggering netdev notifiers.
	 */
	if (take_rtnl)
		rtnl_lock();

	/* update XDP supported features */
	mlx5e_set_xdp_feature(netdev);

	if (take_rtnl)
		rtnl_unlock();

	return 0;
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_health_destroy_reporters(priv);
	mlx5e_ktls_cleanup(priv);
	mlx5e_fs_cleanup(priv->fs);
	debugfs_remove_recursive(priv->dfs_root);
	priv->fs = NULL;
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	enum mlx5e_rx_res_features features;
	int err;

	priv->rx_res = mlx5e_rx_res_alloc();
	if (!priv->rx_res)
		return -ENOMEM;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	features = MLX5E_RX_RES_FEATURE_PTP;
	if (mlx5_tunnel_inner_ft_supported(mdev))
		features |= MLX5E_RX_RES_FEATURE_INNER_FT;
	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
				priv->max_nch, priv->drop_rq.rqn,
				&priv->channels.params.packet_merge,
				priv->channels.params.num_channels);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
					 priv->netdev);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_rx_res;
	}

	err = mlx5e_tc_nic_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	err = mlx5e_accel_init_rx(priv);
	if (err)
		goto err_tc_nic_cleanup;

#ifdef CONFIG_MLX5_EN_ARFS
	priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
#endif

	return 0;

err_tc_nic_cleanup:
	mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
				    priv->profile);
err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;

	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_accel_cleanup_rx(priv);
	mlx5e_tc_nic_cleanup(priv);
	mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
				    priv->profile);
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}
static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params;
	struct mlx5e_mqprio_rl *rl;

	params = &priv->channels.params;
	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
		return;

	rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
				    params->mqprio.channel.max_rate);
	if (IS_ERR(rl))
		rl = NULL;
	priv->mqprio_rl = rl;
	mlx5e_mqprio_rl_update_params(params, rl);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	err = mlx5e_accel_init_tx(priv);
	if (err)
		goto err_destroy_tises;

	mlx5e_set_mqprio_rl(priv);
	mlx5e_dcbnl_initialize(priv);
	return 0;

err_destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_fs_init_l2_addr(priv->fs, netdev);
	mlx5e_ipsec_init(priv);

	err = mlx5e_macsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);

	/* Marking the link as currently not needed by the Driver */
	if (!netif_running(netdev))
		mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add_netdev(mdev, netdev);

	mlx5e_enable_async_events(priv);
	mlx5e_enable_blocking_events(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_init(priv);

	mlx5e_hv_vhca_stats_create(priv);
	if (netdev->reg_state != NETREG_REGISTERED)
		return;
	mlx5e_dcbnl_init_app(priv);

	mlx5e_nic_set_rx_mode(priv);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_nic_set_rx_mode(priv);

	mlx5e_hv_vhca_stats_destroy(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_cleanup(priv);

	mlx5e_disable_blocking_events(priv);
	if (priv->en_trap) {
		mlx5e_deactivate_trap(priv);
		mlx5e_close_trap(priv->en_trap);
		priv->en_trap = NULL;
	}
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	mlx5_vxlan_reset_to_default(mdev->vxlan);
	mlx5e_macsec_cleanup(priv);
	mlx5e_ipsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
	return mlx5e_refresh_tirs(priv, false, false);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_rx	   = mlx5e_update_nic_rx,
	.update_stats	   = mlx5e_stats_update_ndo_stats,
	.update_carrier	   = mlx5e_update_carrier,
	.rx_handlers       = &mlx5e_rx_handlers_nic,
	.max_tc		   = MLX5E_MAX_NUM_TC,
	.stats_grps	   = mlx5e_nic_stats_grps,
	.stats_grps_num	   = mlx5e_nic_stats_grps_num,
	.features          = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
		BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
		BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
		BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
		BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};
static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
					  const struct mlx5e_profile *profile)
{
	int nch;

	nch = mlx5e_get_max_num_channels(mdev);

	if (profile->max_nch_limit)
		nch = min_t(int, nch, profile->max_nch_limit(mdev));
	return nch;
}

static unsigned int
mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
		   const struct mlx5e_profile *profile)
{
	unsigned int max_nch, tmp;

	/* core resources */
	max_nch = mlx5e_profile_max_num_channels(mdev, profile);

	/* netdev rx queues */
	max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues);

	/* netdev tx queues */
	tmp = netdev->num_tx_queues;
	if (mlx5_qos_is_supported(mdev))
		tmp -= mlx5e_qos_max_leaf_nodes(mdev);
	if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
		tmp -= profile->max_tc;
	tmp = tmp / profile->max_tc;
	max_nch = min_t(unsigned int, max_nch, tmp);

	return max_nch;
}

int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
{
	/* Indirect TIRS: 2 sets of TTCs (inner + outer steering)
	 * and 1 set of direct TIRS
	 */
	return 2 * MLX5E_NUM_INDIR_TIRS
		+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
}
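/*
 * Worked example (illustrative numbers): if mlx5e_profile_max_num_channels()
 * reports 64 channels, the PF budget above is 2 * MLX5E_NUM_INDIR_TIRS
 * indirect TIRs (one set for outer and one for inner steering) plus 64 direct
 * TIRs, one per channel.
 */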
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
}
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
		    const struct mlx5e_profile *profile,
		    struct net_device *netdev,
		    struct mlx5_core_dev *mdev)
{
	int nch, num_txqs, node;
	int err;

	num_txqs = netdev->num_tx_queues;
	nch = mlx5e_calc_max_nch(mdev, netdev, profile);
	node = dev_to_node(mlx5_core_dma_dev(mdev));

	/* priv init */
	priv->mdev        = mdev;
	priv->netdev      = netdev;
	priv->max_nch     = nch;
	priv->max_opened_tc = 1;

	if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
		return -ENOMEM;

	mutex_init(&priv->state_lock);

	err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
	if (err)
		goto err_free_cpumask;

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_free_selq;

	priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
	if (!priv->txq2sq)
		goto err_destroy_workqueue;

	priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
	if (!priv->tx_rates)
		goto err_free_txq2sq;

	priv->channel_stats =
		kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
	if (!priv->channel_stats)
		goto err_free_tx_rates;

	return 0;

err_free_tx_rates:
	kfree(priv->tx_rates);
err_free_txq2sq:
	kfree(priv->txq2sq);
err_destroy_workqueue:
	destroy_workqueue(priv->wq);
err_free_selq:
	mlx5e_selq_cleanup(&priv->selq);
err_free_cpumask:
	free_cpumask_var(priv->scratchpad.cpumask);
	return -ENOMEM;
}
void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
{
	int i;

	/* bail if change profile failed and also rollback failed */
	if (!priv->mdev)
		return;

	for (i = 0; i < priv->stats_nch; i++)
		kvfree(priv->channel_stats[i]);
	kfree(priv->channel_stats);
	kfree(priv->tx_rates);
	kfree(priv->txq2sq);
	destroy_workqueue(priv->wq);
	mutex_lock(&priv->state_lock);
	mlx5e_selq_cleanup(&priv->selq);
	mutex_unlock(&priv->state_lock);
	free_cpumask_var(priv->scratchpad.cpumask);

	for (i = 0; i < priv->htb_max_qos_sqs; i++)
		kfree(priv->htb_qos_sq_stats[i]);
	kvfree(priv->htb_qos_sq_stats);

	memset(priv, 0, sizeof(*priv));
}
static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
					   const struct mlx5e_profile *profile)
{
	unsigned int nch, ptp_txqs, qos_txqs;

	nch = mlx5e_profile_max_num_channels(mdev, profile);

	ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
		mlx5e_profile_feature_cap(profile, PTP_TX) ?
		profile->max_tc : 0;

	qos_txqs = mlx5_qos_is_supported(mdev) &&
		mlx5e_profile_feature_cap(profile, QOS_HTB) ?
		mlx5e_qos_max_leaf_nodes(mdev) : 0;

	return nch * profile->max_tc + ptp_txqs + qos_txqs;
}
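/*
 * Worked example (illustrative numbers): with 32 channels, max_tc = 8, PTP TX
 * supported and 256 HTB QoS leaf nodes, the netdev is created with
 * 32 * 8 + 8 + 256 = 520 TX queues; mlx5e_calc_max_nch() later works backwards
 * from the same budget when validating the channel count.
 */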
static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
					   const struct mlx5e_profile *profile)
{
	return mlx5e_profile_max_num_channels(mdev, profile);
}
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
{
	struct net_device *netdev;
	unsigned int txqs, rxqs;
	int err;

	txqs = mlx5e_get_max_num_txqs(mdev, profile);
	rxqs = mlx5e_get_max_num_rxqs(mdev, profile);

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
		goto err_free_netdev;
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	dev_net_set(netdev, mlx5_core_net(mdev));

	return netdev;

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}

static void mlx5e_update_features(struct net_device *netdev)
{
	if (netdev->reg_state != NETREG_REGISTERED)
		return; /* features will be updated on netdev registration */

	rtnl_lock();
	netdev_update_features(netdev);
	rtnl_unlock();
}

static void mlx5e_reset_channels(struct net_device *netdev)
{
	netdev_reset_tc(netdev);
}
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
	const struct mlx5e_profile *profile = priv->profile;
	int max_nch;
	int err;

	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (priv->fs)
		mlx5e_fs_set_state_destroy(priv->fs,
					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));

	/* Validate the max_wqe_size_sq capability. */
	if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
		mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %u\n",
			       mlx5e_get_max_sq_wqebbs(priv->mdev), (unsigned int)MLX5E_MAX_TX_WQEBBS);
		return -EIO;
	}

	/* max number of channels may have changed */
	max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
	if (priv->channels.params.num_channels > max_nch) {
		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
		/* Reducing the number of channels - RXFH has to be reset, and
		 * mlx5e_num_channels_changed below will build the RQT.
		 */
		priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
		priv->channels.params.num_channels = max_nch;
		if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
			mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
			mlx5e_params_mqprio_reset(&priv->channels.params);
		}
	}
	if (max_nch != priv->max_nch) {
		mlx5_core_warn(priv->mdev,
			       "MLX5E: Updating max number of channels from %u to %u\n",
			       priv->max_nch, max_nch);
		priv->max_nch = max_nch;
	}

	/* 1. Set the real number of queues in the kernel the first time.
	 * 2. Set our default XPS cpumask.
	 *
	 * rtnl_lock is required by netif_set_real_num_*_queues in case the
	 * netdev has been registered by this point (if this function was called
	 * in the reload or resume flow).
	 */
	if (take_rtnl)
		rtnl_lock();
	err = mlx5e_num_channels_changed(priv);
	if (take_rtnl)
		rtnl_unlock();
	if (err)
		goto out;

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = profile->init_rx(priv);
	if (err)
		goto err_cleanup_tx;

	if (profile->enable)
		profile->enable(priv);

	mlx5e_update_features(priv->netdev);

	return 0;

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	mlx5e_reset_channels(priv->netdev);
	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (priv->fs)
		mlx5e_fs_set_state_destroy(priv->fs,
					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	cancel_work_sync(&priv->update_stats_work);
	return err;
}
void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (priv->fs)
		mlx5e_fs_set_state_destroy(priv->fs,
					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	profile->cleanup_rx(priv);
	profile->cleanup_tx(priv);
	mlx5e_reset_channels(priv->netdev);
	cancel_work_sync(&priv->update_stats_work);
}
static int
mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
			  const struct mlx5e_profile *new_profile, void *new_ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
		return err;
	}
	netif_carrier_off(netdev);
	priv->profile = new_profile;
	priv->ppriv = new_ppriv;
	err = new_profile->init(priv->mdev, priv->netdev);
	if (err)
		goto priv_cleanup;

	return 0;

priv_cleanup:
	mlx5e_priv_cleanup(priv);
	return err;
}

static int
mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
			    const struct mlx5e_profile *new_profile, void *new_ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err)
		goto profile_cleanup;
	return err;

profile_cleanup:
	new_profile->cleanup(priv);
	mlx5e_priv_cleanup(priv);
	return err;
}
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
				const struct mlx5e_profile *new_profile, void *new_ppriv)
{
	const struct mlx5e_profile *orig_profile = priv->profile;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *orig_ppriv = priv->ppriv;
	int err, rollback_err;

	/* cleanup old profile */
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_priv_cleanup(priv);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
		set_bit(MLX5E_STATE_DESTROYING, &priv->state);
		return -EIO;
	}

	err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
	if (err) { /* roll back to original profile */
		netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
		goto rollback;
	}

	return 0;

rollback:
	rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
	if (rollback_err)
		netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
			   __func__, rollback_err);
	return err;
}

void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
{
	mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
}
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;

	mlx5e_priv_cleanup(priv);
	free_netdev(netdev);
}

static int mlx5e_resume(struct auxiliary_device *adev)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
	struct mlx5e_priv *priv = mlx5e_dev->priv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = edev->mdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
	struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
	struct mlx5e_priv *priv = mlx5e_dev->priv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!netif_device_present(netdev)) {
		if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
			mlx5e_destroy_mdev_resources(mdev);
		return -ENODEV;
	}

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
	return 0;
}
static int mlx5e_probe(struct auxiliary_device *adev,
		       const struct auxiliary_device_id *id)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	const struct mlx5e_profile *profile = &mlx5e_nic_profile;
	struct mlx5_core_dev *mdev = edev->mdev;
	struct mlx5e_dev *mlx5e_dev;
	struct net_device *netdev;
	pm_message_t state = {};
	struct mlx5e_priv *priv;
	int err;

	mlx5e_dev = mlx5e_create_devlink(&adev->dev, mdev);
	if (IS_ERR(mlx5e_dev))
		return PTR_ERR(mlx5e_dev);
	auxiliary_set_drvdata(adev, mlx5e_dev);

	err = mlx5e_devlink_port_register(mlx5e_dev, mdev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
		goto err_devlink_unregister;
	}

	netdev = mlx5e_create_netdev(mdev, profile);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		err = -ENOMEM;
		goto err_devlink_port_unregister;
	}
	SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);

	mlx5e_build_nic_netdev(netdev);

	priv = netdev_priv(netdev);
	mlx5e_dev->priv = priv;

	priv->profile = profile;

	err = profile->init(mdev, netdev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_resume(adev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
		goto err_profile_cleanup;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_resume;
	}

	mlx5e_dcbnl_init_app(priv);
	mlx5_core_uplink_netdev_set(mdev, netdev);
	mlx5e_params_print_info(mdev, &priv->channels.params);
	return 0;

err_resume:
	mlx5e_suspend(adev, state);
err_profile_cleanup:
	profile->cleanup(priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
err_devlink_port_unregister:
	mlx5e_devlink_port_unregister(mlx5e_dev);
err_devlink_unregister:
	mlx5e_destroy_devlink(mlx5e_dev);
	return err;
}

static void mlx5e_remove(struct auxiliary_device *adev)
{
	struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
	struct mlx5e_priv *priv = mlx5e_dev->priv;
	pm_message_t state = {};

	mlx5_core_uplink_netdev_set(priv->mdev, NULL);
	mlx5e_dcbnl_delete_app(priv);
	unregister_netdev(priv->netdev);
	mlx5e_suspend(adev, state);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
	mlx5e_devlink_port_unregister(mlx5e_dev);
	mlx5e_destroy_devlink(mlx5e_dev);
}
static const struct auxiliary_device_id mlx5e_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);

static struct auxiliary_driver mlx5e_driver = {
	.probe = mlx5e_probe,
	.remove = mlx5e_remove,
	.suspend = mlx5e_suspend,
	.resume = mlx5e_resume,
	.id_table = mlx5e_id_table,
};

int mlx5e_init(void)
{
	int ret;

	mlx5e_build_ptys2ethtool_map();
	ret = auxiliary_driver_register(&mlx5e_driver);
	if (ret)
		return ret;

	ret = mlx5e_rep_init();
	if (ret)
		auxiliary_driver_unregister(&mlx5e_driver);
	return ret;
}

void mlx5e_cleanup(void)
{
	mlx5e_rep_cleanup();
	auxiliary_driver_unregister(&mlx5e_driver);
}