/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
#include <net/xdp_sock.h>
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/monitor_stats.h"
#include "en/health.h"
#include "en/params.h"
#include "en/xsk/umem.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;

	if (!striding_rq_umr)
		return false;
	if (!inline_umr) {
		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
		return false;
	}
	return true;
}
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (MLX5_IPSEC_DEV(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}
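
/* Choose the RQ work-queue type: the multi-packet (striding) RQ is used
 * only when mlx5e_striding_rq_possible() says the device and current
 * configuration support it and the MLX5E_PFLAG_RX_STRIDING_RQ private
 * flag is still enabled; otherwise the legacy cyclic RQ is used.
 */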
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}
void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
		if (mlx5e_nic_stats_grps[i]->update_stats_mask &
		    MLX5E_NDO_UPDATE_STATS)
			mlx5e_nic_stats_grps[i]->update_stats(priv);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);
}
void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
{
	if (!priv->profile->update_stats)
		return;

	if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
		return;

	queue_work(priv->wq, &priv->update_stats_work);
}
static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
	struct mlx5_eqe *eqe = data;

	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
		return NOTIFY_DONE;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	priv->events_nb.notifier_call = async_event;
	mlx5_notifier_register(priv->mdev, &priv->events_nb);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
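
/* Prefill the static fields of the UMR WQE the ICOSQ posts to (re)map the
 * pages of a multi-packet WQE. Only the inline MTT entries vary per
 * posting; the control and UMR-control segments built here stay fixed for
 * the lifetime of the RQ.
 */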
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
						  sizeof(*rq->mpwqe.info)),
				       GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		return -ENOMEM;

	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
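
/* Byte offset of a multi-packet WQE inside the UMR-mapped address space:
 * each WQE owns an aligned block of pages, so the offset is just the WQE
 * index scaled by pages-per-WQE and then by the page size.
 */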
static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}
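
/* Partition the per-WQE fragments over pages: walk all WQEs and their
 * fragments, packing consecutive fragments into the current page until
 * the next fragment would cross a page boundary, at which point the
 * previous fragment is marked last_in_page and a fresh page is started.
 */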
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
	struct mlx5e_wqe_frag_info next_frag = {};
	struct mlx5e_wqe_frag_info *prev = NULL;
	int i;

	next_frag.di = &rq->wqe.di[0];

	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
		struct mlx5e_wqe_frag_info *frag =
			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
		int f;

		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
				next_frag.di++;
				next_frag.offset = 0;
				if (prev)
					prev->last_in_page = true;
			}
			*frag = next_frag;

			/* prepare next */
			next_frag.offset += frag_info[f].frag_stride;
			prev = frag;
		}
	}

	if (prev)
		prev->last_in_page = true;
}
static int mlx5e_init_di_list(struct mlx5e_rq *rq,
			      int wq_sz, int cpu)
{
	int len = wq_sz << rq->wqe.info.log_num_frags;

	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!rq->wqe.di)
		return -ENOMEM;

	mlx5e_init_frags_partition(rq);

	return 0;
}

static void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
	kvfree(rq->wqe.di);
}
static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);

	mlx5e_reporter_rq_cqe_err(rq);
}
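
/* Allocate and initialize an RQ in host memory: the work queue itself,
 * per-WQE metadata, the UMR mkey for the striding RQ, the XDP RX-queue
 * info and the page pool (or the AF_XDP zero-copy hooks when an umem is
 * given). The firmware object is created later by mlx5e_create_rq().
 */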
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk,
			  struct xdp_umem *umem,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct page_pool_params pp_params = { 0 };
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 num_xsk_frames = 0;
	u32 rq_xdp_ix;
	u32 pool_size;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;
	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->xdpsq   = &c->rq_xdpsq;
	rq->umem    = umem;

	if (xsk)
		rq->stats = &c->priv->channel_stats[c->ix].xskrq;
	else
		rq->stats = &c->priv->channel_stats[c->ix].rq;
	INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);

	if (params->xdp_prog)
		bpf_prog_inc(params->xdp_prog);
	rq->xdp_prog = params->xdp_prog;

	rq_xdp_ix = rq->ix;
	if (xsk)
		rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
	if (err < 0)
		goto err_rq_wq_destroy;

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
	rq->buff.umem_headroom = xsk ? xsk->headroom : 0;
	pool_size = 1 << params->log_rq_mtu_frames;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
					&rq->wq_ctrl);
		if (err)
			return err;

		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

		if (xsk)
			num_xsk_frames = wq_sz <<
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);

		pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
			mlx5e_mpwqe_get_log_rq_size(params, xsk);

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_mpwrq_linear :
				mlx5e_skb_from_cqe_mpwrq_nonlinear;

		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
		rq->mpwqe.num_strides =
			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_free;
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
					 &rq->wq_ctrl);
		if (err)
			return err;

		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);

		if (xsk)
			num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;

		rq->wqe.info = rqp->frags_info;
		rq->wqe.frags =
			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
						 (wq_sz << rq->wqe.info.log_num_frags)),
				      GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frags) {
			err = -ENOMEM;
			goto err_free;
		}

		err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
		if (err)
			goto err_free;

		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_free;
		}

		rq->wqe.skb_from_cqe = xsk ?
			mlx5e_xsk_skb_from_cqe_linear :
			mlx5e_rx_is_linear_skb(params, NULL) ?
				mlx5e_skb_from_cqe_linear :
				mlx5e_skb_from_cqe_nonlinear;
		rq->mkey_be = c->mkey_be;
	}

	if (xsk) {
		err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
		if (unlikely(err)) {
			mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
				      num_xsk_frames);
			goto err_free;
		}

		rq->zca.free = mlx5e_xsk_zca_free;
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_ZERO_COPY,
						 &rq->zca);
	} else {
		/* Create a page_pool and register it with rxq */
		pp_params.flags     = 0; /* No-internal DMA mapping in page_pool */
		pp_params.pool_size = pool_size;
		pp_params.nid       = cpu_to_node(c->cpu);
		pp_params.dev       = c->pdev;
		pp_params.dma_dir   = rq->buff.map_dir;

		/* page_pool can be used even when there is no rq->xdp_prog,
		 * given page_pool does not handle DMA mapping there is no
		 * required state to clear. And page_pool gracefully handle
		 * elevated refcnt.
		 */
		rq->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rq->page_pool)) {
			err = PTR_ERR(rq->page_pool);
			rq->page_pool = NULL;
			goto err_free;
		}
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_POOL, rq->page_pool);
	}
	if (err)
		goto err_free;

	for (i = 0; i < wq_sz; i++) {
		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			struct mlx5e_rx_wqe_ll *wqe =
				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
			u32 byte_count =
				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
			wqe->data[0].byte_count = cpu_to_be32(byte_count);
			wqe->data[0].lkey = rq->mkey_be;
		} else {
			struct mlx5e_rx_wqe_cyc *wqe =
				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
			int f;

			for (f = 0; f < rq->wqe.info.num_frags; f++) {
				u32 frag_size = rq->wqe.info.arr[f].frag_size |
					MLX5_HW_START_PADDING;

				wqe->data[f].byte_count = cpu_to_be32(frag_size);
				wqe->data[f].lkey = rq->mkey_be;
			}
			/* check if num_frags is not a pow of two */
			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
				wqe->data[f].byte_count = 0;
				wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
				wqe->data[f].addr = 0;
			}
		}
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_free:
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		/* With AF_XDP, page_cache is not used, so this loop is not
		 * entered, and it's safe to call mlx5e_page_release_dynamic
		 * directly.
		 */
		mlx5e_page_release_dynamic(rq, dma_info, false);
	}

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
		mlx5e_rqwq_reset(rq);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
	struct mlx5e_channel *c = rq->channel;

	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

	do {
		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

	mlx5e_reporter_rx_timeout(rq);
	return -ETIMEDOUT;
}
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq;
	u16 head;
	int i;

	if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return;

	wq = &rq->mpwqe.wq;
	head = wq->head;

	/* Outstanding UMR WQEs (in progress) start at wq->head */
	for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
		rq->dealloc_wqe(rq, head);
		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
	}

	rq->mpwqe.actual_wq_head = wq->head;
	rq->mpwqe.umr_in_progress = 0;
	rq->mpwqe.umr_completed = 0;
}
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	__be16 wqe_ix_be;
	u16 wqe_ix;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

		mlx5e_free_rx_in_progress_descs(rq);

		while (!mlx5_wq_ll_is_empty(wq)) {
			struct mlx5e_rx_wqe_ll *wqe;

			wqe_ix_be = *wq->tail_next;
			wqe_ix    = be16_to_cpu(wqe_ix_be);
			wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_ll_pop(wq, wqe_ix_be,
				       &wqe->next.next_wqe_index);
		}
	} else {
		struct mlx5_wq_cyc *wq = &rq->wqe.wq;

		while (!mlx5_wq_cyc_is_empty(wq)) {
			wqe_ix = mlx5_wq_cyc_get_tail(wq);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_cyc_pop(wq);
		}
	}
}
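
/* Open an RQ: allocate it in host memory, create the firmware object and
 * move it from the RST to the RDY state. State bits that affect the
 * datapath (full CQE checksum, DIM moderation, csum_complete disabled
 * under XDP) are set here, before the RQ is activated.
 */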
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xdp_umem *umem, struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);

	if (params->rx_dim_enabled)
		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	/* We disable csum_complete when XDP is enabled since
	 * XDP programs might manipulate packets which will render
	 * skb->checksum incorrect.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	mlx5e_trigger_irq(&rq->channel->icosq);
}

void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	cancel_work_sync(&rq->channel->icosq.recover_work);
	cancel_work_sync(&rq->recover_work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kvfree(sq->db.xdpi_fifo.xi);
	kvfree(sq->db.wqe_info);
}

static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	int wq_sz        = mlx5_wq_cyc_get_size(&sq->wq);
	int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
				      GFP_KERNEL, numa);
	if (!xdpi_fifo->xi)
		return -ENOMEM;

	xdpi_fifo->pc   = &sq->xdpi_fifo_pc;
	xdpi_fifo->cc   = &sq->xdpi_fifo_cc;
	xdpi_fifo->mask = dsegs_per_wq - 1;

	return 0;
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;

	sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
					GFP_KERNEL, numa);
	if (!sq->db.wqe_info)
		return -ENOMEM;

	err = mlx5e_alloc_xdpsq_fifo(sq, numa);
	if (err) {
		mlx5e_free_xdpsq_db(sq);
		return err;
	}

	return 0;
}
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xdp_umem *umem,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq,
			     bool is_redirect)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq     = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->umem      = umem;

	sq->stats = sq->umem ?
		&c->priv->channel_stats[c->ix].xsksq :
		is_redirect ?
		&c->priv->channel_stats[c->ix].xdpsq :
		&c->priv->channel_stats[c->ix].rq_xdpsq;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kvfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
						  sizeof(*sq->db.ico_wqe)),
				       GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
					      recover_work);

	mlx5e_reporter_icosq_cqe_err(sq);
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq     = &sq->wq;
	int err;

	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kvfree(sq->db.wqe_info);
	kvfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
						   sizeof(*sq->db.dma_fifo)),
					GFP_KERNEL, numa);
	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
						   sizeof(*sq->db.wqe_info)),
					GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq,
			     int tc)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq     = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->ch_ix     = c->ix;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
	sq->stop_room = MLX5E_SQ_STOP_ROOM;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
#ifdef CONFIG_MLX5_EN_TLS
	if (mlx5_accel_is_tls_device(c->priv->mdev)) {
		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
		sq->stop_room += MLX5E_SQ_TLS_ROOM +
			mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
						    TLS_MAX_PAYLOAD_SIZE);
	}
#endif

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl        *wq_ctrl;
	u32                         cqn;
	u32                         tisn;
	u8                          tis_lst_sz;
	u8                          min_inline_mode;
};
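
/* Issue the CREATE_SQ firmware command from the parameters gathered in
 * struct mlx5e_create_sq_param. The SQ is created in the RST state and
 * moved to RDY separately via mlx5e_modify_sq().
 */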
static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq,
			    int tc)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_AM);

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(sq);

	return err;
}
*sq
)
1340 sq
->txq
= netdev_get_tx_queue(sq
->channel
->netdev
, sq
->txq_ix
);
1341 set_bit(MLX5E_SQ_STATE_ENABLED
, &sq
->state
);
1342 netdev_tx_reset_queue(sq
->txq
);
1343 netif_tx_start_queue(sq
->txq
);
void mlx5e_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_wq_cyc *wq = &sq->wq;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	mlx5e_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		struct mlx5e_tx_wqe_info *wi;
		struct mlx5e_tx_wqe *nop;

		wi = &sq->db.wqe_info[pi];

		memset(wi, 0, sizeof(*wi));
		wi->num_wqebbs = 1;
		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}
static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_rate_limit rl = {0};

	cancel_work_sync(&sq->dim.work);
	cancel_work_sync(&sq->recover_work);
	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		mlx5_rl_remove_rate(mdev, &rl);
	}
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
					      recover_work);

	mlx5e_reporter_tx_err_cqe(sq);
}
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	mlx5e_free_icosq(sq);

	return err;
}
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
{
	set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
}

void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
	struct mlx5e_channel *c = icosq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
	napi_synchronize(&c->napi);
}

void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}
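
/* Open an XDP SQ. When multi-packet WQEs are not in use, every WQE has
 * the same shape, so the fixed control/eth/data segment fields are
 * written once here and only the DMA address and length change per
 * transmitted frame.
 */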
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
		     struct mlx5e_xdpsq *sq, bool is_redirect)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	mlx5e_set_xmit_fp(sq, param->is_mpw);

	if (!param->is_mpw) {
		unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
		unsigned int inline_hdr_sz = 0;
		int i;

		if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
			inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
			ds_cnt++;
		}

		/* Pre initialize fixed WQE fields */
		for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
			struct mlx5e_xdp_wqe_info *wi  = &sq->db.wqe_info[i];
			struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
			struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
			struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
			struct mlx5_wqe_data_seg *dseg;

			cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
			eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

			dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
			dseg->lkey = sq->mkey_be;

			wi->num_wqebbs = 1;
			wi->num_pkts   = 1;
		}
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
	if (err)
		return err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}
static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix            = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}
static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
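
/* Open a CQ: allocate the host work queue, create the firmware CQ and,
 * when the device supports it, apply the requested moderation profile.
 */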
int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err, tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc], tc);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	struct mlx5_rate_limit rl = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, &rl);
	}

	sq->rate_limit = 0;

	if (rate) {
		rl.rate = rate;
		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, &rl);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
*c
,
1816 struct mlx5e_params
*params
,
1817 struct mlx5e_channel_param
*cparam
)
1819 struct dim_cq_moder icocq_moder
= {0, 0};
1822 err
= mlx5e_open_cq(c
, icocq_moder
, &cparam
->icosq_cq
, &c
->icosq
.cq
);
1826 err
= mlx5e_open_tx_cqs(c
, params
, cparam
);
1828 goto err_close_icosq_cq
;
1830 err
= mlx5e_open_cq(c
, params
->tx_cq_moderation
, &cparam
->tx_cq
, &c
->xdpsq
.cq
);
1832 goto err_close_tx_cqs
;
1834 err
= mlx5e_open_cq(c
, params
->rx_cq_moderation
, &cparam
->rx_cq
, &c
->rq
.cq
);
1836 goto err_close_xdp_tx_cqs
;
1838 /* XDP SQ CQ params are same as normal TXQ sq CQ params */
1839 err
= c
->xdp
? mlx5e_open_cq(c
, params
->tx_cq_moderation
,
1840 &cparam
->tx_cq
, &c
->rq_xdpsq
.cq
) : 0;
1842 goto err_close_rx_cq
;
1844 napi_enable(&c
->napi
);
1846 err
= mlx5e_open_icosq(c
, params
, &cparam
->icosq
, &c
->icosq
);
1848 goto err_disable_napi
;
1850 err
= mlx5e_open_sqs(c
, params
, cparam
);
1852 goto err_close_icosq
;
1855 err
= mlx5e_open_xdpsq(c
, params
, &cparam
->xdp_sq
, NULL
,
1856 &c
->rq_xdpsq
, false);
1861 err
= mlx5e_open_rq(c
, params
, &cparam
->rq
, NULL
, NULL
, &c
->rq
);
1863 goto err_close_xdp_sq
;
1865 err
= mlx5e_open_xdpsq(c
, params
, &cparam
->xdp_sq
, NULL
, &c
->xdpsq
, true);
1872 mlx5e_close_rq(&c
->rq
);
1876 mlx5e_close_xdpsq(&c
->rq_xdpsq
);
1882 mlx5e_close_icosq(&c
->icosq
);
1885 napi_disable(&c
->napi
);
1888 mlx5e_close_cq(&c
->rq_xdpsq
.cq
);
1891 mlx5e_close_cq(&c
->rq
.cq
);
1893 err_close_xdp_tx_cqs
:
1894 mlx5e_close_cq(&c
->xdpsq
.cq
);
1897 mlx5e_close_tx_cqs(c
);
1900 mlx5e_close_cq(&c
->icosq
.cq
);
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq);
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_cq(&c->xdpsq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
}
static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
{
	u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);

	return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct xdp_umem *umem,
			      struct mlx5e_channel **cp)
{
	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
	struct net_device *netdev = priv->netdev;
	struct mlx5e_xsk_param xsk;
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;
	int eqn;

	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
	if (err)
		return err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = priv->mdev->device;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->xdp      = !!params->xdp_prog;
	c->stats    = &priv->channel_stats[ix].ch;
	c->irq_desc = irq_to_desc(irq);
	c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_queues(c, params, cparam);
	if (unlikely(err))
		goto err_napi_del;

	if (umem) {
		mlx5e_build_xsk_param(umem, &xsk);
		err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
		if (unlikely(err))
			goto err_close_queues;
	}

	*cp = c;

	return 0;

err_close_queues:
	mlx5e_close_queues(c);

err_napi_del:
	netif_napi_del(&c->napi);

	kvfree(c);

	return err;
}
static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_icosq(&c->icosq);
	mlx5e_activate_rq(&c->rq);

	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_activate_xsk(c);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_deactivate_xsk(c);

	mlx5e_deactivate_rq(&c->rq);
	mlx5e_deactivate_icosq(&c->icosq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_close_xsk(c);
	mlx5e_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}
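
/* For the cyclic RQ, compute how an MTU-sized packet is split into
 * receive fragments: a single linear fragment when the whole packet fits,
 * or up to MLX5E_MAX_RX_FRAGS fragments of at most DEFAULT_FRAG_SIZE
 * (PAGE_SIZE for large MTUs) otherwise.
 */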
#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

#ifdef CONFIG_MLX5_EN_IPSEC
	if (MLX5_IPSEC_DEV(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;
#endif

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	if (byte_count > PAGE_SIZE +
	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SIZE;

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);

		buf_size += frag_size;
		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);
}
static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}
static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	return MLX5_GET(wq, wq, log_wq_sz);
}
void mlx5e_build_rq_param(struct mlx5e_priv *priv,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk,
			  struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mdev->device);
}
static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
				      struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);

	param->wq.buf_numa_node = dev_to_node(mdev->device);
}
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
		    !!MLX5_IPSEC_DEV(priv->mdev);
	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
	if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}
void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
			      u8 log_wq_size,
			      struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
			     u8 log_wq_size,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}
void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
}
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return order_base_2(MLX5E_UMR_WQEBBS) +
			mlx5e_get_rq_log_wq_sz(rqp->rqc);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	}
}
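
/* Build all queue parameters for a channel in one place. The ICOSQ is
 * sized from the RQ parameters, since its main job here is posting UMR
 * WQEs on behalf of the striding RQ.
 */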
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz;

	mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);

	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, NULL, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		struct xdp_umem *umem = NULL;

		if (chs->params.xdp_prog)
			umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);

		err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	mlx5e_health_channels_update(priv);
	kvfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kvfree(cparam);
	chs->num = 0;
	return err;
}
static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;

		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);

		/* Don't wait on the XSK RQ, because the newer xdpsock sample
		 * doesn't provide any Fill Ring entries at the setup stage.
		 */
	}

	return err ? -ETIMEDOUT : 0;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}
2394 mlx5e_create_rqt(struct mlx5e_priv
*priv
, int sz
, struct mlx5e_rqt
*rqt
)
2396 struct mlx5_core_dev
*mdev
= priv
->mdev
;
2403 inlen
= MLX5_ST_SZ_BYTES(create_rqt_in
) + sizeof(u32
) * sz
;
2404 in
= kvzalloc(inlen
, GFP_KERNEL
);
2408 rqtc
= MLX5_ADDR_OF(create_rqt_in
, in
, rqt_context
);
2410 MLX5_SET(rqtc
, rqtc
, rqt_actual_size
, sz
);
2411 MLX5_SET(rqtc
, rqtc
, rqt_max_size
, sz
);
2413 for (i
= 0; i
< sz
; i
++)
2414 MLX5_SET(rqtc
, rqtc
, rq_num
[i
], priv
->drop_rq
.rqn
);
2416 err
= mlx5_core_create_rqt(mdev
, in
, inlen
, &rqt
->rqtn
);
2418 rqt
->enabled
= true;
2424 void mlx5e_destroy_rqt(struct mlx5e_priv
*priv
, struct mlx5e_rqt
*rqt
)
2426 rqt
->enabled
= false;
2427 mlx5_core_destroy_rqt(priv
->mdev
, rqt
->rqtn
);
2430 int mlx5e_create_indirect_rqt(struct mlx5e_priv
*priv
)
2432 struct mlx5e_rqt
*rqt
= &priv
->indir_rqt
;
2435 err
= mlx5e_create_rqt(priv
, MLX5E_INDIR_RQT_SIZE
, rqt
);
2437 mlx5_core_warn(priv
->mdev
, "create indirect rqts failed, %d\n", err
);
2441 int mlx5e_create_direct_rqts(struct mlx5e_priv
*priv
, struct mlx5e_tir
*tirs
)
2446 for (ix
= 0; ix
< priv
->max_nch
; ix
++) {
2447 err
= mlx5e_create_rqt(priv
, 1 /*size */, &tirs
[ix
].rqt
);
2449 goto err_destroy_rqts
;
2455 mlx5_core_warn(priv
->mdev
, "create rqts failed, %d\n", err
);
2456 for (ix
--; ix
>= 0; ix
--)
2457 mlx5e_destroy_rqt(priv
, &tirs
[ix
].rqt
);
2462 void mlx5e_destroy_direct_rqts(struct mlx5e_priv
*priv
, struct mlx5e_tir
*tirs
)
2466 for (i
= 0; i
< priv
->max_nch
; i
++)
2467 mlx5e_destroy_rqt(priv
, &tirs
[i
].rqt
);
2470 static int mlx5e_rx_hash_fn(int hfunc
)
2472 return (hfunc
== ETH_RSS_HASH_TOP
) ?
2473 MLX5_RX_HASH_FN_TOEPLITZ
:
2474 MLX5_RX_HASH_FN_INVERTED_XOR8
;
2477 int mlx5e_bits_invert(unsigned long a
, int size
)
2482 for (i
= 0; i
< size
; i
++)
2483 inv
|= (test_bit(size
- i
- 1, &a
) ? 1 : 0) << i
;
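/* Worked example: mlx5e_bits_invert(1, 3) reverses the 3-bit value 0b001
 * into 0b100 and returns 4. mlx5e_fill_rqt_rqns() below applies this bit
 * reversal to the indirection-table index when the XOR8 hash function is
 * selected.
 */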
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->rss_params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}

		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}

static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < priv->max_nch; ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss = true,
		.rss = {
			.channels = chs,
			.hfunc = priv->rss_params.hfunc,
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		.rqn = priv->drop_rq.rqn,
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}
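/* Redirecting every RQT to the drop RQ is the teardown counterpart of
 * mlx5e_redirect_rqts_to_channels(): each table keeps a valid destination
 * while the real channels are being closed, so no RQT is ever left pointing
 * at a destroyed RQ.
 */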
static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
	[MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
			    .rx_hash_fields = MLX5_HASH_IP,
	},
	[MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
			    .rx_hash_fields = MLX5_HASH_IP,
	},
};

struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
{
	return tirc_default_config[tt];
}

static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
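/* lro_max_ip_payload_size is programmed in 256-byte units, hence the ">> 8"
 * above. Illustrative arithmetic (assuming a 64KB LRO WQE, not necessarily
 * the actual value of MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ):
 * (65536 - 256) >> 8 = 255.
 */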
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner)
{
	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
	if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, rss_params->toeplitz_hash_key, len);
	}
	MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		 ttconfig->l3_prot_type);
	MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
		 ttconfig->l4_prot_type);
	MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		 ttconfig->rx_hash_fields);
}

static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
					enum mlx5e_traffic_types tt,
					u32 rx_hash_fields)
{
	*ttconfig = tirc_default_config[tt];
	ttconfig->rx_hash_fields = rx_hash_fields;
}

void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
{
	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
	struct mlx5e_rss_params *rss = &priv->rss_params;
	struct mlx5_core_dev *mdev = priv->mdev;
	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
	struct mlx5e_tirc_config ttconfig;
	int tt;

	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(tirc, 0, ctxlen);
		mlx5e_update_rx_hash_fields(&ttconfig, tt,
					    rss->rx_hash_fields[tt]);
		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
	}

	/* Verify inner tirs resources allocated */
	if (!priv->inner_indir_tir[0].tirn)
		return;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(tirc, 0, ctxlen);
		mlx5e_update_rx_hash_fields(&ttconfig, tt,
					    rss->rx_hash_fields[tt]);
		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
		mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
				     inlen);
	}
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *tirc;
	int inlen;
	u32 *in;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->max_nch; ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);
	return err;
}

static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);

static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params, u16 mtu)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params, u16 *mtu)
{
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
	if (err)
		return err;

	mlx5e_query_mtu(mdev, params, &mtu);
	if (mtu != params->sw_mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, params->sw_mtu);

	params->sw_mtu = mtu;
	return 0;
}

MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);

void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
				ETH_MAX_MTU);
}

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
{
	int num_txqs = count * priv->channels.params.num_tc;
	int num_rxqs = count * priv->profile->rq_groups;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, num_rxqs);
}

static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
					   struct mlx5e_params *params)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_comp_vectors, ix, irq;

	num_comp_vectors = mlx5_comp_vectors_count(mdev);

	for (ix = 0; ix < params->num_channels; ix++) {
		cpumask_clear(priv->scratchpad.cpumask);

		for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
			int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));

			cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
		}

		netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
	}
}
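/* Illustrative layout (assuming 4 channels and 8 completion vectors):
 * channel 0 collects the CPUs of IRQs 0 and 4, channel 1 those of IRQs 1
 * and 5, and so on - each TX queue's XPS mask matches the CPUs whose
 * interrupts that channel services.
 */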
int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
	u16 count = priv->channels.params.num_channels;

	mlx5e_update_netdev_queues(priv, count);
	mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);

	if (!netif_is_rxfh_configured(priv->netdev))
		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
					      MLX5E_INDIR_RQT_SIZE, count);

	return 0;
}

MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);

static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
{
	int i, ch;

	ch = priv->channels.num;

	for (i = 0; i < ch; i++) {
		int tc;

		for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
			struct mlx5e_channel *c = priv->channels.c[i];
			struct mlx5e_txqsq *sq = &c->sq[tc];

			priv->txq2sq[sq->txq_ix] = sq;
			priv->channel_tc2realtxq[i][tc] = i + tc * ch;
		}
	}
}

void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_build_txq_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	mlx5e_xdp_tx_enable(priv);
	netif_tx_start_all_queues(priv->netdev);

	if (mlx5e_is_vport_rep(priv))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);

	mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);

	mlx5e_redirect_rqts_to_drop(priv);

	if (mlx5e_is_vport_rep(priv))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_xdp_tx_disable(priv);
	mlx5e_deactivate_channels(&priv->channels);
}

static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				      struct mlx5e_channels *new_chs,
				      mlx5e_fp_preactivate preactivate,
				      void *context)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5e_channels old_chs;
	bool carrier_ok;
	int err = 0;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	mlx5e_deactivate_priv_channels(priv);

	old_chs = priv->channels;
	priv->channels = *new_chs;

	/* New channels are ready to roll, call the preactivate hook if needed
	 * to modify HW settings or update kernel parameters.
	 */
	if (preactivate) {
		err = preactivate(priv, context);
		if (err) {
			priv->channels = old_chs;
			goto out;
		}
	}

	mlx5e_close_channels(&old_chs);
	priv->profile->update_rx(priv);

out:
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);

	return err;
}
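/* Ordering note: the caller opens the new channel set before this function
 * deactivates the old one, and a preactivate failure rolls back to the old
 * channels before reactivation - so the netdev is never left without a
 * usable channel set.
 */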
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_preactivate preactivate,
			       void *context)
{
	int err;

	err = mlx5e_open_channels(priv, new_chs);
	if (err)
		return err;

	err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
	if (err)
		goto err_close;

	return 0;

err_close:
	mlx5e_close_channels(new_chs);

	return err;
}

int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channels new_channels = {};

	new_channels.params = priv->channels.params;
	return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
}

void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}

int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	priv->profile->update_rx(priv);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);

	mlx5e_queue_update_stats(priv);
	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
		udp_tunnel_get_rx_info(netdev);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
				 &rq->wq_ctrl);
	if (err)
		return err;

	/* Mark as unused given "Drop-RQ" packets never reach XDP */
	xdp_rxq_info_unused(&rq->xdp_rxq);

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	param->wq.buf_numa_node = dev_to_node(mdev->device);
	param->wq.db_numa_node  = dev_to_node(mdev->device);

	return mlx5e_alloc_cq_common(mdev, param, cq);
}

int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(priv, &rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (MLX5_GET(tisc, tisc, tls_en))
		MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
	int tc, i;

	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
}

static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int tc, i;
	int err;

	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
		for (tc = 0; tc < priv->profile->max_tc; tc++) {
			u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
			void *tisc;

			tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

			MLX5_SET(tisc, tisc, prio, tc << 1);

			if (mlx5e_lag_should_assign_affinity(priv->mdev))
				MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);

			err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
			if (err)
				goto err_close_tises;
		}
	}

	return 0;

err_close_tises:
	for (; i >= 0; i--) {
		for (tc--; tc >= 0; tc--)
			mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
		tc = priv->profile->max_tc;
	}

	return err;
}

static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	mlx5e_destroy_tises(priv);
}
static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
					     u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en,
		 priv->channels.params.tunneled_offload_en);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
}

static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
				       &tirc_default_config[tt], tirc, false);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}

static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
				       &tirc_default_config[tt], tirc, true);
}

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err = 0;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < priv->max_nch; ix++) {
		memset(in, 0, inlen);
		tir = &tirs[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	goto out;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &tirs[ix]);

out:
	kvfree(in);

	return err;
}
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);

	/* Verify inner tirs resources allocated */
	if (!priv->inner_indir_tir[0].tirn)
		return;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
	int i;

	for (i = 0; i < priv->max_nch; i++)
		mlx5e_destroy_tir(priv->mdev, &tirs[i]);
}

static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels,
					 mlx5e_num_channels_changed_ctx, NULL);
	if (err)
		goto out;

	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
				    new_channels.params.num_tc);

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				   void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
#endif

static LIST_HEAD(mlx5e_block_cb_list);

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
	case TC_SETUP_BLOCK: {
		struct flow_block_offload *f = type_data;

		f->unlocked_driver_cb = true;
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_block_cb_list,
						  mlx5e_setup_tc_block_cb,
						  priv, priv, true);
	}
#endif
	case TC_SETUP_QDISC_MQPRIO:
		return mlx5e_setup_tc_mqprio(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
{
	int i;

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		int j;

		s->rx_packets += rq_stats->packets + xskrq_stats->packets;
		s->rx_bytes   += rq_stats->bytes + xskrq_stats->bytes;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets += sq_stats->packets;
			s->tx_bytes   += sq_stats->bytes;
			s->tx_dropped += sq_stats->dropped;
		}
	}
}

void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	/* In switchdev mode, monitor counters doesn't monitor
	 * rx/tx stats of 802_3. The update stats mechanism
	 * should keep the 802_3 layout counters updated
	 */
	if (!mlx5e_monitor_counter_supported(priv) ||
	    mlx5e_is_uplink_rep(priv)) {
		/* update HW stats in background for next time */
		mlx5e_queue_update_stats(priv);
	}

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		mlx5e_fold_sw_stats64(priv, stats);
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(features, feature, enable)	\
	do {						\
		if (enable)				\
			*features |= feature;		\
		else					\
			*features &= ~feature;		\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);

static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *old_params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	if (enable && priv->xsk.refcnt) {
		netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n",
			    priv->xsk.refcnt);
		err = -EINVAL;
		goto out;
	}

	old_params = &priv->channels.params;
	if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		netdev_warn(netdev, "can't set LRO with legacy RQ\n");
		err = -EINVAL;
		goto out;
	}

	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *old_params;
	new_channels.params.lro_en = enable;

	if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
			reset = false;
	}

	if (!reset) {
		*old_params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels,
					 mlx5e_modify_tirs_lro_ctx, NULL);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_cvlan_filter(priv);
	else
		mlx5e_disable_cvlan_filter(priv);

	return 0;
}

#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}
#endif

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif

static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t *features,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(features, feature, enable);
	return 0;
}

int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
{
	netdev_features_t oper_features = netdev->features;
	int err = 0;

#define MLX5E_HANDLE_FEATURE(feature, handler) \
	mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)

	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_cvlan_filter);
#ifdef CONFIG_MLX5_ESWITCH
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
#endif
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif

	if (err) {
		netdev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_params *params;

	mutex_lock(&priv->state_lock);
	params = &priv->channels.params;
	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
		/* HW strips the outer C-tag header, this is a problem
		 * for S-tag traffic.
		 */
		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		if (!params->vlan_strip_disable)
			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
	}
	if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		if (features & NETIF_F_LRO) {
			netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
			features &= ~NETIF_F_LRO;
		}
	}

	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		features &= ~NETIF_F_RXHASH;
		if (netdev->features & NETIF_F_RXHASH)
			netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
	}

	mutex_unlock(&priv->state_lock);

	return features;
}

static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
				   struct mlx5e_channels *chs,
				   struct mlx5e_params *new_params,
				   struct mlx5_core_dev *mdev)
{
	u16 ix;

	for (ix = 0; ix < chs->params.num_channels; ix++) {
		struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
		struct mlx5e_xsk_param xsk;

		if (!umem)
			continue;

		mlx5e_build_xsk_param(umem, &xsk);

		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
			u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
			int max_mtu_frame, max_mtu_page, max_mtu;

			/* Two criteria must be met:
			 * 1. HW MTU + all headrooms <= XSK frame size.
			 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
			 */
			max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
			max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
			max_mtu = min(max_mtu_frame, max_mtu_page);

			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n",
				   new_params->sw_mtu, ix, max_mtu);
			return false;
		}
	}

	return true;
}

int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	params = &priv->channels.params;

	reset = !params->lro_en;
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *params;
	new_channels.params.sw_mtu = new_mtu;

	if (params->xdp_prog &&
	    !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
			   new_mtu, mlx5e_xdp_max_mtu(params, NULL));
		err = -EINVAL;
		goto out;
	}

	if (priv->xsk.refcnt &&
	    !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
				    &new_channels.params, priv->mdev)) {
		err = -EINVAL;
		goto out;
	}

	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
							      &new_channels.params,
							      NULL);
		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL);

		/* If XSK is active, XSK RQs are linear. */
		is_linear |= priv->xsk.refcnt;

		/* Always reset in linear mode - hw_mtu is used in data path. */
		reset = reset && (is_linear || (ppw_old != ppw_new));
	}

	if (!reset) {
		params->sw_mtu = new_mtu;
		if (preactivate)
			preactivate(priv, NULL);
		netdev->mtu = params->sw_mtu;
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL);
	if (err)
		goto out;

	netdev->mtu = new_channels.params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}
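/* Summary of the reset logic above: an MTU change forces a full channel
 * restart only when the data path depends on it (linear striding-RQ mode,
 * active XSK, or a change in packets-per-WQE); otherwise sw_mtu is updated
 * in place and only the preactivate hook runs.
 */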
static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
	    (mlx5_clock_get_ptp_index(priv->mdev) == -1))
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* Reset CQE compression to Admin default */
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		/* Disable CQE compression */
		if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
			netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
		err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
		if (err) {
			netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
			mutex_unlock(&priv->state_lock);
			return err;
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		mutex_unlock(&priv->state_lock);
		return -ERANGE;
	}

	memcpy(&priv->tstamp, &config, sizeof(config));
	mutex_unlock(&priv->state_lock);

	/* might need to fix some features */
	netdev_update_features(priv->netdev);

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
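/* RX timestamping and CQE compression are mutually exclusive: compressed
 * (mini) CQEs do not carry a full per-packet timestamp, so enabling any RX
 * filter above force-disables compression, and HWTSTAMP_FILTER_NONE
 * restores the administratively configured default.
 */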
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config *cfg = &priv->tstamp;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
		      int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

int mlx5e_get_vf_config(struct net_device *dev,
			int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

int mlx5e_get_vf_stats(struct net_device *dev,
		       int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
#endif
struct mlx5e_vxlan_work {
	struct work_struct work;
	struct mlx5e_priv *priv;
	u16 port;
};

static void mlx5e_vxlan_add_work(struct work_struct *work)
{
	struct mlx5e_vxlan_work *vxlan_work =
		container_of(work, struct mlx5e_vxlan_work, work);
	struct mlx5e_priv *priv = vxlan_work->priv;
	u16 port = vxlan_work->port;

	mutex_lock(&priv->state_lock);
	mlx5_vxlan_add_port(priv->mdev->vxlan, port);
	mutex_unlock(&priv->state_lock);

	kfree(vxlan_work);
}

static void mlx5e_vxlan_del_work(struct work_struct *work)
{
	struct mlx5e_vxlan_work *vxlan_work =
		container_of(work, struct mlx5e_vxlan_work, work);
	struct mlx5e_priv *priv = vxlan_work->priv;
	u16 port = vxlan_work->port;

	mutex_lock(&priv->state_lock);
	mlx5_vxlan_del_port(priv->mdev->vxlan, port);
	mutex_unlock(&priv->state_lock);

	kfree(vxlan_work);
}

static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
{
	struct mlx5e_vxlan_work *vxlan_work;

	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
	if (!vxlan_work)
		return;

	if (add)
		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
	else
		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);

	vxlan_work->priv = priv;
	vxlan_work->port = port;
	queue_work(priv->wq, &vxlan_work->work);
}

void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
		return;

	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}

void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
		return;

	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	unsigned int offset = 0;
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}

	switch (proto) {
	case IPPROTO_GRE:
		return features;
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
			return features;
		break;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);

		/* Verify if UDP port is being offloaded by HW */
		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
			return features;

#if IS_ENABLED(CONFIG_GENEVE)
		/* Support Geneve offload for default UDP port */
		if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
			return features;
#endif
		break;
	}

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	bool report_failed = false;
	int err;
	int i;

	rtnl_lock();
	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct netdev_queue *dev_queue =
			netdev_get_tx_queue(priv->netdev, i);
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(dev_queue))
			continue;

		if (mlx5e_reporter_tx_timeout(sq))
			report_failed = true;
	}

	if (!report_failed)
		goto unlock;

	err = mlx5e_safe_reopen_channels(priv);
	if (err)
		netdev_err(priv->netdev,
			   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
			   err);

unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX timeout detected\n");
	queue_work(priv->wq, &priv->tx_timeout_work);
}
static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5e_channels new_channels = {};

	if (priv->channels.params.lro_en) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		return -EINVAL;
	}

	if (MLX5_IPSEC_DEV(priv->mdev)) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		return -EINVAL;
	}

	new_channels.params = priv->channels.params;
	new_channels.params.xdp_prog = prog;

	/* No XSK params: AF_XDP can't be enabled yet at the point of setting
	 * the XDP program.
	 */
	if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
			    new_channels.params.sw_mtu,
			    mlx5e_xdp_max_mtu(&new_channels.params, NULL));
		return -EINVAL;
	}

	return 0;
}

static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	bool reset, was_opened;
	int err = 0;
	int i;

	mutex_lock(&priv->state_lock);

	if (prog) {
		err = mlx5e_xdp_allowed(priv, prog);
		if (err)
			goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && !reset)
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		bpf_prog_add(prog, priv->channels.num);

	if (was_opened && reset) {
		struct mlx5e_channels new_channels = {};

		new_channels.params = priv->channels.params;
		new_channels.params.xdp_prog = prog;
		mlx5e_set_rq_type(priv->mdev, &new_channels.params);
		old_prog = priv->channels.params.xdp_prog;

		err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
		if (err)
			goto unlock;
	} else {
		/* exchange programs, extra prog reference we got from caller
		 * as long as we don't fail from this point onwards.
		 */
		old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	}

	if (old_prog)
		bpf_prog_put(old_prog);

	if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);

	if (!was_opened || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];
		bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		if (xsk_open)
			clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);

		if (xsk_open) {
			old_prog = xchg(&c->xskrq.xdp_prog, prog);
			if (old_prog)
				bpf_prog_put(old_prog);
		}

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		if (xsk_open)
			set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);
	}

unlock:
	mutex_unlock(&priv->state_lock);
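	return err;
}

/* Reference-count sketch for the no-reset path above (an informal reading
 * of the code, not normative): the caller's reference to @prog is consumed
 * by the params xchg(); bpf_prog_add() pre-charges one reference per
 * channel, each transferred to an RQ by the per-RQ xchg(); and every
 * bpf_prog_put() drops the reference held by the program being replaced.
 */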
static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		return 0;
	case XDP_SETUP_XSK_UMEM:
		return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
					    xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				struct net_device *dev, u32 filter_mask,
				int nlflags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 mode, setting;
	int err;

	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
	if (err)
		return err;
	mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       mode,
				       0, 0, nlflags, filter_mask, NULL);
}

static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				u16 flags, struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct nlattr *attr, *br_spec;
	u16 mode = BRIDGE_MODE_UNDEF;
	u8 setting;
	int rem;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode > BRIDGE_MODE_VEPA)
			return -EINVAL;

		break;
	}

	if (mode == BRIDGE_MODE_UNDEF)
		return -EINVAL;

	setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
	return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
}
#endif
4593 const struct net_device_ops mlx5e_netdev_ops
= {
4594 .ndo_open
= mlx5e_open
,
4595 .ndo_stop
= mlx5e_close
,
4596 .ndo_start_xmit
= mlx5e_xmit
,
4597 .ndo_setup_tc
= mlx5e_setup_tc
,
4598 .ndo_select_queue
= mlx5e_select_queue
,
4599 .ndo_get_stats64
= mlx5e_get_stats
,
4600 .ndo_set_rx_mode
= mlx5e_set_rx_mode
,
4601 .ndo_set_mac_address
= mlx5e_set_mac
,
4602 .ndo_vlan_rx_add_vid
= mlx5e_vlan_rx_add_vid
,
4603 .ndo_vlan_rx_kill_vid
= mlx5e_vlan_rx_kill_vid
,
4604 .ndo_set_features
= mlx5e_set_features
,
4605 .ndo_fix_features
= mlx5e_fix_features
,
4606 .ndo_change_mtu
= mlx5e_change_nic_mtu
,
4607 .ndo_do_ioctl
= mlx5e_ioctl
,
4608 .ndo_set_tx_maxrate
= mlx5e_set_tx_maxrate
,
4609 .ndo_udp_tunnel_add
= mlx5e_add_vxlan_port
,
4610 .ndo_udp_tunnel_del
= mlx5e_del_vxlan_port
,
4611 .ndo_features_check
= mlx5e_features_check
,
4612 .ndo_tx_timeout
= mlx5e_tx_timeout
,
4613 .ndo_bpf
= mlx5e_xdp
,
4614 .ndo_xdp_xmit
= mlx5e_xdp_xmit
,
4615 .ndo_xsk_wakeup
= mlx5e_xsk_wakeup
,
4616 #ifdef CONFIG_MLX5_EN_ARFS
4617 .ndo_rx_flow_steer
= mlx5e_rx_flow_steer
,
4619 #ifdef CONFIG_MLX5_ESWITCH
4620 .ndo_bridge_setlink
= mlx5e_bridge_setlink
,
4621 .ndo_bridge_getlink
= mlx5e_bridge_getlink
,
4623 /* SRIOV E-Switch NDOs */
4624 .ndo_set_vf_mac
= mlx5e_set_vf_mac
,
4625 .ndo_set_vf_vlan
= mlx5e_set_vf_vlan
,
4626 .ndo_set_vf_spoofchk
= mlx5e_set_vf_spoofchk
,
4627 .ndo_set_vf_trust
= mlx5e_set_vf_trust
,
4628 .ndo_set_vf_rate
= mlx5e_set_vf_rate
,
4629 .ndo_get_vf_config
= mlx5e_get_vf_config
,
4630 .ndo_set_vf_link_state
= mlx5e_set_vf_link_state
,
4631 .ndo_get_vf_stats
= mlx5e_get_vf_stats
,
4633 .ndo_get_devlink_port
= mlx5e_get_devlink_port
,
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}
static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}
static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
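
/* When adaptive moderation (DIM) is enabled, the generic net_dim defaults
 * are used for the initial profile; otherwise the driver's fixed defaults
 * above apply.
 */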
void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
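
/* Pick the smallest device-supported LRO period that is >= the wanted
 * timeout; if none qualifies, fall back to the largest supported period.
 */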
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - Slow PCI heuristic.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if (!slow_pci_heuristic(mdev) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}
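
/* RSS defaults: Toeplitz hashing with a random key and a round-robin
 * indirection table, with per-traffic-type hash fields taken from the
 * static TIR configuration.
 */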
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels)
{
	enum mlx5e_traffic_types tt;

	rss_params->hfunc = ETH_RSS_HASH_TOP;
	netdev_rss_key_fill(rss_params->toeplitz_hash_key,
			    sizeof(rss_params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, num_channels);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		rss_params->rx_hash_fields[tt] =
			tirc_default_config[tt].rx_hash_fields;
}
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 rx_cq_period_mode;

	params->sw_mtu = mtu;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
				     priv->max_nch);
	params->num_tc       = 1;

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* XDP SQ */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
			MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* HW LRO */
	if (MLX5_CAP_ETH(mdev, lro_cap) &&
	    params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		/* No XSK params: checking the availability of striding RQ in general. */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			params->lro_en = !slow_pci_heuristic(mdev);
	}
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
		MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
		MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	/* TX inline */
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(rss_params, params->num_channels);
	params->tunneled_offload_en =
		mlx5e_tunnel_inner_ft_supported(mdev);

	/* AF_XDP */
	params->xsk = xsk;
}
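
/* Use the MAC address queried from firmware; VFs that were not assigned
 * an address by the PF administrator get a random one instead.
 */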
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, mdev->device);

	netdev->netdev_ops = &mlx5e_netdev_ops;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops       = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_HW_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	netdev->mpls_features    |= NETIF_F_SG;
	netdev->mpls_features    |= NETIF_F_HW_CSUM;
	netdev->mpls_features    |= NETIF_F_TSO;
	netdev->mpls_features    |= NETIF_F_TSO6;

	netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_RX;

	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
	    mlx5e_check_fragmented_striding_rq_cap(mdev))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;

	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
	    mlx5e_any_tunnel_proto_supported(mdev)) {
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->vlan_features   |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
		netdev->hw_features     |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) {
		netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
				       NETIF_F_GSO_IPXIP6;
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
					   NETIF_F_GSO_IPXIP6;
		netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
						NETIF_F_GSO_IPXIP6;
	}

	netdev->hw_features          |= NETIF_F_GSO_PARTIAL;
	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
	netdev->hw_features          |= NETIF_F_GSO_UDP_L4;
	netdev->features             |= NETIF_F_GSO_UDP_L4;

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features          = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features  &= ~NETIF_F_RXFCS;

	/* prefer CQE compression over rxhash */
	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
		netdev->features &= ~NETIF_F_RXHASH;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
#ifdef CONFIG_MLX5_ESWITCH
		netdev->hw_features      |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
		netdev->hw_features      |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;
	netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
	mlx5e_ipsec_build_netdev(priv);
	mlx5e_tls_build_netdev(priv);
}
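
/* Queue counters back the rx_out_of_buffer statistic and the drop RQ.
 * Allocation failures are not fatal: a counter ID of 0 simply means the
 * statistic is unavailable.
 */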
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}

	err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
		priv->drop_rq_q_counter = 0;
	}
}

void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
	if (priv->q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);

	if (priv->drop_rq_q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rss_params *rss = &priv->rss_params;
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params,
			       netdev->mtu);

	mlx5e_timestamp_init(priv);

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
	err = mlx5e_tls_init(priv);
	if (err)
		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_health_create_reporters(priv);

	return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_health_destroy_reporters(priv);
	mlx5e_tls_cleanup(priv);
	mlx5e_ipsec_cleanup(priv);
	mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, true);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_direct_rqts(priv, priv->xsk_tir);
	if (unlikely(err))
		goto err_destroy_direct_tirs;

	err = mlx5e_create_direct_tirs(priv, priv->xsk_tir);
	if (unlikely(err))
		goto err_destroy_xsk_rqts;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_xsk_tirs;
	}

	err = mlx5e_tc_nic_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_xsk_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
err_destroy_xsk_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	return err;
}
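
/* Teardown mirrors mlx5e_init_nic_rx() exactly, in reverse order of
 * creation (see the error-unwind labels above).
 */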
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_nic_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
	mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
}
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5e_init_l2_addr(priv);

	/* Marking the link as currently not needed by the Driver */
	if (!netif_running(netdev))
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_init(priv);

	mlx5e_hv_vhca_stats_create(priv);
	if (netdev->reg_state != NETREG_REGISTERED)
		return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);
#endif

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	mlx5e_hv_vhca_stats_destroy(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_cleanup(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
	return mlx5e_refresh_tirs(priv, false);
}
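
/* The profile is a vtable that lets one driver core serve several netdev
 * flavors (native NIC, e-switch representors, etc.); this is the native
 * NIC flavor.
 */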
static const struct mlx5e_profile mlx5e_nic_profile = {
	.init              = mlx5e_nic_init,
	.cleanup           = mlx5e_nic_cleanup,
	.init_rx           = mlx5e_init_nic_rx,
	.cleanup_rx        = mlx5e_cleanup_nic_rx,
	.init_tx           = mlx5e_init_nic_tx,
	.cleanup_tx        = mlx5e_cleanup_nic_tx,
	.enable            = mlx5e_nic_enable,
	.disable           = mlx5e_nic_disable,
	.update_rx         = mlx5e_update_nic_rx,
	.update_stats      = mlx5e_update_ndo_stats,
	.update_carrier    = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc            = MLX5E_MAX_NUM_TC,
	.rq_groups         = MLX5E_NUM_RQ_GROUPS(XSK),
	.stats_grps        = mlx5e_nic_stats_grps,
	.stats_grps_num    = mlx5e_nic_stats_grps_num,
};
/* mlx5e generic netdev management API (move to en_common.c) */

/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv)
{
	/* priv init */
	priv->mdev          = mdev;
	priv->netdev        = netdev;
	priv->profile       = profile;
	priv->ppriv         = ppriv;
	priv->msglevel      = MLX5E_MSG_LEVEL;
	priv->max_nch       = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
	priv->max_opened_tc = 1;

	if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
		return -ENOMEM;

	mutex_init(&priv->state_lock);
	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_free_cpumask;

	/* netdev init */
	netif_carrier_off(netdev);

#ifdef CONFIG_MLX5_EN_ARFS
	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
#endif

	return 0;

err_free_cpumask:
	free_cpumask_var(priv->scratchpad.cpumask);

	return -ENOMEM;
}
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
	destroy_workqueue(priv->wq);
	free_cpumask_var(priv->scratchpad.cpumask);
}
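
/* alloc_etherdev_mqs() sizes the queue arrays for the worst case:
 * nch * max_tc TX queues and nch * rq_groups RX queues (one group for
 * regular RQs and one for XSK RQs).
 */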
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       int nch, void *ppriv)
{
	struct net_device *netdev;
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch * profile->rq_groups);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	err = profile->init(mdev, netdev, profile, ppriv);
	if (err) {
		mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
		goto err_free_netdev;
	}

	return netdev;

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
	const struct mlx5e_profile *profile;
	int max_nch;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	/* max number of channels may have changed */
	max_nch = mlx5e_get_max_num_channels(priv->mdev);
	if (priv->channels.params.num_channels > max_nch) {
		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
		/* Reducing the number of channels - RXFH has to be reset, and
		 * mlx5e_num_channels_changed below will build the RQT.
		 */
		priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
		priv->channels.params.num_channels = max_nch;
	}
	/* 1. Set the real number of queues in the kernel the first time.
	 * 2. Set our default XPS cpumask.
	 * 3. Build the RQT.
	 *
	 * rtnl_lock is required by netif_set_real_num_*_queues in case the
	 * netdev has been registered by this point (if this function was called
	 * in the reload or resume flow).
	 */
	if (take_rtnl)
		rtnl_lock();
	err = mlx5e_num_channels_changed(priv);
	if (take_rtnl)
		rtnl_unlock();
	if (err)
		goto out;

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = profile->init_rx(priv);
	if (err)
		goto err_cleanup_tx;

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}
void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	profile->cleanup_rx(priv);
	profile->cleanup_tx(priv);
	cancel_work_sync(&priv->update_stats_work);
}
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}
/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
 * hardware contexts and to connect it to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}
static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
		return;
#endif

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;
	int nch;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev) &&
	    mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
		mlx5e_rep_register_vport_reps(mdev);
		return mdev;
	}
#endif

	nch = mlx5e_get_max_num_channels(mdev);
	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		return NULL;
	}

	dev_net_set(netdev, mlx5_core_net(mdev));
	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_devlink_port_register(priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
		goto err_detach;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_devlink_port_unregister;
	}

	mlx5e_devlink_port_type_eth_set(priv);

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif
	return priv;

err_devlink_port_unregister:
	mlx5e_devlink_port_unregister(priv);
err_detach:
	mlx5e_detach(mdev, priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
	return NULL;
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
		mlx5e_rep_unregister_vport_reps(mdev);
		return;
	}
#endif

	priv = vpriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	unregister_netdev(priv->netdev);
	mlx5e_devlink_port_unregister(priv);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
}
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
};
void mlx5e_init(void)
{
	mlx5e_ipsec_build_inverse_table();
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}