/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/monitor_stats.h"
#include "en/reporter.h"

struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
	struct mlx5e_rq_frags_info frags_info;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
	bool			is_mpw;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	xdp_sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;

	if (!striding_rq_umr)
		return false;
	if (!inline_umr) {
		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
		return false;
	}
	return true;
}

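/* Note: Striding RQ ("MPWQE") packs many packets into one large multi-page
 * work queue entry. The three capability bits checked above (striding_rq,
 * umr_ptr_rlky, reg_umr_sq) plus the inline-UMR size limit are all needed
 * because the driver re-maps each MPWQE's pages via UMR WQEs posted inline
 * on the ICOSQ.
 */
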
static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = params->xdp_prog ?
		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
	u32 frag_sz;

	linear_rq_headroom += NET_IP_ALIGN;

	frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);

	if (params->xdp_prog && frag_sz < PAGE_SIZE)
		frag_sz = PAGE_SIZE;

	return frag_sz;
}

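/* Sizing note: MLX5_SKB_FRAG_SZ() adds the skb_shared_info overhead on top
 * of headroom + hw MTU, so for a standard 1500-byte MTU the linear frag
 * still fits in a single page. With XDP attached, the frag is additionally
 * rounded up to a full PAGE_SIZE above so that every frame owns its page.
 */
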
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);

	return !params->lro_en && frag_sz <= PAGE_SIZE;
}

#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
					  MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params)
{
	u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
	s8 signed_log_num_strides_param;
	u8 log_num_strides;

	if (!mlx5e_rx_is_linear_skb(mdev, params))
		return false;

	if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return true;

	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
	signed_log_num_strides_param =
		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;

	return signed_log_num_strides_param >= 0;
}

static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
{
	if (params->log_rq_mtu_frames <
	    mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
}

static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params);
}

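/* Invariant: log_wqe_sz == log_stride_sz + log_num_strides. Illustrative
 * numbers: with a 256KB MPWQE (MLX5_MPWRQ_LOG_WQE_SZ == 18) and 2KB strides
 * (log_stride_sz == 11), each WQE carries 2^7 == 128 strides.
 */
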
static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params)
{
	u16 linear_rq_headroom = params->xdp_prog ?
		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
	bool is_linear_skb;

	linear_rq_headroom += NET_IP_ALIGN;

	is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
		mlx5e_rx_is_linear_skb(mdev, params) :
		mlx5e_rx_mpwqe_is_linear_skb(mdev, params);

	return is_linear_skb ? linear_rq_headroom : 0;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
		!MLX5_IPSEC_DEV(mdev) &&
		!(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

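/* Striding RQ is used only when both the device supports it in the current
 * configuration (no IPSec offload; XDP only if frames stay linear) and the
 * user has enabled the rx_striding_rq private flag; otherwise the driver
 * falls back to the cyclic (legacy) RQ.
 */
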
void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats)
			mlx5e_stats_grps[i].update_stats(priv);
}

void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats_mask &
		    MLX5E_NDO_UPDATE_STATS)
			mlx5e_stats_grps[i].update_stats(priv);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
{
	if (!priv->profile->update_stats)
		return;

	if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
		return;

	queue_work(priv->wq, &priv->update_stats_work);
}

static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
	struct mlx5_eqe   *eqe = data;

	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
		return NOTIFY_DONE;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	priv->events_nb.notifier_call = async_event;
	mlx5_notifier_register(priv->mdev, &priv->events_nb);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}

static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

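/* The UMR WQE built here is the template that the RX path reposts (with
 * refreshed MTT entries) on the ICOSQ each time an MPWQE's pages are
 * replenished: it re-registers the new pages under rq->umr_mkey before the
 * RQ hands them to the HW.
 */
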
static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
						  sizeof(*rq->mpwqe.info)),
				       GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		return -ENOMEM;

	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}

static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

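/* The mkey is created in the "free" state with umr_en set: it has no pages
 * behind it yet and becomes usable only once a UMR WQE populates its MTT
 * translation entries at runtime (see mlx5e_build_umr_wqe() above).
 */
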
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}

static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
	struct mlx5e_wqe_frag_info next_frag, *prev;
	int i;

	next_frag.di = &rq->wqe.di[0];
	next_frag.offset = 0;
	prev = NULL;

	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
		struct mlx5e_wqe_frag_info *frag =
			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
		int f;

		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
				next_frag.di++;
				next_frag.offset = 0;
				if (prev)
					prev->last_in_page = true;
			}
			*frag = next_frag;

			/* prepare next */
			next_frag.offset += frag_info[f].frag_stride;
			prev = frag;
		}
	}

	if (prev)
		prev->last_in_page = true;
}

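/* Illustration (assuming 4K pages, num_frags == 2, frag_stride == 2048):
 * WQE0 gets page0 at offsets {0, 2048}, WQE1 gets page1 at {0, 2048}, and so
 * on; the frag that exhausts a page is flagged last_in_page so the release
 * path knows when the page reference can be dropped.
 */
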
static int mlx5e_init_di_list(struct mlx5e_rq *rq,
			      struct mlx5e_params *params,
			      int wq_sz, int cpu)
{
	int len = wq_sz << rq->wqe.info.log_num_frags;

	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!rq->wqe.di)
		return -ENOMEM;

	mlx5e_init_frags_partition(rq);

	return 0;
}

static void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
	kvfree(rq->wqe.di);
}

static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct page_pool_params pp_params = { 0 };
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 pool_size;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;
	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats   = &c->priv->channel_stats[c->ix].rq;

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
	if (err < 0)
		goto err_rq_wq_destroy;

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
	pool_size = 1 << params->log_rq_mtu_frames;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
					&rq->wq_ctrl);
		if (err)
			return err;

		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

		pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.skb_from_cqe_mpwrq =
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
			mlx5e_skb_from_cqe_mpwrq_linear :
			mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
		rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_free;
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
					 &rq->wq_ctrl);
		if (err)
			return err;

		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);

		rq->wqe.info = rqp->frags_info;
		rq->wqe.frags =
			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
						 (wq_sz << rq->wqe.info.log_num_frags)),
				      GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frags) {
			err = -ENOMEM;
			goto err_free;
		}

		err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
		if (err)
			goto err_free;
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_free;
		}

		rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
			mlx5e_skb_from_cqe_linear :
			mlx5e_skb_from_cqe_nonlinear;
		rq->mkey_be = c->mkey_be;
	}

	/* Create a page_pool and register it with rxq */
	pp_params.order     = 0;
	pp_params.flags     = 0; /* No-internal DMA mapping in page_pool */
	pp_params.pool_size = pool_size;
	pp_params.nid       = cpu_to_node(c->cpu);
	pp_params.dev       = c->pdev;
	pp_params.dma_dir   = rq->buff.map_dir;

	/* page_pool can be used even when there is no rq->xdp_prog,
	 * given page_pool does not handle DMA mapping there is no
	 * required state to clear. And page_pool gracefully handle
	 * elevated refcnt.
	 */
	rq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rq->page_pool)) {
		err = PTR_ERR(rq->page_pool);
		rq->page_pool = NULL;
		goto err_free;
	}
	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, rq->page_pool);
	if (err)
		goto err_free;

	for (i = 0; i < wq_sz; i++) {
		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			struct mlx5e_rx_wqe_ll *wqe =
				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
			u32 byte_count =
				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
			wqe->data[0].byte_count = cpu_to_be32(byte_count);
			wqe->data[0].lkey = rq->mkey_be;
		} else {
			struct mlx5e_rx_wqe_cyc *wqe =
				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
			int f;

			for (f = 0; f < rq->wqe.info.num_frags; f++) {
				u32 frag_size = rq->wqe.info.arr[f].frag_size |
					MLX5_HW_START_PADDING;

				wqe->data[f].byte_count = cpu_to_be32(frag_size);
				wqe->data[f].lkey = rq->mkey_be;
			}
			/* check if num_frags is not a pow of two */
			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
				wqe->data[f].byte_count = 0;
				wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
				wqe->data[f].addr = 0;
			}
		}
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_free:
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
	struct mlx5e_channel *c = rq->channel;

	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

	do {
		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	__be16 wqe_ix_be;
	u16 wqe_ix;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

		/* UMR WQE (if in progress) is always at wq->head */
		if (rq->mpwqe.umr_in_progress)
			rq->dealloc_wqe(rq, wq->head);

		while (!mlx5_wq_ll_is_empty(wq)) {
			struct mlx5e_rx_wqe_ll *wqe;

			wqe_ix_be = *wq->tail_next;
			wqe_ix    = be16_to_cpu(wqe_ix_be);
			wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_ll_pop(wq, wqe_ix_be,
				       &wqe->next.next_wqe_index);
		}
	} else {
		struct mlx5_wq_cyc *wq = &rq->wqe.wq;

		while (!mlx5_wq_cyc_is_empty(wq)) {
			wqe_ix = mlx5_wq_cyc_get_tail(wq);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_cyc_pop(wq);
		}
	}
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_params *params,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_dim_enabled)
		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *nopwqe;

	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

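/* Why the NOP in mlx5e_activate_rq(): ringing the ICOSQ doorbell with a NOP
 * generates a completion on the channel's ICOSQ CQ, which schedules NAPI and
 * lets the poll loop start posting RX buffers for the freshly enabled RQ.
 */
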
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kvfree(sq->db.xdpi_fifo.xi);
	kvfree(sq->db.wqe_info);
}

static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	int wq_sz        = mlx5_wq_cyc_get_size(&sq->wq);
	int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
				      GFP_KERNEL, numa);
	if (!xdpi_fifo->xi)
		return -ENOMEM;

	xdpi_fifo->pc   = &sq->xdpi_fifo_pc;
	xdpi_fifo->cc   = &sq->xdpi_fifo_cc;
	xdpi_fifo->mask = dsegs_per_wq - 1;

	return 0;
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;

	sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
					GFP_KERNEL, numa);
	if (!sq->db.wqe_info)
		return -ENOMEM;

	err = mlx5e_alloc_xdpsq_fifo(sq, numa);
	if (err) {
		mlx5e_free_xdpsq_db(sq);
		return err;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq,
			     bool is_redirect)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = is_redirect ?
		&c->priv->channel_stats[c->ix].xdpsq :
		&c->priv->channel_stats[c->ix].rq_xdpsq;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kvfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
						  sizeof(*sq->db.ico_wqe)),
				       GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kvfree(sq->db.wqe_info);
	kvfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
						   sizeof(*sq->db.dma_fifo)),
					GFP_KERNEL, numa);
	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
						   sizeof(*sq->db.wqe_info)),
					GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);

static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq,
			     int tc)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
	if (mlx5_accel_is_tls_device(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl        *wq_ctrl;
	u32                         cqn;
	u32                         tisn;
	u8                          tis_lst_sz;
	u8                          min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq,
			    int tc)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_AM);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}

void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

void mlx5e_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_wq_cyc *wq = &sq->wq;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	mlx5e_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[pi].skb = NULL;
		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

*sq
)
1413 struct mlx5e_channel
*c
= sq
->channel
;
1414 struct mlx5_core_dev
*mdev
= c
->mdev
;
1415 struct mlx5_rate_limit rl
= {0};
1417 cancel_work_sync(&sq
->dim
.work
);
1418 cancel_work_sync(&sq
->recover_work
);
1419 mlx5e_destroy_sq(mdev
, sq
->sqn
);
1420 if (sq
->rate_limit
) {
1421 rl
.rate
= sq
->rate_limit
;
1422 mlx5_rl_remove_rate(mdev
, &rl
);
1424 mlx5e_free_txqsq_descs(sq
);
1425 mlx5e_free_txqsq(sq
);
1428 static void mlx5e_tx_err_cqe_work(struct work_struct
*recover_work
)
1430 struct mlx5e_txqsq
*sq
= container_of(recover_work
, struct mlx5e_txqsq
,
1433 mlx5e_tx_reporter_err_cqe(sq
);
static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq,
			    bool is_redirect)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	mlx5e_set_xmit_fp(sq, param->is_mpw);

	if (!param->is_mpw) {
		unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
		unsigned int inline_hdr_sz = 0;
		int i;

		if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
			inline_hdr_sz = MLX5E_XDP_MIN_INLINE;

		/* Pre initialize fixed WQE fields */
		for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
			struct mlx5e_xdp_wqe_info *wi  = &sq->db.wqe_info[i];
			struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
			struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
			struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
			struct mlx5_wqe_data_seg *dseg;

			cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
			eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

			dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
			dseg->lkey = sq->mkey_be;

			wi->num_wqebbs = 1;
			wi->num_pkts   = 1;
		}
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq, rq);
	mlx5e_free_xdpsq(sq);
}

static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
	if (err)
		return err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix            = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct net_dim_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

*c
,
1701 struct mlx5e_params
*params
,
1702 struct mlx5e_channel_param
*cparam
)
1707 for (tc
= 0; tc
< c
->num_tc
; tc
++) {
1708 err
= mlx5e_open_cq(c
, params
->tx_cq_moderation
,
1709 &cparam
->tx_cq
, &c
->sq
[tc
].cq
);
1711 goto err_close_tx_cqs
;
1717 for (tc
--; tc
>= 0; tc
--)
1718 mlx5e_close_cq(&c
->sq
[tc
].cq
);
1723 static void mlx5e_close_tx_cqs(struct mlx5e_channel
*c
)
1727 for (tc
= 0; tc
< c
->num_tc
; tc
++)
1728 mlx5e_close_cq(&c
->sq
[tc
].cq
);
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * max_nch;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc], tc);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	struct mlx5_rate_limit rl = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, &rl);
	}

	sq->rate_limit = 0;

	if (rate) {
		rl.rate = rate;
		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, &rl);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
				   struct mlx5e_params *params)
{
	int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
	int irq;

	if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
		return -ENOMEM;

	for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
		int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));

		cpumask_set_cpu(cpu, c->xps_cpumask);
	}

	return 0;
}

static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
{
	free_cpumask_var(c->xps_cpumask);
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
	struct net_dim_cq_moder icocq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;
	int eqn;

	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
	if (err)
		return err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->xdp      = !!params->xdp_prog;
	c->stats    = &priv->channel_stats[ix].ch;
	c->irq_desc = irq_to_desc(irq);

	err = mlx5e_alloc_xps_cpumask(c, params);
	if (err)
		goto err_free_channel;

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, params, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
	if (err)
		goto err_close_xdp_tx_cqs;

	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
				     &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
	if (err)
		goto err_close_sqs;

	err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
	if (err)
		goto err_close_rq;

	*cp = c;

	return 0;

err_close_rq:
	mlx5e_close_rq(&c->rq);

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);

	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_xdp_tx_cqs:
	mlx5e_close_cq(&c->xdpsq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
err_free_channel:
	mlx5e_free_xps_cpumask(c);
	kvfree(c);

	return err;
}

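/* Open/teardown ordering above is deliberate: all CQs are opened before any
 * SQ/RQ that completes onto them, NAPI is enabled before the ICOSQ whose
 * completions it must service, and the error labels unwind in exactly the
 * reverse order of the opens.
 */
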
static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq, NULL);
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_cq(&c->xdpsq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);
	mlx5e_free_xps_cpumask(c);

	kvfree(c);
}

#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

#ifdef CONFIG_MLX5_EN_IPSEC
	if (MLX5_IPSEC_DEV(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;
#endif

	if (mlx5e_rx_is_linear_skb(mdev, params)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	if (byte_count > PAGE_SIZE +
	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SIZE;

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);

		buf_size += frag_size;
		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);
}

static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 mlx5e_mpwqe_get_log_num_strides(mdev, params) -
			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 mlx5e_mpwqe_get_log_stride_size(mdev, params) -
			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, &param->frags_info);
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
				      struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
	if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, &cparam->rq);
	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	if (!IS_ERR_OR_NULL(priv->tx_reporter))
		devlink_health_reporter_state_update(priv->tx_reporter,
						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);

	kvfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kvfree(cparam);
	chs->num = 0;
	return err;
}

static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++)
		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
						  err ? 0 : 20000);

	return err ? -ETIMEDOUT : 0;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}

static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	/* Point all entries at the drop RQ until real RQs are redirected in */
	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}
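/* Reverse the low @size bits of @a, e.g. for size = 3: 0b110 -> 0b011.
 * Used when filling the RQT for the inverted XOR8 hash function, see
 * mlx5e_fill_rqt_rqns() below.
 */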
int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->rss_params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}
static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss = true,
		{
			.rss = {
				.channels = chs,
				.hfunc    = priv->rss_params.hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}
static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
	[MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
			    .rx_hash_fields = MLX5_HASH_IP,
	},
	[MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
			    .rx_hash_fields = MLX5_HASH_IP,
	},
};

struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
{
	return tirc_default_config[tt];
}
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
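/* Set the RSS hash configuration of a TIR context: the hash function
 * (Toeplitz or inverted XOR8), the Toeplitz key when applicable, and the
 * per-traffic-type field selectors, for either the outer or the inner
 * (tunneled) packet headers.
 */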
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner)
{
	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
	if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, rss_params->toeplitz_hash_key, len);
	}
	MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		 ttconfig->l3_prot_type);
	MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
		 ttconfig->l4_prot_type);
	MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		 ttconfig->rx_hash_fields);
}
static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
					enum mlx5e_traffic_types tt,
					u32 rx_hash_fields)
{
	*ttconfig                = tirc_default_config[tt];
	ttconfig->rx_hash_fields = rx_hash_fields;
}
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
{
	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
	struct mlx5e_rss_params *rss = &priv->rss_params;
	struct mlx5_core_dev *mdev = priv->mdev;
	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
	struct mlx5e_tirc_config ttconfig;
	int tt;

	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(tirc, 0, ctxlen);
		mlx5e_update_rx_hash_fields(&ttconfig, tt,
					    rss->rx_hash_fields[tt]);
		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(tirc, 0, ctxlen);
		mlx5e_update_rx_hash_fields(&ttconfig, tt,
					    rss->rx_hash_fields[tt]);
		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
		mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
				     inlen);
	}
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}
static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);

	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
				       &tirc_default_config[tt], tirc, true);
}
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params, u16 mtu)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params, u16 *mtu)
{
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
	if (err)
		return err;

	mlx5e_query_mtu(mdev, params, &mtu);
	if (mtu != params->sw_mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, params->sw_mtu);

	params->sw_mtu = mtu;
	return 0;
}
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
	int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i, tc;

	for (i = 0; i < max_nch; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * max_nch;
}
static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_tx2sq_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	mlx5e_xdp_tx_enable(priv);
	netif_tx_start_all_queues(priv->netdev);

	if (mlx5e_is_vport_rep(priv))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (mlx5e_is_vport_rep(priv))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_xdp_tx_disable(priv);
	mlx5e_deactivate_channels(&priv->channels);
}
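/* Channel reconfiguration is done make-before-break: the new set of
 * channels is fully opened first (mlx5e_safe_switch_channels()), and only
 * then traffic is moved over and the old set is released, so a failure
 * leaves the device running on its previous channels.
 */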
static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				       struct mlx5e_channels *new_chs,
				       mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	bool carrier_ok;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}

int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_hw_modify hw_modify)
{
	int err;

	err = mlx5e_open_channels(priv, new_chs);
	if (err)
		return err;

	mlx5e_switch_priv_channels(priv, new_chs, hw_modify);
	return 0;
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}

int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);

	mlx5e_queue_update_stats(priv);
	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
		udp_tunnel_get_rx_info(netdev);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
				 &rq->wq_ctrl);
	if (err)
		return err;

	/* Mark as unused given "Drop-RQ" packets never reach XDP */
	xdp_rxq_info_unused(&rq->xdp_rxq);

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
	param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);

	return mlx5e_alloc_cq_common(mdev, param, cq);
}

int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(priv, &rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
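/* A TIS (transport interface send) is created per traffic class; the TC
 * is placed in the prio field shifted by one bit, hence the (tc << 1)
 * below.
 */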
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	mlx5e_tx_reporter_destroy(priv);
	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);

	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
				       &tirc_default_config[tt], tirc, false);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = mlx5e_get_netdev_max_channels(priv->netdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);

	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}
static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
	if (err)
		goto out;

	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
				    new_channels.params.num_tc);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct tc_cls_flower_offload *cls_flower,
				     int flags)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				   void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
						 MLX5E_TC_NIC_OFFLOAD);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block(struct net_device *dev,
				struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
					priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
	case TC_SETUP_BLOCK:
		return mlx5e_setup_tc_block(dev, type_data);
#endif
	case TC_SETUP_QDISC_MQPRIO:
		return mlx5e_setup_tc_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
{
	int i;

	for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
		struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		int j;

		s->rx_packets   += rq_stats->packets;
		s->rx_bytes     += rq_stats->bytes;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets    += sq_stats->packets;
			s->tx_bytes      += sq_stats->bytes;
			s->tx_dropped    += sq_stats->dropped;
		}
	}
}

static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (!mlx5e_monitor_counter_supported(priv)) {
		/* update HW stats in background for next time */
		mlx5e_queue_update_stats(priv);
	}

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		mlx5e_fold_sw_stats64(priv, stats);
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}
#define MLX5E_SET_FEATURE(features, feature, enable)	\
	do {						\
		if (enable)				\
			*features |= feature;		\
		else					\
			*features &= ~feature;		\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *old_params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	old_params = &priv->channels.params;
	if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		netdev_warn(netdev, "can't set LRO with legacy RQ\n");
		err = -EINVAL;
		goto out;
	}

	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *old_params;
	new_channels.params.lro_en = enable;

	if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
			reset = false;
	}

	if (!reset) {
		*old_params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_cvlan_filter(priv);
	else
		mlx5e_disable_cvlan_filter(priv);

	return 0;
}

#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}
#endif

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif
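/* Generic plumbing for ndo_set_features: each NETIF_F_* bit is routed to
 * its mlx5e_feature_handler, and a handler failure leaves that bit
 * unchanged in the resulting feature mask so netdev->features keeps
 * matching the HW state.
 */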
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t *features,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(features, feature, enable);
	return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	netdev_features_t oper_features = netdev->features;
	int err = 0;

#define MLX5E_HANDLE_FEATURE(feature, handler) \
	mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)

	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_cvlan_filter);
#ifdef CONFIG_MLX5_ESWITCH
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
#endif
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif

	if (err) {
		netdev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_params *params;

	mutex_lock(&priv->state_lock);
	params = &priv->channels.params;
	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
		/* HW strips the outer C-tag header, this is a problem
		 * for S-tag traffic.
		 */
		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		if (!params->vlan_strip_disable)
			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
	}
	if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		features &= ~NETIF_F_LRO;
		if (params->lro_en)
			netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
	}

	mutex_unlock(&priv->state_lock);

	return features;
}
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     change_hw_mtu_cb set_mtu_cb)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	params = &priv->channels.params;

	reset = !params->lro_en;
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *params;
	new_channels.params.sw_mtu = new_mtu;

	if (params->xdp_prog &&
	    !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
			   new_mtu, MLX5E_XDP_MAX_MTU);
		err = -EINVAL;
		goto out;
	}

	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);

		reset = reset && (is_linear || (ppw_old != ppw_new));
	}

	if (!reset) {
		params->sw_mtu = new_mtu;
		if (set_mtu_cb)
			set_mtu_cb(priv);
		netdev->mtu = params->sw_mtu;
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb);
	if (err)
		goto out;

	netdev->mtu = new_channels.params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
	    (mlx5_clock_get_ptp_index(priv->mdev) == -1))
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* Reset CQE compression to Admin default */
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		/* Disable CQE compression */
		netdev_warn(priv->netdev, "Disabling cqe compression");
		err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
		if (err) {
			netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
			mutex_unlock(&priv->state_lock);
			return err;
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		mutex_unlock(&priv->state_lock);
		return -ERANGE;
	}

	memcpy(&priv->tstamp, &config, sizeof(config));
	mutex_unlock(&priv->state_lock);

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config *cfg = &priv->tstamp;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
		      int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

int mlx5e_get_vf_config(struct net_device *dev,
			int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

int mlx5e_get_vf_stats(struct net_device *dev,
		       int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
#endif
struct mlx5e_vxlan_work {
	struct work_struct work;
	struct mlx5e_priv *priv;
	u16 port;
};

static void mlx5e_vxlan_add_work(struct work_struct *work)
{
	struct mlx5e_vxlan_work *vxlan_work =
		container_of(work, struct mlx5e_vxlan_work, work);
	struct mlx5e_priv *priv = vxlan_work->priv;
	u16 port = vxlan_work->port;

	mutex_lock(&priv->state_lock);
	mlx5_vxlan_add_port(priv->mdev->vxlan, port);
	mutex_unlock(&priv->state_lock);

	kfree(vxlan_work);
}

static void mlx5e_vxlan_del_work(struct work_struct *work)
{
	struct mlx5e_vxlan_work *vxlan_work =
		container_of(work, struct mlx5e_vxlan_work, work);
	struct mlx5e_priv *priv = vxlan_work->priv;
	u16 port = vxlan_work->port;

	mutex_lock(&priv->state_lock);
	mlx5_vxlan_del_port(priv->mdev->vxlan, port);
	mutex_unlock(&priv->state_lock);
	kfree(vxlan_work);
}

static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
{
	struct mlx5e_vxlan_work *vxlan_work;

	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
	if (!vxlan_work)
		return;

	if (add)
		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
	else
		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);

	vxlan_work->priv = priv;
	vxlan_work->port = port;
	queue_work(priv->wq, &vxlan_work->work);
}
void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
		return;

	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}

void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
		return;

	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
}
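/* For encapsulated TX packets, keep checksum and GSO offloads only when
 * the HW can actually offload the tunnel: GRE, or UDP with a destination
 * port that was registered in the vxlan port table above.
 */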
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	unsigned int offset = 0;
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}

	switch (proto) {
	case IPPROTO_GRE:
		return features;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);

		/* Verify if UDP port is being offloaded by HW */
		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
			return features;
	}

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	bool report_failed = false;
	int err;
	int i;

	rtnl_lock();
	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct netdev_queue *dev_queue =
			netdev_get_tx_queue(priv->netdev, i);
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(dev_queue))
			continue;

		if (mlx5e_tx_reporter_timeout(sq))
			report_failed = true;
	}

	if (!report_failed)
		goto unlock;

	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev,
			   "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);

unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX timeout detected\n");
	queue_work(priv->wq, &priv->tx_timeout_work);
}
static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5e_channels new_channels = {};

	if (priv->channels.params.lro_en) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		return -EINVAL;
	}

	if (MLX5_IPSEC_DEV(priv->mdev)) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		return -EINVAL;
	}

	new_channels.params = priv->channels.params;
	new_channels.params.xdp_prog = prog;

	if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
			    new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
		return -EINVAL;
	}

	return 0;
}
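/* Install/remove an XDP program. A full close/open cycle is needed only
 * when XDP is turned on or off, since the RQ type changes; replacing one
 * program with another is done by atomically exchanging the program
 * pointer per RQ while that RQ is temporarily disabled.
 */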
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	bool reset, was_opened;
	int err = 0;
	int i;

	mutex_lock(&priv->state_lock);

	if (prog) {
		err = mlx5e_xdp_allowed(priv, prog);
		if (err)
			goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* exchange programs, extra prog reference we got from caller
	 * as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				struct net_device *dev, u32 filter_mask,
				int nlflags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 mode, setting;
	int err;

	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
	if (err)
		return err;
	mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       mode,
				       0, 0, nlflags, filter_mask, NULL);
}

static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				u16 flags, struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct nlattr *attr, *br_spec;
	u16 mode = BRIDGE_MODE_UNDEF;
	u8 setting;
	int rem;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode > BRIDGE_MODE_VEPA)
			return -EINVAL;

		break;
	}

	if (mode == BRIDGE_MODE_UNDEF)
		return -EINVAL;

	setting = (mode == BRIDGE_MODE_VEPA) ?  1 : 0;
	return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
}
#endif
const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_fix_features        = mlx5e_fix_features,
	.ndo_change_mtu          = mlx5e_change_nic_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_bpf                 = mlx5e_xdp,
	.ndo_xdp_xmit            = mlx5e_xdp_xmit,
#ifdef CONFIG_MLX5_EN_ARFS
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	.ndo_bridge_setlink      = mlx5e_bridge_setlink,
	.ndo_bridge_getlink      = mlx5e_bridge_getlink,

	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
#endif
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
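/* Consider the PCI link a bottleneck when the port's maximum link speed
 * exceeds MLX5E_SLOW_PCI_RATIO times the available PCI bandwidth. This
 * heuristic steers defaults such as CQE compression, LRO and the RQ type.
 */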
static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct net_dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct net_dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - Slow PCI heuristic.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 */
	if (!slow_pci_heuristic(mdev) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
	     !mlx5e_rx_is_linear_skb(mdev, params)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels)
{
	enum mlx5e_traffic_types tt;

	rss_params->hfunc = ETH_RSS_HASH_XOR;
	netdev_rss_key_fill(rss_params->toeplitz_hash_key,
			    sizeof(rss_params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, num_channels);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		rss_params->rx_hash_fields[tt] =
			tirc_default_config[tt].rx_hash_fields;
}
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu)
{
	u8 rx_cq_period_mode;

	params->sw_mtu = mtu;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->num_channels = max_channels;

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* XDP SQ */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
			MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* HW LRO */
	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
			params->lro_en = !slow_pci_heuristic(mdev);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	/* TX inline */
	params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);

	/* RSS */
	mlx5e_build_rss_params(rss_params, params->num_channels);
}
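
/* Take the permanent MAC address from the NIC vport context. If the
 * device reports a zero address and this function is not the vport
 * group manager (e.g. a VF whose MAC was never set), fall back to a
 * random address.
 */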
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}
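
/* Set up netdev ops and advertise the offload feature set supported by
 * the device: VLAN offloads, LRO, VXLAN/GRE tunnel offloads, FCS
 * handling, and flow-steering dependent features (TC, aRFS).
 */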
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	netdev->netdev_ops = &mlx5e_netdev_ops;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_RX;

	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
	    mlx5e_check_fragmented_striding_rq_cap(mdev))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;

	if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5_vxlan_allowed(mdev->vxlan)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features     |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	netdev->hw_features          |= NETIF_F_GSO_PARTIAL;
	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
	netdev->hw_features          |= NETIF_F_GSO_UDP_L4;
	netdev->features             |= NETIF_F_GSO_UDP_L4;

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features          = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features  &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
#ifdef CONFIG_MLX5_ESWITCH
		netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;
	netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
	mlx5e_ipsec_build_netdev(priv);
	mlx5e_tls_build_netdev(priv);
}
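
/* Queue counters are best-effort: if allocation fails, the handle is
 * left as 0 and the corresponding statistics are simply not collected.
 */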
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}

	err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
		priv->drop_rq_q_counter = 0;
	}
}

void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
	if (priv->q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);

	if (priv->drop_rq_q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}

static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rss_params *rss = &priv->rss_params;
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	mlx5e_build_nic_params(mdev, rss, &priv->channels.params,
			       mlx5e_get_netdev_max_channels(netdev),
			       netdev->mtu);

	mlx5e_timestamp_init(priv);

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
	err = mlx5e_tls_init(priv);
	if (err)
		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_build_tc2txq_maps(priv);

	return 0;
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_tls_cleanup(priv);
	mlx5e_ipsec_cleanup(priv);
	mlx5e_netdev_cleanup(priv->netdev, priv);
}
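
/* RX side of the NIC profile: drop RQ, indirect/direct RQTs and TIRs,
 * flow steering and TC tables, created in order and unwound in reverse
 * on failure.
 */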
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, true);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_nic_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_nic_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv, true);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	mlx5e_tx_reporter_create(priv);
	return 0;
}
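
/* Called once the netdev is attached to its HW resources: sync MAC,
 * port MTU and admin state with the netdev, and start async events and
 * monitor counters.
 */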
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_init_l2_addr(priv);

	/* Marking the link as currently not needed by the Driver */
	if (!netif_running(netdev))
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_init(priv);

	if (netdev->reg_state != NETREG_REGISTERED)
		return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}
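
/* Counterpart of mlx5e_nic_enable: detach the netdev from the port
 * under RTNL and stop async event processing.
 */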
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);
#endif

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_cleanup(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_ndo_stats,
	.update_carrier	   = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};

/* mlx5e generic netdev management API (move to en_common.c) */

/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv)
{
	/* priv init */
	priv->mdev        = mdev;
	priv->netdev      = netdev;
	priv->profile     = profile;
	priv->ppriv       = ppriv;
	priv->msglevel    = MLX5E_MSG_LEVEL;
	priv->max_opened_tc = 1;

	mutex_init(&priv->state_lock);
	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		return -ENOMEM;

	/* netdev init */
	netif_carrier_off(netdev);

#ifdef CONFIG_MLX5_EN_ARFS
	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
#endif

	return 0;
}

void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
	destroy_workqueue(priv->wq);
}

struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       int nch, void *ppriv)
{
	struct net_device *netdev;
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	err = profile->init(mdev, netdev, profile, ppriv);
	if (err) {
		mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
		goto err_free_netdev;
	}

	return netdev;

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}
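
/* (Re)create the profile's TX and RX HW contexts for the current
 * parameters. The channel count is clamped first, since the number of
 * channels the device supports may have changed while detached.
 */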
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile;
	int max_nch;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	/* max number of channels may have changed */
	max_nch = mlx5e_get_max_num_channels(priv->mdev);
	if (priv->channels.params.num_channels > max_nch) {
		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
		priv->channels.params.num_channels = max_nch;
		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
					      MLX5E_INDIR_RQT_SIZE, max_nch);
	}

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = profile->init_rx(priv);
	if (err)
		goto err_cleanup_tx;

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}
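
/* Tear down the profile's HW contexts. The DESTROYING bit is set first
 * so pending work items bail out before the workqueue is flushed.
 */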
void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	profile->cleanup_rx(priv);
	profile->cleanup_tx(priv);
	cancel_work_sync(&priv->update_stats_work);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}

/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
 * hardware contexts and connecting them to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}
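
/* mlx5_interface add() callback. In switchdev offloads mode only the
 * vport representors are registered; otherwise a NIC netdev is created,
 * attached to HW resources and registered.
 */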
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	void *priv;
	int err;
	int nch;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev) &&
	    mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
		mlx5e_rep_register_vport_reps(mdev);
		return mdev;
	}
#endif

	nch = mlx5e_get_max_num_channels(mdev);
	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		return NULL;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif
	return priv;

err_detach:
	mlx5e_detach(mdev, priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
	return NULL;
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
		mlx5e_rep_unregister_vport_reps(mdev);
		return;
	}
#endif

	priv = vpriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
}
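
/* Ethernet protocol hooks registered with the mlx5 core. */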
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
};

void mlx5e_init(void)
{
	mlx5e_ipsec_build_inverse_table();
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}