/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include <net/page_pool.h>
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
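/* Per-channel resource parameters. Each struct below carries the firmware
 * context layout (rqc/sqc/cqc) for one queue type plus the work-queue
 * parameters used when creating the corresponding hardware object.
 */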
struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
	struct mlx5e_rq_frags_info frags_info;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	xdp_sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};
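/* Striding RQ (MPWQE) support: one receive WQE covers multiple packets in
 * fixed-size strides. The helpers below verify device capabilities and
 * derive the stride/WQE geometry from the MTU, XDP and LRO configuration.
 */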
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;

	if (!striding_rq_umr)
		return false;
	if (!inline_umr) {
		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
		return false;
	}
	return true;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = params->xdp_prog ?
		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
	u32 frag_sz;

	linear_rq_headroom += NET_IP_ALIGN;

	frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);

	if (params->xdp_prog && frag_sz < PAGE_SIZE)
		frag_sz = PAGE_SIZE;

	return frag_sz;
}

static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);

	return !params->lro_en && frag_sz <= PAGE_SIZE;
}

static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params)
{
	u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
	s8 signed_log_num_strides_param;
	u8 log_num_strides;

	if (!mlx5e_rx_is_linear_skb(mdev, params))
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return true;

	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
	signed_log_num_strides_param =
		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;

	return signed_log_num_strides_param >= 0;
}

static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
{
	if (params->log_rq_mtu_frames <
	    mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
}

static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params));

	return MLX5E_MPWQE_STRIDE_SZ(mdev,
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
					  struct mlx5e_params *params)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params);
}

static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params)
{
	u16 linear_rq_headroom = params->xdp_prog ?
		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
	bool is_linear_skb;

	linear_rq_headroom += NET_IP_ALIGN;

	is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
		mlx5e_rx_is_linear_skb(mdev, params) :
		mlx5e_rx_mpwqe_is_linear_skb(mdev, params);

	return is_linear_skb ? linear_rq_headroom : 0;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
		!MLX5_IPSEC_DEV(mdev) &&
		!(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats)
			mlx5e_stats_grps[i].update_stats(priv);
}

static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats_mask &
		    MLX5E_NDO_UPDATE_STATS)
			mlx5e_stats_grps[i].update_stats(priv);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
{
	if (!priv->profile->update_stats)
		return;

	if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
		return;

	queue_work(priv->wq, &priv->update_stats_work);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}
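/* Initialize the static parts of the UMR WQE that the ICO SQ posts to map
 * MPWQE pages: a control segment addressed at this SQ with completion
 * update enabled, and a UMR control segment sized for
 * MLX5_MPWRQ_PAGES_PER_WQE inline MTT translation entries.
 */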
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
						  sizeof(*rq->mpwqe.info)),
				       GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		return -ENOMEM;

	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}

static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}

static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
	struct mlx5e_wqe_frag_info next_frag, *prev;
	int i;

	next_frag.di = &rq->wqe.di[0];
	next_frag.offset = 0;
	prev = NULL;

	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
		struct mlx5e_wqe_frag_info *frag =
			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
		int f;

		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
				next_frag.di++;
				next_frag.offset = 0;
				if (prev)
					prev->last_in_page = true;
			}
			*frag = next_frag;

			/* prepare next */
			next_frag.offset += frag_info[f].frag_stride;
			prev = frag;
		}
	}

	if (prev)
		prev->last_in_page = true;
}

static int mlx5e_init_di_list(struct mlx5e_rq *rq,
			      struct mlx5e_params *params,
			      int wq_sz, int cpu)
{
	int len = wq_sz << rq->wqe.info.log_num_frags;

	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!rq->wqe.di)
		return -ENOMEM;

	mlx5e_init_frags_partition(rq);

	return 0;
}

static void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
	kvfree(rq->wqe.di);
}
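/* Allocate all software state of an RQ: clone the XDP program reference,
 * register xdp_rxq info, create the striding (mpwqe) or cyclic (wqe) work
 * queue, size a page_pool for the ring, and pre-fill every receive WQE
 * with its DMA address, byte count and lkey.
 */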
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct page_pool_params pp_params = { 0 };
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 pool_size;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;
	rq->stats   = &c->priv->channel_stats[c->ix].rq;

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
	if (err < 0)
		goto err_rq_wq_destroy;

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
	pool_size = 1 << params->log_rq_mtu_frames;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
					&rq->wq_ctrl);
		if (err)
			return err;

		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

		pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.skb_from_cqe_mpwrq =
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
			mlx5e_skb_from_cqe_mpwrq_linear :
			mlx5e_skb_from_cqe_mpwrq_nonlinear;
		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
		rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_free;
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
					 &rq->wq_ctrl);
		if (err)
			return err;

		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);

		rq->wqe.info = rqp->frags_info;
		rq->wqe.frags =
			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
						 (wq_sz << rq->wqe.info.log_num_frags)),
				      GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frags) {
			err = -ENOMEM;
			goto err_free;
		}

		err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
		if (err)
			goto err_free;
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_free;
		}

		rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
			mlx5e_skb_from_cqe_linear :
			mlx5e_skb_from_cqe_nonlinear;
		rq->mkey_be = c->mkey_be;
	}

	/* Create a page_pool and register it with rxq */
	pp_params.order     = 0;
	pp_params.flags     = 0; /* No-internal DMA mapping in page_pool */
	pp_params.pool_size = pool_size;
	pp_params.nid       = cpu_to_node(c->cpu);
	pp_params.dev       = c->pdev;
	pp_params.dma_dir   = rq->buff.map_dir;

	/* page_pool can be used even when there is no rq->xdp_prog,
	 * given page_pool does not handle DMA mapping there is no
	 * required state to clear. And page_pool gracefully handle
	 * elevated refcnt.
	 */
	rq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rq->page_pool)) {
		err = PTR_ERR(rq->page_pool);
		rq->page_pool = NULL;
		goto err_free;
	}
	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, rq->page_pool);
	if (err)
		goto err_free;

	for (i = 0; i < wq_sz; i++) {
		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			struct mlx5e_rx_wqe_ll *wqe =
				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
			u32 byte_count =
				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
			wqe->data[0].byte_count = cpu_to_be32(byte_count);
			wqe->data[0].lkey = rq->mkey_be;
		} else {
			struct mlx5e_rx_wqe_cyc *wqe =
				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
			int f;

			for (f = 0; f < rq->wqe.info.num_frags; f++) {
				u32 frag_size = rq->wqe.info.arr[f].frag_size |
					MLX5_HW_START_PADDING;

				wqe->data[f].byte_count = cpu_to_be32(frag_size);
				wqe->data[f].lkey = rq->mkey_be;
			}
			/* check if num_frags is not a pow of two */
			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
				wqe->data[f].byte_count = 0;
				wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
				wqe->data[f].addr = 0;
			}
		}
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_free:
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	if (rq->page_pool)
		page_pool_destroy(rq->page_pool);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
	struct mlx5e_channel *c = rq->channel;

	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

	do {
		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	__be16 wqe_ix_be;
	u16 wqe_ix;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

		/* UMR WQE (if in progress) is always at wq->head */
		if (rq->mpwqe.umr_in_progress)
			rq->dealloc_wqe(rq, wq->head);

		while (!mlx5_wq_ll_is_empty(wq)) {
			struct mlx5e_rx_wqe_ll *wqe;

			wqe_ix_be = *wq->tail_next;
			wqe_ix    = be16_to_cpu(wqe_ix_be);
			wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_ll_pop(wq, wqe_ix_be,
				       &wqe->next.next_wqe_index);
		}
	} else {
		struct mlx5_wq_cyc *wq = &rq->wqe.wq;

		while (!mlx5_wq_cyc_is_empty(wq)) {
			wqe_ix = mlx5_wq_cyc_get_tail(wq);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_cyc_pop(wq);
		}
	}
}
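/* RQ open flow: allocate software state, create the hardware RQ (born in
 * RST state), move it RST -> RDY, then apply DIM and checksum flags.
 * Teardown on error runs in reverse.
 */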
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_params *params,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_dim_enabled)
		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *nopwqe;

	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kvfree(sq->db.xdpi);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
				    GFP_KERNEL, numa);
	if (!sq->db.xdpi) {
		mlx5e_free_xdpsq_db(sq);
		return -ENOMEM;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq,
			     bool is_redirect)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = is_redirect ?
		&c->priv->channel_stats[c->ix].xdpsq :
		&c->priv->channel_stats[c->ix].rq_xdpsq;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kvfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
						  sizeof(*sq->db.ico_wqe)),
				       GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kvfree(sq->db.wqe_info);
	kvfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
						   sizeof(*sq->db.dma_fifo)),
					GFP_KERNEL, numa);
	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
						   sizeof(*sq->db.wqe_info)),
					GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}
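/* A TXQ SQ owns a DMA fifo (one entry per data segment) and a per-WQE info
 * array, both sized from the cyclic WQ; mlx5e_alloc_txqsq() below also
 * wires up the recovery and DIM work items.
 */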
static void mlx5e_sq_recover(struct work_struct *work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq,
			     int tc)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
	INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
	if (mlx5_accel_is_tls_device(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl        *wq_ctrl;
	u32                         cqn;
	u32                         tisn;
	u8                          tis_lst_sz;
	u8                          min_inline_mode;
};
static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	bool rl_update;
	int rl_index;
};

static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);
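/* SQ open flow mirrors the RQ: allocate, then mlx5e_create_sq_rdy() creates
 * the SQ in RST state and immediately modifies it to RDY; any cached
 * per-queue rate limit is re-applied afterwards.
 */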
static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq,
			    int tc)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_AM);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}

static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
{
	WARN_ONCE(sq->cc != sq->pc,
		  "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
		  sq->sqn, sq->cc, sq->pc);
	sq->cc = 0;
	sq->dma_fifo_cc = 0;
	sq->pc = 0;
}

static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_wq_cyc *wq = &sq->wq;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[pi].skb = NULL;
		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_rate_limit rl = {0};

	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		mlx5_rl_remove_rate(mdev, &rl);
	}
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
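/* SQ error recovery: drain the SQ (wait until cc == pc), bounce it through
 * ERR -> RST -> RDY and reset the software producer/consumer counters.
 * Recovery is rate-limited to avoid an ERR_CQE -> recover loop.
 */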
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(2000);

	while (time_before(jiffies, exp_time)) {
		if (sq->cc == sq->pc)
			return 0;

		msleep(20);
	}

	netdev_err(sq->channel->netdev,
		   "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
		   sq->sqn, sq->cc, sq->pc);

	return -ETIMEDOUT;
}

static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
{
	struct mlx5_core_dev *mdev = sq->channel->mdev;
	struct net_device *dev = sq->channel->netdev;
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	msp.curr_state = curr_state;
	msp.next_state = MLX5_SQC_STATE_RST;

	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
		return err;
	}

	memset(&msp, 0, sizeof(msp));
	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;

	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
		return err;
	}

	return 0;
}

static void mlx5e_sq_recover(struct work_struct *work)
{
	struct mlx5e_txqsq_recover *recover =
		container_of(work, struct mlx5e_txqsq_recover,
			     recover_work);
	struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
					      recover);
	struct mlx5_core_dev *mdev = sq->channel->mdev;
	struct net_device *dev = sq->channel->netdev;
	u8 state;
	int err;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		return;
	}

	if (state != MLX5_RQC_STATE_ERR) {
		netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
		return;
	}

	netif_tx_disable_queue(sq->txq);

	if (mlx5e_wait_for_sq_flush(sq))
		return;

	/* If the interval between two consecutive recovers per SQ is too
	 * short, don't recover to avoid infinite loop of ERR_CQE -> recover.
	 * If we reached this state, there is probably a bug that needs to be
	 * fixed. let's keep the queue close and let tx timeout cleanup.
	 */
	if (jiffies_to_msecs(jiffies - recover->last_recover) <
	    MLX5E_SQ_RECOVER_MIN_INTERVAL) {
		netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
			   sq->sqn);
		return;
	}

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. SQ can safely reset the SQ.
	 */
	if (mlx5e_sq_to_ready(sq, state))
		return;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats->recover++;
	recover->last_recover = jiffies;
	mlx5e_activate_txqsq(sq);
}
static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq,
			    bool is_redirect)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	if (is_redirect)
		set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}
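/* CQ helpers. Note: freshly allocated CQEs get op_own = 0xf1 (hardware
 * ownership, invalid opcode) so the poller never mistakes them for valid
 * completions.
 */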
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix   = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct net_dim_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * max_nch;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc], tc);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	struct mlx5_rate_limit rl = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, &rl);
	}

	sq->rate_limit = 0;

	if (rate) {
		rl.rate = rate;
		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, &rl);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
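/* Channel open order matters: all CQs first (ICO SQ, per-TC TX, XDP-TX,
 * RX and the XDP-SQ CQ), then napi_enable(), then the ICO SQ, TXQ SQs,
 * the XDP SQs and finally the RQ; the error unwind below runs strictly
 * in reverse.
 */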
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_dim_cq_moder icocq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;
	int eqn;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->xdp      = !!params->xdp_prog;
	c->stats    = &priv->channel_stats[ix].ch;

	mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
	c->irq_desc = irq_to_desc(irq);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, params, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
	if (err)
		goto err_close_xdp_tx_cqs;

	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
				     &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
	if (err)
		goto err_close_sqs;

	err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
	if (err)
		goto err_close_rq;

	*cp = c;

	return 0;

err_close_rq:
	mlx5e_close_rq(&c->rq);

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_xdp_tx_cqs:
	mlx5e_close_cq(&c->xdpsq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	kvfree(c);

	return err;
}
static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq);
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_cq(&c->xdpsq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kvfree(c);
}
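/* Decide how received packets are scattered for the cyclic (legacy) RQ:
 * a single linear fragment when the frame fits in one page, otherwise up
 * to MLX5E_MAX_RX_FRAGS fragments of at most DEFAULT_FRAG_SIZE (PAGE_SIZE
 * for very large MTUs), each stride rounded up to a power of two.
 */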
#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

#ifdef CONFIG_MLX5_EN_IPSEC
	if (MLX5_IPSEC_DEV(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;
#endif

	if (mlx5e_rx_is_linear_skb(mdev, params)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	if (byte_count > PAGE_SIZE +
	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SIZE;

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);

		buf_size += frag_size;
		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);
}
static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 mlx5e_mpwqe_get_log_num_strides(mdev, params) -
			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 mlx5e_mpwqe_get_log_stride_size(mdev, params) -
			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, &param->frags_info);
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
				      struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, &cparam->rq);
	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	kvfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kvfree(cparam);
	chs->num = 0;
	return err;
}
static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++)
		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
						  err ? 0 : 20000);

	return err ? -ETIMEDOUT : 0;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}
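/* RQ tables (RQTs) are created with every entry pointing at the drop
 * RQ; real channel RQNs are plugged in later through
 * mlx5e_redirect_rqt(), so the table is always in a safe state.
 */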
static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
			    struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
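/* Fill the RQT entries from the redirect parameters: either a single
 * direct RQN for every entry, or an RSS spread over the channels via
 * the indirection table. When the XOR hash is used, the index is
 * bit-inverted to match the HW inverted-XOR8 hash function.
 */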
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->channels.params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}

		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}
static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss        = true,
		{
			.rss = {
				.channels  = chs,
				.hfunc     = chs->params.rss_hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}
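/* TIR LRO context: lro_max_ip_payload_size is programmed in units of
 * 256 bytes (hence the >> 8), after reserving a rough worst-case
 * L2+L3 header size out of the LRO WQE size.
 */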
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
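/* Program the RX hash for one traffic type: pick the hash function
 * (Toeplitz or inverted XOR8), copy the Toeplitz key when applicable,
 * and select the hashed packet fields (L3 addresses, L4 ports or
 * IPsec SPI). 'inner' targets the inner-header field selector used
 * for tunneled traffic.
 */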
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner)
{
	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
	if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, params->toeplitz_hash_key, len);
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);
	return err;
}
static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);

	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
}
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params, u16 mtu)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params, u16 *mtu)
{
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
	if (err)
		return err;

	mlx5e_query_mtu(mdev, params, &mtu);
	if (mtu != params->sw_mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, params->sw_mtu);

	params->sw_mtu = mtu;
	return 0;
}
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
	int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i, tc;

	for (i = 0; i < max_nch; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * max_nch;
}

static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_tx2sq_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	netif_tx_start_all_queues(priv->netdev);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_deactivate_channels(&priv->channels);
}
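/* Swap in a freshly opened channels set with minimal disruption:
 * carrier is taken down, old channels are deactivated and closed, an
 * optional hw_modify() callback applies HW changes that must happen
 * between close and activate, then carrier is restored if it was up.
 */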
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	bool carrier_ok;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}

void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);

	mlx5e_queue_update_stats(priv);
	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
		udp_tunnel_get_rx_info(netdev);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
				 &rq->wq_ctrl);
	if (err)
		return err;

	/* Mark as unused given "Drop-RQ" packets never reach XDP */
	xdp_rxq_info_unused(&rq->xdp_rxq);

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
	param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);

	return mlx5e_alloc_cq_common(mdev, param, cq);
}
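/* The drop RQ is a minimal RQ (with its own CQ) that no NAPI ever
 * polls; RQTs point at it whenever no valid channel RQ exists, so HW
 * always has a legal destination and drops (and counts) the packets.
 */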
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(priv, &rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = mlx5e_get_netdev_max_channels(priv->netdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);

	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}
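/* mqprio offload: num_tc is validated against MLX5E_MAX_NUM_TC and
 * applied by opening a new channels set and switching over; when the
 * device is closed, only the stored params are updated.
 */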
static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
				    new_channels.params.num_tc);
	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct tc_cls_flower_offload *cls_flower,
				     int flags)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower, flags);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower, flags);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower, flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				   void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block(struct net_device *dev,
				struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
					priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
	case TC_SETUP_BLOCK:
		return mlx5e_setup_tc_block(dev, type_data);
#endif
	case TC_SETUP_QDISC_MQPRIO:
		return mlx5e_setup_tc_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		mlx5e_grp_sw_update_stats(priv);
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(features, feature, enable)	\
	do {						\
		if (enable)				\
			*features |= feature;		\
		else					\
			*features &= ~feature;		\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
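/* LRO requires striding RQ; toggling it avoids a full channel reset
 * when the MPWQE linear-SKB decision is unchanged, in which case only
 * the TIR LRO context needs a modify.
 */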
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *old_params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	old_params = &priv->channels.params;
	if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		netdev_warn(netdev, "can't set LRO with legacy RQ\n");
		err = -EINVAL;
		goto out;
	}

	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *old_params;
	new_channels.params.lro_en = enable;

	if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
			reset = false;
	}

	if (!reset) {
		*old_params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_cvlan_filter(priv);
	else
		mlx5e_disable_cvlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t *features,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(features, feature, enable);
	return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	netdev_features_t oper_features = netdev->features;
	int err = 0;
#define MLX5E_HANDLE_FEATURE(feature, handler) \
	mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)

	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_cvlan_filter);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif

	if (err) {
		netdev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_params *params;

	mutex_lock(&priv->state_lock);
	params = &priv->channels.params;
	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
		/* HW strips the outer C-tag header, this is a problem
		 * for S-tag traffic.
		 */
		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		if (!params->vlan_strip_disable)
			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
	}
	if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		features &= ~NETIF_F_LRO;
		if (params->lro_en)
			netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
	}

	mutex_unlock(&priv->state_lock);

	return features;
}
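/* MTU change: a full channel reset is needed only when the device is
 * open and either LRO is off or, for striding RQ, the packets-per-WQE
 * count changes. With XDP, the new MTU must still fit a linear SKB.
 */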
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     change_hw_mtu_cb set_mtu_cb)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	params = &priv->channels.params;

	reset = !params->lro_en;
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *params;
	new_channels.params.sw_mtu = new_mtu;

	if (params->xdp_prog &&
	    !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
			   new_mtu, MLX5E_XDP_MAX_MTU);
		err = -EINVAL;
		goto out;
	}

	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);

		reset = reset && (ppw_old != ppw_new);
	}

	if (!reset) {
		params->sw_mtu = new_mtu;
		if (set_mtu_cb)
			set_mtu_cb(priv);
		netdev->mtu = params->sw_mtu;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb);
	netdev->mtu = new_channels.params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}
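/* HW timestamping: enabling any RX timestamp filter forces CQE
 * compression off, as compressed CQEs do not carry per-packet
 * timestamps; setting the filter back to NONE restores the admin
 * default compression setting.
 */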
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
	    (mlx5_clock_get_ptp_index(priv->mdev) == -1))
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* Reset CQE compression to Admin default */
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		/* Disable CQE compression */
		netdev_warn(priv->netdev, "Disabling cqe compression");
		err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
		if (err) {
			netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
			mutex_unlock(&priv->state_lock);
			return err;
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		mutex_unlock(&priv->state_lock);
		return -ERANGE;
	}

	memcpy(&priv->tstamp, &config, sizeof(config));
	mutex_unlock(&priv->state_lock);

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config *cfg = &priv->tstamp;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			     int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
#endif
{
3984 struct work_struct work
;
3985 struct mlx5e_priv
*priv
;
3989 static void mlx5e_vxlan_add_work(struct work_struct
*work
)
3991 struct mlx5e_vxlan_work
*vxlan_work
=
3992 container_of(work
, struct mlx5e_vxlan_work
, work
);
3993 struct mlx5e_priv
*priv
= vxlan_work
->priv
;
3994 u16 port
= vxlan_work
->port
;
3996 mutex_lock(&priv
->state_lock
);
3997 mlx5_vxlan_add_port(priv
->mdev
->vxlan
, port
);
3998 mutex_unlock(&priv
->state_lock
);
4003 static void mlx5e_vxlan_del_work(struct work_struct
*work
)
4005 struct mlx5e_vxlan_work
*vxlan_work
=
4006 container_of(work
, struct mlx5e_vxlan_work
, work
);
4007 struct mlx5e_priv
*priv
= vxlan_work
->priv
;
4008 u16 port
= vxlan_work
->port
;
4010 mutex_lock(&priv
->state_lock
);
4011 mlx5_vxlan_del_port(priv
->mdev
->vxlan
, port
);
4012 mutex_unlock(&priv
->state_lock
);
4016 static void mlx5e_vxlan_queue_work(struct mlx5e_priv
*priv
, u16 port
, int add
)
4018 struct mlx5e_vxlan_work
*vxlan_work
;
4020 vxlan_work
= kmalloc(sizeof(*vxlan_work
), GFP_ATOMIC
);
4025 INIT_WORK(&vxlan_work
->work
, mlx5e_vxlan_add_work
);
4027 INIT_WORK(&vxlan_work
->work
, mlx5e_vxlan_del_work
);
4029 vxlan_work
->priv
= priv
;
4030 vxlan_work
->port
= port
;
4031 queue_work(priv
->wq
, &vxlan_work
->work
);
4034 static void mlx5e_add_vxlan_port(struct net_device
*netdev
,
4035 struct udp_tunnel_info
*ti
)
4037 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
4039 if (ti
->type
!= UDP_TUNNEL_TYPE_VXLAN
)
4042 if (!mlx5_vxlan_allowed(priv
->mdev
->vxlan
))
4045 mlx5e_vxlan_queue_work(priv
, be16_to_cpu(ti
->port
), 1);
4048 static void mlx5e_del_vxlan_port(struct net_device
*netdev
,
4049 struct udp_tunnel_info
*ti
)
4051 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
4053 if (ti
->type
!= UDP_TUNNEL_TYPE_VXLAN
)
4056 if (!mlx5_vxlan_allowed(priv
->mdev
->vxlan
))
4059 mlx5e_vxlan_queue_work(priv
, be16_to_cpu(ti
->port
), 0);
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	unsigned int offset = 0;
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}

	switch (proto) {
	case IPPROTO_GRE:
		return features;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);

		/* Verify if UDP port is being offloaded by HW */
		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
			return features;
	}

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}
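/* TX timeout recovery: first try to recover a lost interrupt by
 * polling the SQ's completion EQ with IRQs disabled; only when no
 * EQEs were recovered are the channels fully reopened.
 */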
static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
					struct mlx5e_txqsq *sq)
{
	struct mlx5_eq *eq = sq->cq.mcq.eq;
	u32 eqe_count;

	netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
		   eq->eqn, eq->cons_index, eq->irqn);

	eqe_count = mlx5_eq_poll_irq_disabled(eq);
	if (!eqe_count)
		return false;

	netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
	sq->channel->stats->eq_rearm++;
	return true;
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	struct net_device *dev = priv->netdev;
	bool reopen_channels = false;
	int i, err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(dev_queue))
			continue;

		netdev_err(dev,
			   "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
			   jiffies_to_usecs(jiffies - dev_queue->trans_start));

		/* If we recover a lost interrupt, most likely TX timeout will
		 * be resolved, skip reopening channels
		 */
		if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
			clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
			reopen_channels = true;
		}
	}

	if (!reopen_channels)
		goto unlock;

	mlx5e_close_locked(dev);
	err = mlx5e_open_locked(dev);
	if (err)
		netdev_err(priv->netdev,
			   "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);

unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX timeout detected\n");
	queue_work(priv->wq, &priv->tx_timeout_work);
}
static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5e_channels new_channels = {};

	if (priv->channels.params.lro_en) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		return -EINVAL;
	}

	if (MLX5_IPSEC_DEV(priv->mdev)) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		return -EINVAL;
	}

	new_channels.params = priv->channels.params;
	new_channels.params.xdp_prog = prog;

	if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
			    new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
		return -EINVAL;
	}

	return 0;
}

static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	bool reset, was_opened;
	int err = 0;
	int i;

	mutex_lock(&priv->state_lock);

	if (prog) {
		err = mlx5e_xdp_allowed(priv, prog);
		if (err)
			goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* exchange programs, extra prog reference we got from caller
	 * as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}
const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_fix_features        = mlx5e_fix_features,
	.ndo_change_mtu          = mlx5e_change_nic_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_bpf                 = mlx5e_xdp,
	.ndo_xdp_xmit            = mlx5e_xdp_xmit,
#ifdef CONFIG_MLX5_EN_ARFS
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
#endif
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct net_dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct net_dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - Slow PCI heuristic.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 */
	if (!slow_pci_heuristic(mdev) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
	     !mlx5e_rx_is_linear_skb(mdev, params)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

void mlx5e_build_rss_params(struct mlx5e_params *params)
{
	params->rss_hfunc = ETH_RSS_HASH_XOR;
	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, params->num_channels);
}
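/* Derive the default NIC params from device capabilities: SQ size (a
 * minimal size under kdump), CQE compression (default-on for slow PCI
 * setups), RQ type, LRO, CQ moderation, TX inline mode and RSS.
 */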
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu)
{
	u8 rx_cq_period_mode;

	params->sw_mtu = mtu;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->num_channels = max_channels;
	params->num_tc       = 1;

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* HW LRO */

	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
			params->lro_en = !slow_pci_heuristic(mdev);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	/* TX inline */
	params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);

	/* RSS */
	mlx5e_build_rss_params(params);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
        if (is_zero_ether_addr(netdev->dev_addr) &&
            !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
                eth_hw_addr_random(netdev);
                mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
        }
}

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
static const struct switchdev_ops mlx5e_switchdev_ops = {
        .switchdev_port_attr_get        = mlx5e_attr_get,
};
#endif

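/* Populate the net_device feature sets from device capabilities.
 * vlan_features is built first and then copied into hw_features, so
 * every VLAN-safe feature is also user-toggleable; 'features' starts as
 * a copy of hw_features and is then trimmed to match the current
 * channel parameters (LRO, FCS scattering, RXALL).
 */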
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool fcs_supported;
        bool fcs_enabled;

        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

        netdev->netdev_ops = &mlx5e_netdev_ops;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
                netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif

        netdev->watchdog_timeo = 15 * HZ;

        netdev->ethtool_ops = &mlx5e_ethtool_ops;

        netdev->vlan_features    |= NETIF_F_SG;
        netdev->vlan_features    |= NETIF_F_IP_CSUM;
        netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features    |= NETIF_F_GRO;
        netdev->vlan_features    |= NETIF_F_TSO;
        netdev->vlan_features    |= NETIF_F_TSO6;
        netdev->vlan_features    |= NETIF_F_RXCSUM;
        netdev->vlan_features    |= NETIF_F_RXHASH;

        netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_RX;

        if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
            mlx5e_check_fragmented_striding_rq_cap(mdev))
                netdev->vlan_features    |= NETIF_F_LRO;

        netdev->hw_features       = netdev->vlan_features;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
        netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;

        if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
                netdev->hw_enc_features |= NETIF_F_IP_CSUM;
                netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
                netdev->hw_enc_features |= NETIF_F_TSO;
                netdev->hw_enc_features |= NETIF_F_TSO6;
                netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
        }

        if (mlx5_vxlan_allowed(mdev->vxlan)) {
                netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
                                           NETIF_F_GSO_UDP_TUNNEL_CSUM;
                netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
                                           NETIF_F_GSO_UDP_TUNNEL_CSUM;
                netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }

        if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
                netdev->hw_features     |= NETIF_F_GSO_GRE |
                                           NETIF_F_GSO_GRE_CSUM;
                netdev->hw_enc_features |= NETIF_F_GSO_GRE |
                                           NETIF_F_GSO_GRE_CSUM;
                netdev->gso_partial_features |= NETIF_F_GSO_GRE |
                                                NETIF_F_GSO_GRE_CSUM;
        }

        netdev->hw_features          |= NETIF_F_GSO_PARTIAL;
        netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
        netdev->hw_features          |= NETIF_F_GSO_UDP_L4;
        netdev->features             |= NETIF_F_GSO_UDP_L4;

        mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

        if (fcs_supported)
                netdev->hw_features |= NETIF_F_RXALL;

        if (MLX5_CAP_ETH(mdev, scatter_fcs))
                netdev->hw_features |= NETIF_F_RXFCS;

        netdev->features          = netdev->hw_features;
        if (!priv->channels.params.lro_en)
                netdev->features  &= ~NETIF_F_LRO;

        if (fcs_enabled)
                netdev->features  &= ~NETIF_F_RXALL;

        if (!priv->channels.params.scatter_fcs_en)
                netdev->features  &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
        if (FT_CAP(flow_modify_en) &&
            FT_CAP(modify_root) &&
            FT_CAP(identified_miss_table_mode) &&
            FT_CAP(flow_table_modify)) {
                netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_MLX5_EN_ARFS
                netdev->hw_features |= NETIF_F_NTUPLE;
#endif
        }

        netdev->features |= NETIF_F_HIGHDMA;
        netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        mlx5e_set_netdev_dev_addr(netdev);

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
        if (MLX5_ESWITCH_MANAGER(mdev))
                netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif

        mlx5e_ipsec_build_netdev(priv);
        mlx5e_tls_build_netdev(priv);
}

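/* Queue (out-of-buffer) counters. Allocation failures are deliberately
 * non-fatal: a counter id of 0 marks the counter as absent, and the
 * corresponding statistics are simply not collected.
 */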
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
        if (err) {
                mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
                priv->q_counter = 0;
        }

        err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
        if (err) {
                mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
                priv->drop_rq_q_counter = 0;
        }
}

void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
        if (priv->q_counter)
                mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);

        if (priv->drop_rq_q_counter)
                mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}

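/* profile->init callback for the plain NIC profile. Note that failures
 * of the IPSec/TLS acceleration init are logged but not propagated:
 * the netdev comes up without the respective offloads.
 */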
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
                          struct net_device *netdev,
                          const struct mlx5e_profile *profile,
                          void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;

        mlx5e_build_nic_params(mdev, &priv->channels.params,
                               mlx5e_get_netdev_max_channels(netdev), netdev->mtu);

        mlx5e_timestamp_init(priv);

        err = mlx5e_ipsec_init(priv);
        if (err)
                mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
        err = mlx5e_tls_init(priv);
        if (err)
                mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
        mlx5e_build_nic_netdev(netdev);
        mlx5e_build_tc2txq_maps(priv);

        return 0;
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
        mlx5e_tls_cleanup(priv);
        mlx5e_ipsec_cleanup(priv);
        mlx5e_netdev_cleanup(priv->netdev, priv);
}

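/* profile->init_rx: build the RX pipeline bottom-up (drop RQ, RQTs,
 * TIRs, flow steering, TC). The error path unwinds in exact reverse
 * order, and mlx5e_cleanup_nic_rx() below mirrors the same sequence for
 * the regular teardown.
 */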
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        mlx5e_create_q_counters(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_q_counters;
        }

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_direct_rqts(priv);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv, true);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5e_create_flow_steering(priv);
        if (err) {
                mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
                goto err_destroy_direct_tirs;
        }

        err = mlx5e_tc_nic_init(priv);
        if (err)
                goto err_destroy_flow_steering;

        return 0;

err_destroy_flow_steering:
        mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
        mlx5e_destroy_q_counters(priv);
        return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
        mlx5e_tc_nic_cleanup(priv);
        mlx5e_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv, true);
        mlx5e_destroy_direct_rqts(priv);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
        mlx5e_destroy_q_counters(priv);
}

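/* profile->init_tx: create the TISes (transport interface send
 * contexts) that the SQs will attach to, and set up DCB state when it
 * is enabled.
 */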
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }

#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_initialize(priv);
#endif
        return 0;
}

static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_cleanup(priv);
#endif
}

static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 max_mtu;

        mlx5e_init_l2_addr(priv);

        /* Mark the link as currently not needed by the driver */
        if (!netif_running(netdev))
                mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

        /* MTU range: 68 - hw-specific max */
        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
        mlx5e_set_dev_port_mtu(priv);

        mlx5_lag_add(mdev, netdev);

        mlx5e_enable_async_events(priv);

        if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_register_vport_reps(priv);

        if (netdev->reg_state != NETREG_REGISTERED)
                return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_init_app(priv);
#endif

        queue_work(priv->wq, &priv->set_rx_mode_work);

        rtnl_lock();
        if (netif_running(netdev))
                mlx5e_open(netdev);
        netif_device_attach(netdev);
        rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->netdev->reg_state == NETREG_REGISTERED)
                mlx5e_dcbnl_delete_app(priv);
#endif

        rtnl_lock();
        if (netif_running(priv->netdev))
                mlx5e_close(priv->netdev);
        netif_device_detach(priv->netdev);
        rtnl_unlock();

        queue_work(priv->wq, &priv->set_rx_mode_work);

        if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_unregister_vport_reps(priv);

        mlx5e_disable_async_events(priv);
        mlx5_lag_remove(mdev);
}

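/* The NIC profile: a vtable consumed by the generic netdev management
 * code further below. A caller typically drives it in this order
 * (sketch based on the functions in this file):
 *
 *      netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, ppriv);
 *      priv   = netdev_priv(netdev);
 *      mlx5e_attach_netdev(priv);   // init_tx, init_rx, enable
 *      ...
 *      mlx5e_detach_netdev(priv);   // disable, cleanup_rx, cleanup_tx
 *      mlx5e_destroy_netdev(priv);  // profile->cleanup
 */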
static const struct mlx5e_profile mlx5e_nic_profile = {
        .init              = mlx5e_nic_init,
        .cleanup           = mlx5e_nic_cleanup,
        .init_rx           = mlx5e_init_nic_rx,
        .cleanup_rx        = mlx5e_cleanup_nic_rx,
        .init_tx           = mlx5e_init_nic_tx,
        .cleanup_tx        = mlx5e_cleanup_nic_tx,
        .enable            = mlx5e_nic_enable,
        .disable           = mlx5e_nic_disable,
        .update_stats      = mlx5e_update_ndo_stats,
        .update_carrier    = mlx5e_update_carrier,
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc            = MLX5E_MAX_NUM_TC,
};

/* mlx5e generic netdev management API (move to en_common.c) */

/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
int mlx5e_netdev_init(struct net_device *netdev,
                      struct mlx5e_priv *priv,
                      struct mlx5_core_dev *mdev,
                      const struct mlx5e_profile *profile,
                      void *ppriv)
{
        /* priv init */
        priv->mdev          = mdev;
        priv->netdev        = netdev;
        priv->profile       = profile;
        priv->ppriv         = ppriv;
        priv->msglevel      = MLX5E_MSG_LEVEL;
        priv->max_opened_tc = 1;

        mutex_init(&priv->state_lock);
        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
        INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

        priv->wq = create_singlethread_workqueue("mlx5e");
        if (!priv->wq)
                return -ENOMEM;

        /* netdev init */
        netif_carrier_off(netdev);

#ifdef CONFIG_MLX5_EN_ARFS
        netdev->rx_cpu_rmap = mdev->rmap;
#endif

        return 0;
}

void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
        destroy_workqueue(priv->wq);
}

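/* Allocate a multi-queue etherdev sized for the profile (nch * max_tc
 * TX queues, nch RX queues) and run profile->init on it. Returns NULL
 * on any failure; errors are logged rather than propagated.
 */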
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
                                       const struct mlx5e_profile *profile,
                                       int nch,
                                       void *ppriv)
{
        struct net_device *netdev;
        int err;

        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
                                    nch * profile->max_tc,
                                    nch);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }

        err = profile->init(mdev, netdev, profile, ppriv);
        if (err) {
                mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
                goto err_free_netdev;
        }

        return netdev;

err_free_netdev:
        free_netdev(netdev);

        return NULL;
}

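/* Bring a netdev's hardware resources up via its profile. Safe to call
 * again after mlx5e_detach_netdev(): the DESTROYING bit is cleared
 * first, presumably so that deferred work gated on it may run again.
 */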
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
        const struct mlx5e_profile *profile;
        int err;

        profile = priv->profile;
        clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

        err = profile->init_tx(priv);
        if (err)
                goto out;

        err = profile->init_rx(priv);
        if (err)
                goto err_cleanup_tx;

        if (profile->enable)
                profile->enable(priv);

        return 0;

err_cleanup_tx:
        profile->cleanup_tx(priv);

out:
        return err;
}

void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
        const struct mlx5e_profile *profile = priv->profile;

        set_bit(MLX5E_STATE_DESTROYING, &priv->state);

        if (profile->disable)
                profile->disable(priv);
        flush_workqueue(priv->wq);

        profile->cleanup_rx(priv);
        profile->cleanup_tx(priv);
        cancel_work_sync(&priv->update_stats_work);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
        const struct mlx5e_profile *profile = priv->profile;
        struct net_device *netdev = priv->netdev;

        if (profile->cleanup)
                profile->cleanup(priv);
        free_netdev(netdev);
}

/* The scope of mlx5e_attach and mlx5e_detach should be limited to
 * creating/destroying hardware contexts and connecting them to the
 * current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;
        int err;

        if (netif_device_present(netdev))
                return 0;

        err = mlx5e_create_mdev_resources(mdev);
        if (err)
                return err;

        err = mlx5e_attach_netdev(priv);
        if (err) {
                mlx5e_destroy_mdev_resources(mdev);
                return err;
        }

        return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;

        if (!netif_device_present(netdev))
                return;

        mlx5e_detach_netdev(priv);
        mlx5e_destroy_mdev_resources(mdev);
}

static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
        struct net_device *netdev;
        void *rpriv = NULL;
        void *priv;
        int err;
        int nch;

        err = mlx5e_check_required_hca_cap(mdev);
        if (err)
                return NULL;

#ifdef CONFIG_MLX5_ESWITCH
        if (MLX5_ESWITCH_MANAGER(mdev)) {
                rpriv = mlx5e_alloc_nic_rep_priv(mdev);
                if (!rpriv) {
                        mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
                        return NULL;
                }
        }
#endif

        nch = mlx5e_get_max_num_channels(mdev);
        netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv);
        if (!netdev) {
                mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
                goto err_free_rpriv;
        }

        priv = netdev_priv(netdev);

        err = mlx5e_attach(mdev, priv);
        if (err) {
                mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
                goto err_destroy_netdev;
        }

        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_detach;
        }

#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_init_app(priv);
#endif
        return priv;

err_detach:
        mlx5e_detach(mdev, priv);
err_destroy_netdev:
        mlx5e_destroy_netdev(priv);
err_free_rpriv:
        kfree(rpriv);
        return NULL;
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        void *ppriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_delete_app(priv);
#endif
        unregister_netdev(priv->netdev);
        mlx5e_detach(mdev, vpriv);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv);
}

static void *mlx5e_get_netdev(void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;

        return priv->netdev;
}

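/* Glue between this driver and the mlx5 core. The core calls
 * .add/.remove when an ETH-capable function is probed/removed, and
 * .attach/.detach for lighter-weight transitions (e.g. recovery flows)
 * where the netdev is kept but hardware contexts must be recreated; see
 * the comment above mlx5e_attach().
 */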
static struct mlx5_interface mlx5e_interface = {
        .add       = mlx5e_add,
        .remove    = mlx5e_remove,
        .attach    = mlx5e_attach,
        .detach    = mlx5e_detach,
        .event     = mlx5e_async_event,
        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
        mlx5e_ipsec_build_inverse_table();
        mlx5e_build_ptys2ethtool_map();
        mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
        mlx5_unregister_interface(&mlx5e_interface);
}