/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"

struct page_pool;

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
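/* Illustrative note (not part of the original header): for the plain Ethernet
 * profile, hard_mtu is typically MLX5E_ETH_HARD_MTU = ETH_HLEN + VLAN_HLEN +
 * ETH_FCS_LEN = 14 + 4 + 4 = 22 bytes, so MLX5E_SW2HW_MTU(params, 1500) gives
 * 1522 and MLX5E_HW2SW_MTU(params, 1522) gives back 1500.
 */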

#define MLX5E_MAX_DSCP          64
#define MLX5E_MAX_NUM_TC        8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
	(cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
	 MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))

#define MLX5_MPWRQ_LOG_WQE_SZ		18
#define MLX5_MPWRQ_WQE_PAGE_ORDER	(MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
					 MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
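/* Illustrative note (not part of the original header): with 4 KB pages
 * (PAGE_SHIFT == 12), MLX5_MPWRQ_WQE_PAGE_ORDER is 18 - 12 = 6, so each
 * multi-packet WQE spans MLX5_MPWRQ_PAGES_PER_WQE = BIT(6) = 64 pages,
 * i.e. a 256 KB buffer per WQE.
 */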

#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS	\
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
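/* Illustrative derivation (not part of the original header), assuming 4 KB
 * pages: MLX5E_REQUIRED_WQE_MTTS = ALIGN(64, 8) = 64 and
 * MLX5E_MAX_RQ_NUM_MTTS = 2^17, so MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW =
 * ilog2(2^17 / 64) = 11, i.e. at most 2048 multi-packet WQEs per RQ before
 * MLX5_MTT_OCTW() of the total MTT count would overflow a u16.
 */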

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
						MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2

#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2

#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS         0x1
#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET        128
#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */

#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
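/* Illustrative note (not part of the original header), assuming 4 KB pages:
 * the UMR WQE carries MLX5_MPWRQ_PAGES_PER_WQE = 64 inline MTT entries of
 * sizeof(struct mlx5_mtt) bytes each, aligned to MLX5_UMR_MTT_ALIGNMENT, so
 * MLX5E_UMR_WQEBBS rounds the total up to several 64-byte WQE basic blocks
 * rather than a single one; the ICOSQ is sized to fit at least one such WQE.
 */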

#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_DS_COUNT \
	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)

#define MLX5E_NUM_MAIN_GROUPS 9

#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...)                    \
do {                                                            \
	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn(priv->netdev, format,               \
			    ##__VA_ARGS__);                     \
} while (0)
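/* Usage sketch (illustrative, not part of the original header):
 *
 *	mlx5e_dbg(HW, priv, "link is up\n");
 *
 * expands to a netdev_warn() that fires only when the NETIF_MSG_HW bit is set
 * in priv->msglevel (adjustable through the ethtool msglvl interface).
 */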


static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}
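/* Illustrative examples (not part of the original header): a striding RQ of
 * wq_size 8 returns min(0x2, 4) = 2, while a 1024-entry linked-list RQ
 * returns min(0x80, 512) = 128 receive WQEs.
 */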

static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mdev->priv.eq_table.num_comp_vectors,
		      MLX5E_MAX_NUM_CHANNELS);
}

struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg  eth;
};

struct mlx5e_rx_wqe {
	struct mlx5_wqe_srq_next_seg  next;
	struct mlx5_wqe_data_seg      data;
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg       ctrl;
	struct mlx5_wqe_umr_ctrl_seg   uctrl;
	struct mlx5_mkey_seg           mkc;
	struct mlx5_mtt                inline_mtts[0];
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
	"rx_cqe_moder",
	"tx_cqe_moder",
	"rx_cqe_compress",
	"rx_striding_rq",
};

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
	MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
	MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
};

#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= (pflag);		\
		else						\
			(params)->pflags &= ~(pflag);		\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
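/* Usage sketch (illustrative, not part of the original header):
 *
 *	MLX5E_SET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		...
 *
 * The bits correspond, in order, to the strings in mlx5e_priv_flags[] exposed
 * through the ethtool private-flags interface.
 */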

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif

struct mlx5e_params {
	u8 log_sq_size;
	u8 rq_wq_type;
	u8 log_rq_mtu_frames;
	u16 num_channels;
	u8 num_tc;
	bool rx_cqe_compress_def;
	struct net_dim_cq_moder rx_cq_moderation;
	struct net_dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u32 lro_wqe_sz;
	u8 tx_min_inline_mode;
	u8 rss_hfunc;
	u8 toeplitz_hash_key[40];
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	unsigned int sw_mtu;
	int hard_mtu;
};

#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_cee_config {
	/* bw pct for priority group */
	u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
	u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
	bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
	bool                       pfc_enable;
};

enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};

struct mlx5e_dcbx {
	enum mlx5_dcbx_oper_mode   mode;
	struct mlx5e_cee_config    cee_cfg; /* pending configuration */
	u8                         dscp_app_cnt;

	/* The only setting that cannot be read from FW */
	u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
	u8                         cap;
};

struct mlx5e_dcbx_dp {
	u8                         dscp2prio[MLX5E_MAX_DSCP];
	u8                         trust_state;
};
#endif

enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_AM,
};

#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	u16                        event_ctr;
	struct napi_struct        *napi;
	struct mlx5_core_cq        mcq;
	struct mlx5e_channel      *channel;

	/* cqe decompression */
	struct mlx5_cqe64          title;
	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8                         mini_arr_idx;
	u16                        decmprs_left;
	u16                        decmprs_wqe_counter;

	/* control */
	struct mlx5_core_dev      *mdev;
	struct mlx5_frag_wq_ctrl   wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8  num_wqebbs;
	u8  num_dma;
};

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t              addr;
	u32                     size;
	enum mlx5e_dma_map_type type;
};

enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
};

struct mlx5e_sq_wqe_info {
	u8  opcode;
};

struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16                        cc;
	u32                        dma_fifo_cc;

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;
	u32                        dma_fifo_pc;
	struct mlx5e_sq_stats      stats;

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_sq_dma       *dma_fifo;
		struct mlx5e_tx_wqe_info  *wqe_info;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	u32                        dma_fifo_mask;
	void __iomem              *uar_map;
	struct netdev_queue       *txq;
	u32                        sqn;
	u8                         min_inline_mode;
	u16                        edge;
	struct device             *pdev;
	__be32                     mkey_be;
	unsigned long              state;
	struct hwtstamp_config    *tstamp;
	struct mlx5_clock         *clock;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
	int                        txq_ix;
	u32                        rate_limit;
	struct mlx5e_txqsq_recover {
		struct work_struct         recover_work;
		u64                        last_recover;
	} recover;
} ____cacheline_aligned_in_smp;

struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @rx completion */
	u16                        cc;
	u16                        pc;

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_dma_info     *di;
		bool                       doorbell;
		bool                       redirect_flush;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	struct device             *pdev;
	__be32                     mkey_be;
	u8                         min_inline_mode;
	unsigned long              state;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

struct mlx5e_icosq {
	/* data path */

	/* dirtied @xmit */
	u16                        pc ____cacheline_aligned_in_smp;

	struct mlx5e_cq            cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_sq_wqe_info *ico_wqe;
	} db;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem              *uar_map;
	u32                        sqn;
	u16                        edge;
	unsigned long              state;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
}
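/* Illustrative note (not part of the original header): the cyclic WQ size is
 * a power of two and wq->sz_m1 is (size - 1), so (cc - pc) & sz_m1 counts the
 * free slots between the producer counter (pc) and the consumer counter (cc)
 * modulo the ring size; cc == pc means the ring is completely empty, in which
 * case any reasonable n fits.
 */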

struct mlx5e_dma_info {
	struct page	*page;
	dma_addr_t	addr;
};

struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info di;
	u32 offset;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};

/* A single cache unit can serve one napi call (for non-striding rq)
 * or a MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
};

struct mlx5e_rq {
	/* data path */
	struct mlx5_wq_ll      wq;

	union {
		struct {
			struct mlx5e_wqe_frag_info *frag_info;
			u32 frag_sz;	/* max possible skb frag_sz */
			union {
				bool page_reuse;
			};
		} wqe;
		struct {
			struct mlx5e_umr_wqe   umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			u16                    num_strides;
			u8                     log_stride_sz;
			bool                   umr_in_progress;
		} mpwqe;
	};
	struct {
		u16            headroom;
		u8             page_order;
		u8             map_dir;   /* dma map direction */
	} buff;

	struct mlx5e_channel  *channel;
	struct device         *pdev;
	struct net_device     *netdev;
	struct mlx5e_rq_stats  stats;
	struct mlx5e_cq        cq;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock     *clock;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes  post_wqes;
	mlx5e_fp_dealloc_wqe   dealloc_wqe;

	unsigned long          state;
	int                    ix;

	struct net_dim         dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog       *xdp_prog;
	unsigned int           hw_mtu;
	struct mlx5e_xdpsq     xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool      *page_pool;

	/* control */
	struct mlx5_wq_ctrl    wq_ctrl;
	__be32                 mkey_be;
	u8                     wq_type;
	u32                    rqn;
	struct mlx5_core_dev  *mdev;
	struct mlx5_core_mkey  umr_mkey;

	/* XDP read-mostly */
	struct xdp_rxq_info    xdp_rxq;
} ____cacheline_aligned_in_smp;

struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq            rq;
	struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq         icosq;   /* internal control operations */
	bool                       xdp;
	struct napi_struct         napi;
	struct device             *pdev;
	struct net_device         *netdev;
	__be32                     mkey_be;
	u8                         num_tc;

	/* data path - accessed per napi poll */
	struct irq_desc           *irq_desc;
	struct mlx5e_ch_stats      stats;

	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5_core_dev      *mdev;
	struct hwtstamp_config    *tstamp;
	int                        ix;
	int                        cpu;
};

struct mlx5e_channels {
	struct mlx5e_channel **c;
	unsigned int           num;
	struct mlx5e_params    params;
};

enum mlx5e_traffic_types {
	MLX5E_TT_IPV4_TCP,
	MLX5E_TT_IPV6_TCP,
	MLX5E_TT_IPV4_UDP,
	MLX5E_TT_IPV6_UDP,
	MLX5E_TT_IPV4_IPSEC_AH,
	MLX5E_TT_IPV6_IPSEC_AH,
	MLX5E_TT_IPV4_IPSEC_ESP,
	MLX5E_TT_IPV6_IPSEC_ESP,
	MLX5E_TT_IPV4,
	MLX5E_TT_IPV6,
	MLX5E_TT_ANY,
	MLX5E_NUM_TT,
	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};

enum mlx5e_tunnel_types {
	MLX5E_TT_IPV4_GRE,
	MLX5E_TT_IPV6_GRE,
	MLX5E_NUM_TUNNEL_TT,
};

enum {
	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
};

struct mlx5e_vxlan_db {
	spinlock_t              lock; /* protect vxlan table */
	struct radix_tree_root  tree;
};

struct mlx5e_l2_rule {
	u8  addr[ETH_ALEN + 2];
	struct mlx5_flow_handle *rule;
};

struct mlx5e_flow_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
};

#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)

struct mlx5e_tc_table {
	struct mlx5_flow_table          *t;

	struct rhashtable_params        ht_params;
	struct rhashtable               ht;

	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
	DECLARE_HASHTABLE(hairpin_tbl, 8);
};

struct mlx5e_vlan_table {
	struct mlx5e_flow_table         ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
	struct mlx5_flow_handle        *active_cvlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle        *active_svlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle        *untagged_rule;
	struct mlx5_flow_handle        *any_cvlan_rule;
	struct mlx5_flow_handle        *any_svlan_rule;
	bool                            cvlan_filter_disabled;
};

struct mlx5e_l2_table {
	struct mlx5e_flow_table    ft;
	struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
	struct mlx5e_l2_rule       broadcast;
	struct mlx5e_l2_rule       allmulti;
	struct mlx5e_l2_rule       promisc;
	bool                       broadcast_enabled;
	bool                       allmulti_enabled;
	bool                       promisc_enabled;
};

/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
	struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
};

#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table {
	struct mlx5e_flow_table  ft;
	struct mlx5_flow_handle *default_rule;
	struct hlist_head        rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t                     arfs_lock;
	struct list_head               rules;
	int                            last_filter_id;
	struct workqueue_struct        *wq;
};

/* NIC prio FTS */
enum {
	MLX5E_VLAN_FT_LEVEL = 0,
	MLX5E_L2_FT_LEVEL,
	MLX5E_TTC_FT_LEVEL,
	MLX5E_INNER_TTC_FT_LEVEL,
	MLX5E_ARFS_FT_LEVEL
};

enum {
	MLX5E_TC_FT_LEVEL = 0,
	MLX5E_TC_TTC_FT_LEVEL,
};

struct mlx5e_ethtool_table {
	struct mlx5_flow_table *ft;
	int                    num_rules;
};

#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4

struct mlx5e_ethtool_steering {
	struct mlx5e_ethtool_table      l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
	struct mlx5e_ethtool_table      l2_ft[ETHTOOL_NUM_L2_FTS];
	struct list_head                rules;
	int                             tot_num_rules;
};

struct mlx5e_flow_steering {
	struct mlx5_flow_namespace      *ns;
	struct mlx5e_ethtool_steering   ethtool;
	struct mlx5e_tc_table           tc;
	struct mlx5e_vlan_table         vlan;
	struct mlx5e_l2_table           l2;
	struct mlx5e_ttc_table          ttc;
	struct mlx5e_ttc_table          inner_ttc;
	struct mlx5e_arfs_tables        arfs;
};

struct mlx5e_rqt {
	u32              rqtn;
	bool             enabled;
};

struct mlx5e_tir {
	u32               tirn;
	struct mlx5e_rqt  rqt;
	struct list_head  list;
};

enum {
	MLX5E_TC_PRIO = 0,
	MLX5E_NIC_PRIO
};

struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp       dcbx_dp;
#endif
	/* priv data path fields - end */

	u32                        msglevel;
	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;

	struct mlx5e_channels      channels;
	u32                        tisn[MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt           indir_rqt;
	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
	u32                        tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;
	struct mlx5e_vxlan_db      vxlan;

	struct workqueue_struct    *wq;
	struct work_struct         update_carrier_work;
	struct work_struct         set_rx_mode_work;
	struct work_struct         tx_timeout_work;
	struct delayed_work        update_stats_work;

	struct mlx5_core_dev      *mdev;
	struct net_device         *netdev;
	struct mlx5e_stats         stats;
	struct hwtstamp_config     tstamp;
	u16                        q_counter;
	u16                        drop_rq_q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx          dcbx;
#endif

	const struct mlx5e_profile *profile;
	void                      *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec        *ipsec;
#endif
};

struct mlx5e_profile {
	void	(*init)(struct mlx5_core_dev *mdev,
			struct net_device *netdev,
			const struct mlx5e_profile *profile, void *ppriv);
	void	(*cleanup)(struct mlx5e_priv *priv);
	int	(*init_rx)(struct mlx5e_priv *priv);
	void	(*cleanup_rx)(struct mlx5e_priv *priv);
	int	(*init_tx)(struct mlx5e_priv *priv);
	void	(*cleanup_tx)(struct mlx5e_priv *priv);
	void	(*enable)(struct mlx5e_priv *priv);
	void	(*disable)(struct mlx5e_priv *priv);
	void	(*update_stats)(struct mlx5e_priv *priv);
	void	(*update_carrier)(struct mlx5e_priv *priv);
	int	(*max_nch)(struct mlx5_core_dev *mdev);
	struct {
		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	} rx_handlers;
	void	(*netdev_registered_init)(struct mlx5e_priv *priv);
	void	(*netdev_registered_remove)(struct mlx5e_priv *priv);
	int	max_tc;
};

void mlx5e_build_ptys2ethtool_map(void);

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
			bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);

void mlx5e_update_stats(struct mlx5e_priv *priv);

int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
			   int location);
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
				struct ethtool_rxnfc *info, u32 *rule_locs);
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			       struct ethtool_rx_flow_spec *fs);
int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
			      int location);
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);

static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}

static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = *pc & wq->sz_m1;
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}
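/* Illustrative note (not part of the original header): the control segment of
 * the NOP packs the WQE index (*pc) into bits 8..23 of opmod_idx_opcode and
 * the opcode into the low byte, while qpn_ds carries the SQ number in its
 * upper bits and a data-segment count of 1 (just the 16-byte control
 * segment). NOPs are typically posted to pad the send queue, e.g. so that a
 * following multi-WQEBB WQE does not wrap around the ring edge.
 */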

static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
		     void __iomem *uar_map,
		     struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}
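/* Usage sketch (illustrative, not part of the original header): the TX path
 * builds a WQE, advances sq->pc and then rings the doorbell once per batch:
 *
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 *
 * The dma_wmb()/wmb() pair orders the WQE write, the doorbell-record update
 * and the MMIO doorbell write as the device expects.
 */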

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif

#ifndef CONFIG_RFS_ACCEL
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	return 0;
}

static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}

static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	return -EOPNOTSUPP;
}

static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	return -EOPNOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id);
#endif

int mlx5e_create_tir(struct mlx5_core_dev *mdev,
		     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);

/* common netdev helpers */
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

struct ttc_params {
	struct mlx5_flow_table_attr ft_attr;
	u32 any_tt_tirn;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table *inner_ttc;
};

void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);

int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
			   struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
			     struct mlx5e_ttc_table *ttc);

int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
				 struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
				   struct mlx5e_ttc_table *ttc);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);

int mlx5e_bits_invert(unsigned long a, int size);

/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */