From fb3dda82fd38ca42140f29b3082324dcdc128293 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 28 Feb 2025 11:54:09 +0100
Subject: [PATCH 01/15] net: airoha: Move airoha_eth driver in a dedicated
 folder

The airoha_eth driver does not share any codebase with the mtk_eth_soc
one. Moreover, the upcoming features (flowtable hw offloading, PCS, ...)
will not reuse any code from the MediaTek driver. Move the Airoha driver
to a dedicated folder.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 drivers/net/ethernet/Kconfig                       |  1 +
 drivers/net/ethernet/Makefile                      |  1 +
 drivers/net/ethernet/airoha/Kconfig                | 18 ++++++++++++++++++
 drivers/net/ethernet/airoha/Makefile               |  6 ++++++
 .../ethernet/{mediatek => airoha}/airoha_eth.c     |  0
 drivers/net/ethernet/mediatek/Kconfig              |  8 --------
 drivers/net/ethernet/mediatek/Makefile             |  1 -
 7 files changed, 26 insertions(+), 9 deletions(-)
 create mode 100644 drivers/net/ethernet/airoha/Kconfig
 create mode 100644 drivers/net/ethernet/airoha/Makefile
 rename drivers/net/ethernet/{mediatek => airoha}/airoha_eth.c (100%)
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -23,6 +23,7 @@ source "drivers/net/ethernet/actions/Kconfig"
 source "drivers/net/ethernet/adaptec/Kconfig"
 source "drivers/net/ethernet/aeroflex/Kconfig"
 source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/airoha/Kconfig"
 source "drivers/net/ethernet/alacritech/Kconfig"
 source "drivers/net/ethernet/allwinner/Kconfig"
 source "drivers/net/ethernet/alteon/Kconfig"
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
 obj-$(CONFIG_GRETH) += aeroflex/
 obj-$(CONFIG_NET_VENDOR_ADI) += adi/
 obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
 obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
 obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
 obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+	bool "Airoha devices"
+	depends on ARCH_AIROHA || COMPILE_TEST
+	help
+	  If you have an Airoha SoC with ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA
+	tristate "Airoha SoC Gigabit Ethernet support"
+	depends on NET_DSA || !NET_DSA
+	select PAGE_POOL
+	help
+	  This driver supports the gigabit ethernet MACs in the
+	  Airoha SoC family.
+
+endif #NET_VENDOR_AIROHA
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Airoha SoCs built-in ethernet macs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
 
 if NET_VENDOR_MEDIATEK
 
-config NET_AIROHA
-	tristate "Airoha SoC Gigabit Ethernet support"
-	depends on NET_DSA || !NET_DSA
-	select PAGE_POOL
-	help
-	  This driver supports the gigabit ethernet MACs in the
-	  Airoha SoC family.
-
 config NET_MEDIATEK_SOC_WED
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 	def_bool NET_MEDIATEK_SOC != n
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
 endif
 obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
 obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
103 +++ b/drivers/net/ethernet/airoha/airoha_eth.c
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
110 +#include <linux/etherdevice.h>
111 +#include <linux/iopoll.h>
112 +#include <linux/kernel.h>
113 +#include <linux/netdevice.h>
114 +#include <linux/of.h>
115 +#include <linux/of_net.h>
116 +#include <linux/platform_device.h>
117 +#include <linux/reset.h>
118 +#include <linux/tcp.h>
119 +#include <linux/u64_stats_sync.h>
120 +#include <net/dsa.h>
121 +#include <net/page_pool/helpers.h>
122 +#include <net/pkt_cls.h>
123 +#include <uapi/linux/ppp_defs.h>
125 +#define AIROHA_MAX_NUM_GDM_PORTS 1
126 +#define AIROHA_MAX_NUM_QDMA 2
127 +#define AIROHA_MAX_NUM_RSTS 3
128 +#define AIROHA_MAX_NUM_XSI_RSTS 5
129 +#define AIROHA_MAX_MTU 2000
130 +#define AIROHA_MAX_PACKET_SIZE 2048
131 +#define AIROHA_NUM_QOS_CHANNELS 4
132 +#define AIROHA_NUM_QOS_QUEUES 8
133 +#define AIROHA_NUM_TX_RING 32
134 +#define AIROHA_NUM_RX_RING 32
135 +#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
136 + AIROHA_NUM_QOS_CHANNELS)
137 +#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
138 +#define AIROHA_FE_MC_MAX_VLAN_PORT 16
139 +#define AIROHA_NUM_TX_IRQ 2
140 +#define HW_DSCP_NUM 2048
141 +#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
142 +#define TX_DSCP_NUM 1024
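+/* per-ring rx descriptor budget: ring 0 gets 1024 descriptors,
+ * rings 2/11/15 get 128, all other rings 16
+ */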
143 +#define RX_DSCP_NUM(_n) \
144 + ((_n) == 2 ? 128 : \
145 + (_n) == 11 ? 128 : \
146 + (_n) == 15 ? 128 : \
147 + (_n) == 0 ? 1024 : 16)
149 +#define PSE_RSV_PAGES 128
150 +#define PSE_QUEUE_RSV_PAGES 64
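+/* a QDMA meter identifier packs an 8-bit index within a 2-bit group */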
152 +#define QDMA_METER_IDX(_n) ((_n) & 0xff)
153 +#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
156 +#define PSE_BASE 0x0100
157 +#define CSR_IFC_BASE 0x0200
158 +#define CDM1_BASE 0x0400
159 +#define GDM1_BASE 0x0500
160 +#define PPE1_BASE 0x0c00
162 +#define CDM2_BASE 0x1400
163 +#define GDM2_BASE 0x1500
165 +#define GDM3_BASE 0x1100
166 +#define GDM4_BASE 0x2500
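+/* resolve a GDM port index (1-4) to the base offset of its register block */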
168 +#define GDM_BASE(_n) \
169 + ((_n) == 4 ? GDM4_BASE : \
170 + (_n) == 3 ? GDM3_BASE : \
171 + (_n) == 2 ? GDM2_BASE : GDM1_BASE)
173 +#define REG_FE_DMA_GLO_CFG 0x0000
174 +#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
175 +#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
177 +#define REG_FE_RST_GLO_CFG 0x0004
178 +#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
179 +#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
180 +#define FE_RST_CORE_MASK BIT(0)
182 +#define REG_FE_WAN_MAC_H 0x0030
183 +#define REG_FE_LAN_MAC_H 0x0040
185 +#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
186 +#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
188 +#define REG_FE_CDM1_OQ_MAP0 0x0050
189 +#define REG_FE_CDM1_OQ_MAP1 0x0054
190 +#define REG_FE_CDM1_OQ_MAP2 0x0058
191 +#define REG_FE_CDM1_OQ_MAP3 0x005c
193 +#define REG_FE_PCE_CFG 0x0070
194 +#define PCE_DPI_EN_MASK BIT(2)
195 +#define PCE_KA_EN_MASK BIT(1)
196 +#define PCE_MC_EN_MASK BIT(0)
198 +#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
199 +#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
200 +#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
201 +#define PSE_CFG_WR_EN_MASK BIT(8)
202 +#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
204 +#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
205 +#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
207 +#define PSE_FQ_CFG 0x008c
208 +#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
210 +#define REG_FE_PSE_BUF_SET 0x0090
211 +#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
212 +#define PSE_ALLRSV_MASK GENMASK(14, 0)
214 +#define REG_PSE_SHARE_USED_THD 0x0094
215 +#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
216 +#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
218 +#define REG_GDM_MISC_CFG 0x0148
219 +#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
220 +#define GDM2_CHN_VLD_MODE_MASK BIT(5)
222 +#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
223 +#define FE_IFC_EN_MASK BIT(0)
225 +#define REG_FE_VIP_PORT_EN 0x01f0
226 +#define REG_FE_IFC_PORT_EN 0x01f4
228 +#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
229 +#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
231 +#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
232 +#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
233 +#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
235 +#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
236 +#define PATN_FCPU_EN_MASK BIT(7)
237 +#define PATN_SWP_EN_MASK BIT(6)
238 +#define PATN_DP_EN_MASK BIT(5)
239 +#define PATN_SP_EN_MASK BIT(4)
240 +#define PATN_TYPE_MASK GENMASK(3, 1)
241 +#define PATN_EN_MASK BIT(0)
243 +#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
244 +#define PATN_DP_MASK GENMASK(31, 16)
245 +#define PATN_SP_MASK GENMASK(15, 0)
247 +#define REG_CDM1_VLAN_CTRL CDM1_BASE
248 +#define CDM1_VLAN_MASK GENMASK(31, 16)
250 +#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
251 +#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
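+/* each CRSN_QSEL register packs four 5-bit queue selectors, one per
+ * CPU-trap reason (CRSN)
+ */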
253 +#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
254 +#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
255 + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
257 +#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
258 +#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
259 +#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
261 +#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
262 +#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
263 + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
265 +#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
266 +#define GDM_DROP_CRC_ERR BIT(23)
267 +#define GDM_IP4_CKSUM BIT(22)
268 +#define GDM_TCP_CKSUM BIT(21)
269 +#define GDM_UDP_CKSUM BIT(20)
270 +#define GDM_UCFQ_MASK GENMASK(15, 12)
271 +#define GDM_BCFQ_MASK GENMASK(11, 8)
272 +#define GDM_MCFQ_MASK GENMASK(7, 4)
273 +#define GDM_OCFQ_MASK GENMASK(3, 0)
275 +#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
276 +#define GDM_INGRESS_FC_EN_MASK BIT(1)
277 +#define GDM_STAG_EN_MASK BIT(0)
279 +#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
280 +#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
281 +#define GDM_LONG_LEN_MASK GENMASK(29, 16)
283 +#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
284 +#define FE_CPORT_PAD BIT(26)
285 +#define FE_CPORT_PORT_XFC_MASK BIT(25)
286 +#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
288 +#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
289 +#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
290 +#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
292 +#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
293 +#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
294 +#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
295 +#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
296 +#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
297 +#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
299 +#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
300 +#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
301 +#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
302 +#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
303 +#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
304 +#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
305 +#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
306 +#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
307 +#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
308 +#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
309 +#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
310 +#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
311 +#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
312 +#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
313 +#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
315 +#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
316 +#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
317 +#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
318 +#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
319 +#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
320 +#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
321 +#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
322 +#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
323 +#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
324 +#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
325 +#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
326 +#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
327 +#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
328 +#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
329 +#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
330 +#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
331 +#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
332 +#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
333 +#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
334 +#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
335 +#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
336 +#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
338 +#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
339 +#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
340 +#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
341 +#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
342 +#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
344 +#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
345 +#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
346 +#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
347 +#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
349 +#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
350 +#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
351 +#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
352 +#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
353 +#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
354 +#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
355 +#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
356 +#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
357 +#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
358 +#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
359 +#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
360 +#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
361 +#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
362 +#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
363 +#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
364 +#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
366 +#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
367 +#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
368 +#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
370 +#define REG_GDM3_FWD_CFG GDM3_BASE
371 +#define GDM3_PAD_EN_MASK BIT(28)
373 +#define REG_GDM4_FWD_CFG GDM4_BASE
374 +#define GDM4_PAD_EN_MASK BIT(28)
375 +#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
377 +#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
378 +#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
379 +#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
380 +#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
382 +#define REG_IP_FRAG_FP 0x2010
383 +#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
384 +#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
385 +#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
386 +#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
388 +#define REG_MC_VLAN_EN 0x2100
389 +#define MC_VLAN_EN_MASK BIT(0)
391 +#define REG_MC_VLAN_CFG 0x2104
392 +#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
393 +#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
394 +#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
395 +#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
396 +#define MC_VLAN_CFG_RW_MASK BIT(0)
398 +#define REG_MC_VLAN_DATA 0x2108
400 +#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
403 +#define REG_QDMA_GLOBAL_CFG 0x0004
404 +#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
405 +#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
406 +#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
407 +#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
408 +#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
409 +#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
410 +#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
411 +#define GLOBAL_CFG_RESET_MASK BIT(23)
412 +#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
413 +#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
414 +#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
415 +#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
416 +#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
417 +#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
418 +#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
419 +#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
420 +#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
421 +#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
422 +#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
423 +#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
424 +#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
425 +#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
426 +#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
428 +#define REG_FWD_DSCP_BASE 0x0010
429 +#define REG_FWD_BUF_BASE 0x0014
431 +#define REG_HW_FWD_DSCP_CFG 0x0018
432 +#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
433 +#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
434 +#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
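+/* the QDMA block exposes five interrupt status/enable banks at
+ * non-contiguous register offsets
+ */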
436 +#define REG_INT_STATUS(_n) \
437 + (((_n) == 4) ? 0x0730 : \
438 + ((_n) == 3) ? 0x0724 : \
439 + ((_n) == 2) ? 0x0720 : \
440 + ((_n) == 1) ? 0x0024 : 0x0020)
442 +#define REG_INT_ENABLE(_n) \
443 + (((_n) == 4) ? 0x0750 : \
444 + ((_n) == 3) ? 0x0744 : \
445 + ((_n) == 2) ? 0x0740 : \
446 + ((_n) == 1) ? 0x002c : 0x0028)
448 +/* QDMA_CSR_INT_ENABLE1 */
449 +#define RX15_COHERENT_INT_MASK BIT(31)
450 +#define RX14_COHERENT_INT_MASK BIT(30)
451 +#define RX13_COHERENT_INT_MASK BIT(29)
452 +#define RX12_COHERENT_INT_MASK BIT(28)
453 +#define RX11_COHERENT_INT_MASK BIT(27)
454 +#define RX10_COHERENT_INT_MASK BIT(26)
455 +#define RX9_COHERENT_INT_MASK BIT(25)
456 +#define RX8_COHERENT_INT_MASK BIT(24)
457 +#define RX7_COHERENT_INT_MASK BIT(23)
458 +#define RX6_COHERENT_INT_MASK BIT(22)
459 +#define RX5_COHERENT_INT_MASK BIT(21)
460 +#define RX4_COHERENT_INT_MASK BIT(20)
461 +#define RX3_COHERENT_INT_MASK BIT(19)
462 +#define RX2_COHERENT_INT_MASK BIT(18)
463 +#define RX1_COHERENT_INT_MASK BIT(17)
464 +#define RX0_COHERENT_INT_MASK BIT(16)
465 +#define TX7_COHERENT_INT_MASK BIT(15)
466 +#define TX6_COHERENT_INT_MASK BIT(14)
467 +#define TX5_COHERENT_INT_MASK BIT(13)
468 +#define TX4_COHERENT_INT_MASK BIT(12)
469 +#define TX3_COHERENT_INT_MASK BIT(11)
470 +#define TX2_COHERENT_INT_MASK BIT(10)
471 +#define TX1_COHERENT_INT_MASK BIT(9)
472 +#define TX0_COHERENT_INT_MASK BIT(8)
473 +#define CNT_OVER_FLOW_INT_MASK BIT(7)
474 +#define IRQ1_FULL_INT_MASK BIT(5)
475 +#define IRQ1_INT_MASK BIT(4)
476 +#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
477 +#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
478 +#define IRQ0_FULL_INT_MASK BIT(1)
479 +#define IRQ0_INT_MASK BIT(0)
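+/* each of the two tx completion (irq) queues reports both a "pending"
+ * and a "queue full" interrupt
+ */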
481 +#define TX_DONE_INT_MASK(_n) \
482 + ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
483 + : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
485 +#define INT_TX_MASK \
486 + (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
487 + IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
489 +#define INT_IDX0_MASK \
490 + (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
491 + TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
492 + TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
493 + TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
494 + RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
495 + RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
496 + RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
497 + RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
498 + RX15_COHERENT_INT_MASK | INT_TX_MASK)
500 +/* QDMA_CSR_INT_ENABLE2 */
501 +#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
502 +#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
503 +#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
504 +#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
505 +#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
506 +#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
507 +#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
508 +#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
509 +#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
510 +#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
511 +#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
512 +#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
513 +#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
514 +#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
515 +#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
516 +#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
517 +#define RX15_DONE_INT_MASK BIT(15)
518 +#define RX14_DONE_INT_MASK BIT(14)
519 +#define RX13_DONE_INT_MASK BIT(13)
520 +#define RX12_DONE_INT_MASK BIT(12)
521 +#define RX11_DONE_INT_MASK BIT(11)
522 +#define RX10_DONE_INT_MASK BIT(10)
523 +#define RX9_DONE_INT_MASK BIT(9)
524 +#define RX8_DONE_INT_MASK BIT(8)
525 +#define RX7_DONE_INT_MASK BIT(7)
526 +#define RX6_DONE_INT_MASK BIT(6)
527 +#define RX5_DONE_INT_MASK BIT(5)
528 +#define RX4_DONE_INT_MASK BIT(4)
529 +#define RX3_DONE_INT_MASK BIT(3)
530 +#define RX2_DONE_INT_MASK BIT(2)
531 +#define RX1_DONE_INT_MASK BIT(1)
532 +#define RX0_DONE_INT_MASK BIT(0)
534 +#define RX_DONE_INT_MASK \
535 + (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
536 + RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
537 + RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
538 + RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
539 + RX15_DONE_INT_MASK)
540 +#define INT_IDX1_MASK \
541 + (RX_DONE_INT_MASK | \
542 + RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
543 + RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
544 + RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
545 + RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
546 + RX15_NO_CPU_DSCP_INT_MASK)
548 +/* QDMA_CSR_INT_ENABLE5 */
549 +#define TX31_COHERENT_INT_MASK BIT(31)
550 +#define TX30_COHERENT_INT_MASK BIT(30)
551 +#define TX29_COHERENT_INT_MASK BIT(29)
552 +#define TX28_COHERENT_INT_MASK BIT(28)
553 +#define TX27_COHERENT_INT_MASK BIT(27)
554 +#define TX26_COHERENT_INT_MASK BIT(26)
555 +#define TX25_COHERENT_INT_MASK BIT(25)
556 +#define TX24_COHERENT_INT_MASK BIT(24)
557 +#define TX23_COHERENT_INT_MASK BIT(23)
558 +#define TX22_COHERENT_INT_MASK BIT(22)
559 +#define TX21_COHERENT_INT_MASK BIT(21)
560 +#define TX20_COHERENT_INT_MASK BIT(20)
561 +#define TX19_COHERENT_INT_MASK BIT(19)
562 +#define TX18_COHERENT_INT_MASK BIT(18)
563 +#define TX17_COHERENT_INT_MASK BIT(17)
564 +#define TX16_COHERENT_INT_MASK BIT(16)
565 +#define TX15_COHERENT_INT_MASK BIT(15)
566 +#define TX14_COHERENT_INT_MASK BIT(14)
567 +#define TX13_COHERENT_INT_MASK BIT(13)
568 +#define TX12_COHERENT_INT_MASK BIT(12)
569 +#define TX11_COHERENT_INT_MASK BIT(11)
570 +#define TX10_COHERENT_INT_MASK BIT(10)
571 +#define TX9_COHERENT_INT_MASK BIT(9)
572 +#define TX8_COHERENT_INT_MASK BIT(8)
574 +#define INT_IDX4_MASK \
575 + (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
576 + TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
577 + TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
578 + TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
579 + TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
580 + TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
581 + TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
582 + TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
583 + TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
584 + TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
585 + TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
586 + TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
588 +#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
590 +#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
591 +#define TX_IRQ_THR_MASK GENMASK(27, 16)
592 +#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
594 +#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
595 +#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
597 +#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
598 +#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
599 +#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
601 +#define REG_TX_RING_BASE(_n) \
602 + (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
604 +#define REG_TX_RING_BLOCKING(_n) \
605 + (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
607 +#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
608 +#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
609 +#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
610 +#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
611 +#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
613 +#define REG_TX_CPU_IDX(_n) \
614 + (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
616 +#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
618 +#define REG_TX_DMA_IDX(_n) \
619 + (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
621 +#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
623 +#define IRQ_RING_IDX_MASK GENMASK(20, 16)
624 +#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
626 +#define REG_RX_RING_BASE(_n) \
627 + (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
629 +#define REG_RX_RING_SIZE(_n) \
630 + (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
632 +#define RX_RING_THR_MASK GENMASK(31, 16)
633 +#define RX_RING_SIZE_MASK GENMASK(15, 0)
635 +#define REG_RX_CPU_IDX(_n) \
636 + (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
638 +#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
640 +#define REG_RX_DMA_IDX(_n) \
641 + (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
643 +#define REG_RX_DELAY_INT_IDX(_n) \
644 + (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
646 +#define RX_DELAY_INT_MASK GENMASK(15, 0)
648 +#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
650 +#define REG_INGRESS_TRTCM_CFG 0x0070
651 +#define INGRESS_TRTCM_EN_MASK BIT(31)
652 +#define INGRESS_TRTCM_MODE_MASK BIT(30)
653 +#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
654 +#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
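+/* one QUEUE_CLOSE_CFG register covers four channels, each owning an
+ * 8-bit queue-disable bitmap
+ */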
656 +#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
657 +#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+#define REG_TXQ_DIS_CFG_BASE(_n)	((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m)		(REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
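+/* hw counters: a counter can be bound to a single source/channel/queue/
+ * dscp-ring tuple or aggregate across all of them
+ */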
662 +#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
663 +#define CNTR_EN_MASK BIT(31)
664 +#define CNTR_ALL_CHAN_EN_MASK BIT(30)
665 +#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
666 +#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
667 +#define CNTR_SRC_MASK GENMASK(27, 24)
668 +#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
669 +#define CNTR_CHAN_MASK GENMASK(7, 3)
670 +#define CNTR_QUEUE_MASK GENMASK(2, 0)
672 +#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
674 +#define REG_LMGR_INIT_CFG 0x1000
675 +#define LMGR_INIT_START BIT(31)
676 +#define LMGR_SRAM_MODE_MASK BIT(30)
677 +#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
678 +#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
680 +#define REG_FWD_DSCP_LOW_THR 0x1004
681 +#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
683 +#define REG_EGRESS_RATE_METER_CFG 0x100c
684 +#define EGRESS_RATE_METER_EN_MASK BIT(31)
685 +#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
686 +#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
687 +#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
689 +#define REG_EGRESS_TRTCM_CFG 0x1010
690 +#define EGRESS_TRTCM_EN_MASK BIT(31)
691 +#define EGRESS_TRTCM_MODE_MASK BIT(30)
692 +#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
693 +#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
695 +#define TRTCM_PARAM_RW_MASK BIT(31)
696 +#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
697 +#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
698 +#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
699 +#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
700 +#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
702 +#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
703 +#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
704 +#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
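+/* trTCM (two-rate three-color marker) parameters are read and written
+ * indirectly through the CFG/DATA window above
+ */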
706 +#define REG_TXWRR_MODE_CFG 0x1020
707 +#define TWRR_WEIGHT_SCALE_MASK BIT(31)
708 +#define TWRR_WEIGHT_BASE_MASK BIT(3)
710 +#define REG_TXWRR_WEIGHT_CFG 0x1024
711 +#define TWRR_RW_CMD_MASK BIT(31)
712 +#define TWRR_RW_CMD_DONE BIT(30)
713 +#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
714 +#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
715 +#define TWRR_VALUE_MASK GENMASK(15, 0)
717 +#define REG_PSE_BUF_USAGE_CFG 0x1028
718 +#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
720 +#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
721 +#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
723 +#define REG_GLB_TRTCM_CFG 0x1080
724 +#define GLB_TRTCM_EN_MASK BIT(31)
725 +#define GLB_TRTCM_MODE_MASK BIT(30)
726 +#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
727 +#define GLB_FAST_TICK_MASK GENMASK(15, 0)
729 +#define REG_TXQ_CNGST_CFG 0x10a0
730 +#define TXQ_CNGST_DROP_EN BIT(31)
731 +#define TXQ_CNGST_DEI_DROP_EN BIT(30)
733 +#define REG_SLA_TRTCM_CFG 0x1150
734 +#define SLA_TRTCM_EN_MASK BIT(31)
735 +#define SLA_TRTCM_MODE_MASK BIT(30)
736 +#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
737 +#define SLA_FAST_TICK_MASK GENMASK(15, 0)
740 +#define QDMA_DESC_DONE_MASK BIT(31)
741 +#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
742 +#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
743 +#define QDMA_DESC_DEI_MASK BIT(25)
744 +#define QDMA_DESC_NO_DROP_MASK BIT(24)
745 +#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
747 +#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
749 +#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
750 +#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
751 +#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
752 +#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
753 +#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
754 +#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
755 +#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
756 +#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
757 +#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
758 +#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
760 +#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
761 +#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
762 +#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
763 +#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
764 +#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
765 +#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
766 +#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
767 +#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
768 +#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
771 +#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
772 +#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
773 +#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
774 +#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
775 +#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
776 +#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
777 +#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
778 +#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
779 +#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
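+/* hw dma descriptor: ctrl/addr/data words plus four message words
+ * (msg0-msg3) carrying tx/rx metadata
+ */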
781 +struct airoha_qdma_desc {
793 +#define QDMA_FWD_DESC_CTX_MASK BIT(31)
794 +#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
795 +#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
796 +#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
798 +#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
800 +#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
802 +struct airoha_qdma_fwd_desc {
831 + XSI_PCIE0_VIP_PORT_MASK = BIT(22),
832 + XSI_PCIE1_VIP_PORT_MASK = BIT(23),
833 + XSI_USB_VIP_PORT_MASK = BIT(25),
834 + XSI_ETH_VIP_PORT_MASK = BIT(24),
838 + DEV_STATE_INITIALIZED,
842 + CDM_CRSN_QSEL_Q1 = 1,
843 + CDM_CRSN_QSEL_Q5 = 5,
844 + CDM_CRSN_QSEL_Q6 = 6,
845 + CDM_CRSN_QSEL_Q15 = 15,
850 + CRSN_21 = 0x15, /* KA */
851 + CRSN_22 = 0x16, /* hit bind and force route to CPU */
868 + FE_PSE_PORT_DROP = 0xf,
871 +enum tx_sched_mode {
882 +enum trtcm_param_type {
883 + TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
884 + TRTCM_TOKEN_RATE_MODE,
885 + TRTCM_BUCKETSIZE_SHIFT_MODE,
886 + TRTCM_BUCKET_COUNTER_MODE,
889 +enum trtcm_mode_type {
895 + TRTCM_TICK_SEL = BIT(0),
896 + TRTCM_PKT_MODE = BIT(1),
897 + TRTCM_METER_MODE = BIT(2),
900 +#define MIN_TOKEN_SIZE 4096
901 +#define MAX_TOKEN_SIZE_OFFSET 17
902 +#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
903 +#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
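+/* token rate is a fixed-point value: 18 integer bits plus a 6-bit fraction */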
905 +struct airoha_queue_entry {
908 + struct sk_buff *skb;
910 + dma_addr_t dma_addr;
914 +struct airoha_queue {
915 + struct airoha_qdma *qdma;
917 + /* protect concurrent queue accesses */
919 + struct airoha_queue_entry *entry;
920 + struct airoha_qdma_desc *desc;
929 + struct napi_struct napi;
930 + struct page_pool *page_pool;
933 +struct airoha_tx_irq_queue {
934 + struct airoha_qdma *qdma;
936 + struct napi_struct napi;
942 +struct airoha_hw_stats {
943 + /* protect concurrent hw_stats accesses */
945 + struct u64_stats_sync syncp;
957 + u64 rx_over_errors;
958 + /* ethtool stats */
968 +struct airoha_qdma {
969 + struct airoha_eth *eth;
970 + void __iomem *regs;
972 + /* protect concurrent irqmask accesses */
973 + spinlock_t irq_lock;
974 + u32 irqmask[QDMA_INT_REG_MAX];
977 + struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
979 + struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
980 + struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
982 + /* descriptor and packet buffers for qdma hw forward */
989 +struct airoha_gdm_port {
990 + struct airoha_qdma *qdma;
991 + struct net_device *dev;
994 + struct airoha_hw_stats stats;
996 + DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
998 + /* qos stats counters */
999 + u64 cpu_tx_packets;
1000 + u64 fwd_tx_packets;
1003 +struct airoha_eth {
1004 + struct device *dev;
1006 + unsigned long state;
1007 + void __iomem *fe_regs;
1009 + struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
1010 + struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
1012 + struct net_device *napi_dev;
1014 + struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
1015 + struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+static u32 airoha_rr(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+{
+	writel(val, base + offset);
+}
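+/* read-modify-write: preserve the bits outside @mask, then OR in @val */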
+static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+	val |= (airoha_rr(base, offset) & ~mask);
+	airoha_wr(base, offset, val);
+
+	return val;
+}
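+/* thin wrappers binding the raw accessors to the frame engine (fe) and
+ * QDMA register bases
+ */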
1036 +#define airoha_fe_rr(eth, offset) \
1037 + airoha_rr((eth)->fe_regs, (offset))
1038 +#define airoha_fe_wr(eth, offset, val) \
1039 + airoha_wr((eth)->fe_regs, (offset), (val))
1040 +#define airoha_fe_rmw(eth, offset, mask, val) \
1041 + airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
1042 +#define airoha_fe_set(eth, offset, val) \
1043 + airoha_rmw((eth)->fe_regs, (offset), 0, (val))
1044 +#define airoha_fe_clear(eth, offset, val) \
1045 + airoha_rmw((eth)->fe_regs, (offset), (val), 0)
1047 +#define airoha_qdma_rr(qdma, offset) \
1048 + airoha_rr((qdma)->regs, (offset))
1049 +#define airoha_qdma_wr(qdma, offset, val) \
1050 + airoha_wr((qdma)->regs, (offset), (val))
1051 +#define airoha_qdma_rmw(qdma, offset, mask, val) \
1052 + airoha_rmw((qdma)->regs, (offset), (mask), (val))
1053 +#define airoha_qdma_set(qdma, offset, val) \
1054 + airoha_rmw((qdma)->regs, (offset), 0, (val))
1055 +#define airoha_qdma_clear(qdma, offset, val) \
1056 + airoha_rmw((qdma)->regs, (offset), (val), 0)
1058 +static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
1059 + u32 clear, u32 set)
+{
+	unsigned long flags;
+
+	if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
+		return;
+
1066 + spin_lock_irqsave(&qdma->irq_lock, flags);
1068 + qdma->irqmask[index] &= ~clear;
1069 + qdma->irqmask[index] |= set;
1070 + airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
1071 + /* Read irq_enable register in order to guarantee the update above
+	 * completes in the spinlock critical section.
+	 */
1074 + airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
+	spin_unlock_irqrestore(&qdma->irq_lock, flags);
+}
+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
+				   u32 mask)
+{
+	airoha_qdma_set_irqmask(qdma, index, 0, mask);
+}
+
+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
+				    u32 mask)
+{
+	airoha_qdma_set_irqmask(qdma, index, mask, 0);
+}
+static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+{
+	/* GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
+	 * GDM{2,3,4} can be used as WAN ports connected to an external
+	 * PHY module.
+	 */
+	return port->id == 1;
+}
1100 +static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
+{
+	struct airoha_eth *eth = port->qdma->eth;
+	u32 val, reg;
+
1105 + reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
1106 + : REG_FE_WAN_MAC_H;
1107 + val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
1108 + airoha_fe_wr(eth, reg, val);
1110 + val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
1111 + airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
+	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+}
1115 +static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
+					u32 val)
+{
+	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
1119 + FIELD_PREP(GDM_OCFQ_MASK, val));
1120 + airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
1121 + FIELD_PREP(GDM_MCFQ_MASK, val));
1122 + airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
1123 + FIELD_PREP(GDM_BCFQ_MASK, val));
1124 + airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
+		      FIELD_PREP(GDM_UCFQ_MASK, val));
+}
1128 +static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+{
+	u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
+	u32 vip_port, cfg_addr;
+
+	switch (port) {
+	case XSI_PCIE0_PORT:
+		vip_port = XSI_PCIE0_VIP_PORT_MASK;
+		cfg_addr = REG_GDM_FWD_CFG(3);
+		break;
+	case XSI_PCIE1_PORT:
+		vip_port = XSI_PCIE1_VIP_PORT_MASK;
+		cfg_addr = REG_GDM_FWD_CFG(3);
+		break;
+	case XSI_USB_PORT:
+		vip_port = XSI_USB_VIP_PORT_MASK;
+		cfg_addr = REG_GDM_FWD_CFG(4);
+		break;
+	case XSI_ETH_PORT:
+		vip_port = XSI_ETH_VIP_PORT_MASK;
+		cfg_addr = REG_GDM_FWD_CFG(4);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (enable) {
1155 + airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
1156 + airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
+	} else {
+		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
+		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
+	}
+
+	airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
+
+	return 0;
+}
1167 +static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
+{
+	const int port_list[] = {
+		XSI_PCIE0_PORT,
+		XSI_PCIE1_PORT,
+		XSI_USB_PORT,
+		XSI_ETH_PORT
+	};
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(port_list); i++) {
+		err = airoha_set_gdm_port(eth, port_list[i], enable);
+		if (err)
+			goto error;
+	}
+
+	return 0;
+
+error:
+	for (i--; i >= 0; i--)
+		airoha_set_gdm_port(eth, port_list[i], false);
+
+	return err;
+}
+static void airoha_fe_maccr_init(struct airoha_eth *eth)
+{
+	int p;
+
1196 + for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
1197 + airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
1198 + GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
1199 + GDM_DROP_CRC_ERR);
1200 + airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
1201 + FE_PSE_PORT_CDM1);
1202 + airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
1203 + GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
1204 + FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+			      FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
+	}
+
1208 + airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
1209 + FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
+	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
+}
+
+static void airoha_fe_vip_setup(struct airoha_eth *eth)
+{
1216 + airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
1217 + airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
1219 + airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
1220 + airoha_fe_wr(eth, REG_FE_VIP_EN(4),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
1224 + airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
1225 + airoha_fe_wr(eth, REG_FE_VIP_EN(6),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
1229 + airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
1230 + airoha_fe_wr(eth, REG_FE_VIP_EN(7),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
1234 + /* BOOTP (0x43) */
1235 + airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
1236 + airoha_fe_wr(eth, REG_FE_VIP_EN(8),
1237 + PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
1238 + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
1240 + /* BOOTP (0x44) */
1241 + airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
1242 + airoha_fe_wr(eth, REG_FE_VIP_EN(9),
1243 + PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
1244 + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
1247 + airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
1248 + airoha_fe_wr(eth, REG_FE_VIP_EN(10),
1249 + PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
1250 + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
1252 + airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
1253 + airoha_fe_wr(eth, REG_FE_VIP_EN(11),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
+	/* DHCPv6 */
1258 + airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
1259 + airoha_fe_wr(eth, REG_FE_VIP_EN(12),
1260 + PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
1261 + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
1263 + airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
1264 + airoha_fe_wr(eth, REG_FE_VIP_EN(19),
+		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+		     PATN_EN_MASK);
+
1268 + /* ETH->ETH_P_1905 (0x893a) */
1269 + airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
1270 + airoha_fe_wr(eth, REG_FE_VIP_EN(20),
1271 + PATN_FCPU_EN_MASK | PATN_EN_MASK);
1273 + airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
1274 + airoha_fe_wr(eth, REG_FE_VIP_EN(21),
+		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
+}
1278 +static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
+					     u32 port, u32 queue)
+{
+	u32 val;
+
1283 + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
1284 + PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
1285 + FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
1286 + FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
1287 + val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
+	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
+}
1292 +static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
1293 + u32 port, u32 queue, u32 val)
1295 + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
1296 + FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
1297 + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
1298 + PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
1299 + PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
1300 + FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
1301 + FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
+		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
+}
+static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
+{
+	u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+
+	return FIELD_GET(PSE_ALLRSV_MASK, val);
+}
1312 +static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
+				    u32 port, u32 queue, u32 val)
+{
1315 + u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
1316 + u32 tmp, all_rsv, fq_limit;
1318 + airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
1320 + /* modify all rsv */
1321 + all_rsv = airoha_fe_get_pse_all_rsv(eth);
1322 + all_rsv += (val - orig_val);
1323 + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
1324 + FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
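+	/* derive the shared-buffer high/medium/low usage thresholds from
+	 * the free-queue page limit
+	 */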
1327 + tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
1328 + fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
1329 + tmp = fq_limit - all_rsv - 0x20;
1330 + airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
1331 + PSE_SHARE_USED_HTHD_MASK,
1332 + FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
1334 + tmp = fq_limit - all_rsv - 0x100;
1335 + airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
1336 + PSE_SHARE_USED_MTHD_MASK,
1337 + FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
1338 + tmp = (3 * tmp) >> 2;
1339 + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
1340 + PSE_SHARE_USED_LTHD_MASK,
+		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
+
+	return 0;
+}
+static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
+{
1348 + const u32 pse_port_num_queues[] = {
1349 + [FE_PSE_PORT_CDM1] = 6,
1350 + [FE_PSE_PORT_GDM1] = 6,
1351 + [FE_PSE_PORT_GDM2] = 32,
1352 + [FE_PSE_PORT_GDM3] = 6,
1353 + [FE_PSE_PORT_PPE1] = 4,
1354 + [FE_PSE_PORT_CDM2] = 6,
1355 + [FE_PSE_PORT_CDM3] = 8,
1356 + [FE_PSE_PORT_CDM4] = 10,
1357 + [FE_PSE_PORT_PPE2] = 4,
1358 + [FE_PSE_PORT_GDM4] = 2,
+		[FE_PSE_PORT_CDM5] = 2,
+	};
+	u32 all_rsv;
+	int q;
+
1364 + all_rsv = airoha_fe_get_pse_all_rsv(eth);
1365 + /* hw misses PPE2 oq rsv */
1366 + all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
1367 + airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
1370 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
1371 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
1372 + PSE_QUEUE_RSV_PAGES);
1374 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
1375 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
1376 + PSE_QUEUE_RSV_PAGES);
1378 + for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
1379 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
1381 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
1382 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
1383 + PSE_QUEUE_RSV_PAGES);
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
+		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
+			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
+						 PSE_QUEUE_RSV_PAGES);
+		else
+			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
+	}
+
1393 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
1394 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
1395 + PSE_QUEUE_RSV_PAGES);
1397 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
1398 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
1400 + for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
1401 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
1402 + PSE_QUEUE_RSV_PAGES);
+	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
+		if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
+			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
+						 PSE_QUEUE_RSV_PAGES);
+		else
+			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
+	}
+
1412 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
1413 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
1414 + PSE_QUEUE_RSV_PAGES);
1416 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
1417 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
+					  PSE_QUEUE_RSV_PAGES);
+}
+
+static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
+		int err, j;
+		u32 val;
+
+		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
1431 + val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
1432 + MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
1433 + airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
1434 + err = read_poll_timeout(airoha_fe_rr, val,
1435 + val & MC_VLAN_CFG_CMD_DONE_MASK,
1436 + USEC_PER_MSEC, 5 * USEC_PER_MSEC,
+					false, eth, REG_MC_VLAN_CFG);
+		if (err)
+			return err;
+
1441 + for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
1442 + airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
1444 + val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
1445 + FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
1446 + MC_VLAN_CFG_RW_MASK;
1447 + airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
1448 + err = read_poll_timeout(airoha_fe_rr, val,
1449 + val & MC_VLAN_CFG_CMD_DONE_MASK,
+						5 * USEC_PER_MSEC, false, eth,
+						REG_MC_VLAN_CFG);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
+{
1463 + /* CDM1_CRSN_QSEL */
1464 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
1465 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
1466 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
1467 + CDM_CRSN_QSEL_Q1));
1468 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
1469 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
1470 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
1471 + CDM_CRSN_QSEL_Q1));
1472 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
1473 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
1474 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
1475 + CDM_CRSN_QSEL_Q1));
1476 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
1477 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
1478 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
1479 + CDM_CRSN_QSEL_Q6));
1480 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
1481 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
1482 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
1483 + CDM_CRSN_QSEL_Q1));
1484 + /* CDM2_CRSN_QSEL */
1485 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
1486 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
1487 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
1488 + CDM_CRSN_QSEL_Q1));
1489 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
1490 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
1491 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
1492 + CDM_CRSN_QSEL_Q1));
1493 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
1494 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
1495 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
1496 + CDM_CRSN_QSEL_Q1));
1497 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
1498 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
1499 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
1500 + CDM_CRSN_QSEL_Q6));
1501 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
1502 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
1503 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+			      CDM_CRSN_QSEL_Q1));
+}
+
+static int airoha_fe_init(struct airoha_eth *eth)
+{
1509 + airoha_fe_maccr_init(eth);
1511 + /* PSE IQ reserve */
1512 + airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
1513 + FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
1514 + airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
1515 + PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
1516 + FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
1517 + FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
1519 + /* enable FE copy engine for MC/KA/DPI */
1520 + airoha_fe_wr(eth, REG_FE_PCE_CFG,
1521 + PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
1522 + /* set vip queue selection to ring 1 */
1523 + airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
1524 + FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
1525 + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
1526 + FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
1527 + /* set GDM4 source interface offset to 8 */
1528 + airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
1529 + GDM4_SPORT_OFF2_MASK |
1530 + GDM4_SPORT_OFF1_MASK |
1531 + GDM4_SPORT_OFF0_MASK,
1532 + FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
1533 + FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
1534 + FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
1536 + /* set PSE Page as 128B */
1537 + airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
1538 + FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
1539 + FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
1540 + FE_DMA_GLO_PG_SZ_MASK);
1541 + airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
1542 + FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
1543 + FE_RST_GDM4_MBI_ARB_MASK);
1544 + usleep_range(1000, 2000);
1546 + /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
+	 * connect other rings to PSE Port0 OQ-0
+	 */
1549 + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
1550 + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
1551 + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
1552 + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
1554 + airoha_fe_vip_setup(eth);
1555 + airoha_fe_pse_ports_init(eth);
1557 + airoha_fe_set(eth, REG_GDM_MISC_CFG,
1558 + GDM2_RDM_ACK_WAIT_PREF_MASK |
1559 + GDM2_CHN_VLD_MODE_MASK);
1560 + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
1561 + FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
1563 + /* init fragment and assemble Force Port */
1564 + /* NPU Core-3, NPU Bridge Channel-3 */
1565 + airoha_fe_rmw(eth, REG_IP_FRAG_FP,
1566 + IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
1567 + FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
1568 + FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
1569 + /* QDMA LAN, RX Ring-22 */
1570 + airoha_fe_rmw(eth, REG_IP_FRAG_FP,
1571 + IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
1572 + FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
1573 + FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
1575 + airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
1576 + airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
1578 + airoha_fe_crsn_qsel_init(eth);
1580 + airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
1581 + airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
1583 + /* default aging mode for mbi unlock issue */
1584 + airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
1585 + MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
1586 + FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
1587 + FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
1589 + /* disable IFC by default */
1590 + airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
1592 + /* enable 1:N vlan action, init vlan table */
1593 + airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
+	return airoha_fe_mc_vlan_clear(eth);
+}
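+/* refill the rx ring with page_pool buffers and publish the new head
+ * index to the hw via REG_RX_CPU_IDX
+ */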
1598 +static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
+{
+	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
1601 + struct airoha_qdma *qdma = q->qdma;
1602 + struct airoha_eth *eth = qdma->eth;
1603 + int qid = q - &qdma->q_rx[0];
1606 + while (q->queued < q->ndesc - 1) {
1607 + struct airoha_queue_entry *e = &q->entry[q->head];
1608 + struct airoha_qdma_desc *desc = &q->desc[q->head];
1609 + struct page *page;
+		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
+						q->buf_size);
+		if (!page)
+			break;
+
+		q->queued++;
+		q->head = (q->head + 1) % q->ndesc;
1622 + e->buf = page_address(page) + offset;
1623 + e->dma_addr = page_pool_get_dma_addr(page) + offset;
1624 + e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
1626 + dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
1629 + val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
1630 + WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
1631 + WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
1632 + val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
1633 + WRITE_ONCE(desc->data, cpu_to_le32(val));
1634 + WRITE_ONCE(desc->msg0, 0);
1635 + WRITE_ONCE(desc->msg1, 0);
1636 + WRITE_ONCE(desc->msg2, 0);
1637 + WRITE_ONCE(desc->msg3, 0);
1639 + airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
1640 + RX_RING_CPU_IDX_MASK,
1641 + FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
1647 +static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
1648 + struct airoha_qdma_desc *desc)
1650 + u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
+	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
+	switch (sport) {
+	case 0x10 ... 0x13:
+		port = 0;
+		break;
+	case 0x2 ... 0x4:
+		port = sport - 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
+}
1667 +static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
+{
+	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
1670 + struct airoha_qdma *qdma = q->qdma;
1671 + struct airoha_eth *eth = qdma->eth;
+	int qid = q - &qdma->q_rx[0];
+	int done = 0;
+
1675 + while (done < budget) {
1676 + struct airoha_queue_entry *e = &q->entry[q->tail];
1677 + struct airoha_qdma_desc *desc = &q->desc[q->tail];
1678 + dma_addr_t dma_addr = le32_to_cpu(desc->addr);
1679 + u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+		struct sk_buff *skb;
+		int len, p;
+
1683 + if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
+		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+		if (!len)
+			break;
+
+		q->tail = (q->tail + 1) % q->ndesc;
+		q->queued--;
+
1696 + dma_sync_single_for_cpu(eth->dev, dma_addr,
1697 + SKB_WITH_OVERHEAD(q->buf_size), dir);
1699 + p = airoha_qdma_get_gdm_port(eth, desc);
1700 + if (p < 0 || !eth->ports[p]) {
1701 + page_pool_put_full_page(q->page_pool,
+						virt_to_head_page(e->buf),
+						true);
+			continue;
+		}
+
+		skb = napi_build_skb(e->buf, q->buf_size);
+		if (!skb) {
+			page_pool_put_full_page(q->page_pool,
+						virt_to_head_page(e->buf),
+						true);
+			break;
+		}
+
+		skb_reserve(skb, 2);
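+		/* the 2-byte reserve matches the rx 2B offset the hw is
+		 * presumably configured with (GLOBAL_CFG_RX_2B_OFFSET)
+		 */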
1716 + __skb_put(skb, len);
1717 + skb_mark_for_recycle(skb);
1718 + skb->dev = eth->ports[p]->dev;
1719 + skb->protocol = eth_type_trans(skb, skb->dev);
1720 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1721 + skb_record_rx_queue(skb, qid);
+		napi_gro_receive(&q->napi, skb);
+
+		done++;
+	}
+
+	airoha_qdma_fill_rx_queue(q);
+
+	return done;
+}
1731 +static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+	int cur, done = 0;
+
+	do {
+		cur = airoha_qdma_rx_process(q, budget - done);
+		done += cur;
+	} while (cur && done < budget);
1741 + if (done < budget && napi_complete(napi))
1742 + airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
+				       RX_DONE_INT_MASK);
+
+	return done;
+}
1748 +static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+				      struct airoha_qdma *qdma, int ndesc)
+{
1751 + const struct page_pool_params pp_params = {
1754 + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
1755 + PP_FLAG_PAGE_FRAG,
1756 + .dma_dir = DMA_FROM_DEVICE,
1757 + .max_len = PAGE_SIZE,
1758 + .nid = NUMA_NO_NODE,
+		.dev = qdma->eth->dev,
+		.napi = &q->napi,
+	};
1762 + struct airoha_eth *eth = qdma->eth;
1763 + int qid = q - &qdma->q_rx[0], thr;
1764 + dma_addr_t dma_addr;
+	q->buf_size = PAGE_SIZE / 2;
+	q->ndesc = ndesc;
+	q->qdma = qdma;
+
+	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
1775 + q->page_pool = page_pool_create(&pp_params);
1776 + if (IS_ERR(q->page_pool)) {
1777 + int err = PTR_ERR(q->page_pool);
+		q->page_pool = NULL;
+		return err;
+	}
+
1783 + q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+				    &dma_addr, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
1788 + netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
1790 + airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
1791 + airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
1792 + RX_RING_SIZE_MASK,
1793 + FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
1795 + thr = clamp(ndesc >> 3, 1, 32);
1796 + airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
1797 + FIELD_PREP(RX_RING_THR_MASK, thr));
1798 + airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
1799 + FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+	airoha_qdma_fill_rx_queue(q);
+
+	return 0;
+}
1806 +static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
+{
+	struct airoha_eth *eth = q->qdma->eth;
+
1810 + while (q->queued) {
1811 + struct airoha_queue_entry *e = &q->entry[q->tail];
1812 + struct page *page = virt_to_head_page(e->buf);
1814 + dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
1815 + page_pool_get_dma_dir(q->page_pool));
1816 + page_pool_put_full_page(q->page_pool, page, false);
1817 + q->tail = (q->tail + 1) % q->ndesc;
1822 +static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
1826 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
1829 + if (!(RX_DONE_INT_MASK & BIT(i))) {
1830 +			/* rx-queue not bound to an irq */
1834 + err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
1843 +static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
1845 + struct airoha_tx_irq_queue *irq_q;
1846 + int id, done = 0, irq_queued;
1847 + struct airoha_qdma *qdma;
1848 + struct airoha_eth *eth;
1851 + irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
1852 + qdma = irq_q->qdma;
1853 + id = irq_q - &qdma->q_tx_irq[0];
1856 + status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
1857 + head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
1858 + head = head % irq_q->size;
1859 + irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
1861 + while (irq_queued > 0 && done < budget) {
1862 + u32 qid, val = irq_q->q[head];
1863 + struct airoha_qdma_desc *desc;
1864 + struct airoha_queue_entry *e;
1865 + struct airoha_queue *q;
1866 + u32 index, desc_ctrl;
1867 + struct sk_buff *skb;
1872 + irq_q->q[head] = 0xff; /* mark as done */
1873 + head = (head + 1) % irq_q->size;
1877 + qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
1878 + if (qid >= ARRAY_SIZE(qdma->q_tx))
1881 + q = &qdma->q_tx[qid];
1885 + index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
1886 + if (index >= q->ndesc)
1889 + spin_lock_bh(&q->lock);
1894 + desc = &q->desc[index];
1895 + desc_ctrl = le32_to_cpu(desc->ctrl);
1897 + if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
1898 + !(desc_ctrl & QDMA_DESC_DROP_MASK))
1901 + e = &q->entry[index];
1904 + dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
1906 + memset(e, 0, sizeof(*e));
1907 + WRITE_ONCE(desc->msg0, 0);
1908 + WRITE_ONCE(desc->msg1, 0);
1911 +		/* The completion ring can report out-of-order indexes if hw QoS
1912 +		 * is enabled and packets with different priorities are queued
1913 +		 * to the same DMA ring. Account for possible out-of-order
1914 +		 * reports when incrementing the DMA ring tail pointer.
1916 + while (q->tail != q->head && !q->entry[q->tail].dma_addr)
1917 + q->tail = (q->tail + 1) % q->ndesc;
1920 + u16 queue = skb_get_queue_mapping(skb);
1921 + struct netdev_queue *txq;
1923 + txq = netdev_get_tx_queue(skb->dev, queue);
1924 + netdev_tx_completed_queue(txq, 1, skb->len);
1925 + if (netif_tx_queue_stopped(txq) &&
1926 + q->ndesc - q->queued >= q->free_thr)
1927 + netif_tx_wake_queue(txq);
1929 + dev_kfree_skb_any(skb);
1932 + spin_unlock_bh(&q->lock);
1936 + int i, len = done >> 7;
1938 + for (i = 0; i < len; i++)
1939 + airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
1940 + IRQ_CLEAR_LEN_MASK, 0x80);
1941 + airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
1942 + IRQ_CLEAR_LEN_MASK, (done & 0x7f));
1945 + if (done < budget && napi_complete(napi))
1946 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
1947 + TX_DONE_INT_MASK(id));
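
The tail-advance loop in the completion path deserves a closer look: entries are reclaimed in place, possibly out of order, and the tail pointer then advances only across contiguous reclaimed slots. A minimal model of that bookkeeping (ring size and indices arbitrary):

#include <stdio.h>

#define NDESC 8

/* Non-zero means the slot is still in flight; stands in for
 * q->entry[i].dma_addr in the driver.
 */
static unsigned int busy[NDESC] = { 1, 1, 1, 1, 0, 0, 0, 0 };
static int tail, head = 4;

static void complete(int index)
{
	busy[index] = 0;	/* reclaim in place, possibly out of order */

	/* advance the tail only across contiguous reclaimed slots */
	while (tail != head && !busy[tail])
		tail = (tail + 1) % NDESC;
}

int main(void)
{
	complete(2);			/* out of order: slot 0 still busy */
	printf("tail=%d\n", tail);	/* -> 0 */
	complete(0);			/* slot 1 still blocks the tail */
	printf("tail=%d\n", tail);	/* -> 1 */
	return 0;
}
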
1952 +static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
1953 + struct airoha_qdma *qdma, int size)
1955 + struct airoha_eth *eth = qdma->eth;
1956 + int i, qid = q - &qdma->q_tx[0];
1957 + dma_addr_t dma_addr;
1959 + spin_lock_init(&q->lock);
1962 + q->free_thr = 1 + MAX_SKB_FRAGS;
1964 + q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
1969 + q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
1970 + &dma_addr, GFP_KERNEL);
1974 + for (i = 0; i < q->ndesc; i++) {
1977 + val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
1978 + WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
1981 + /* xmit ring drop default setting */
1982 + airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
1983 + TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
1985 + airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
1986 + airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
1987 + FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
1988 + airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
1989 + FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
1994 +static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
1995 + struct airoha_qdma *qdma, int size)
1997 + int id = irq_q - &qdma->q_tx_irq[0];
1998 + struct airoha_eth *eth = qdma->eth;
1999 + dma_addr_t dma_addr;
2001 + netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
2002 + airoha_qdma_tx_napi_poll);
2003 + irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
2004 + &dma_addr, GFP_KERNEL);
2008 + memset(irq_q->q, 0xff, size * sizeof(u32));
2009 + irq_q->size = size;
2010 + irq_q->qdma = qdma;
2012 + airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
2013 + airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
2014 + FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
2015 + airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
2016 + FIELD_PREP(TX_IRQ_THR_MASK, 1));
2021 +static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
2025 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
2026 + err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
2027 + IRQ_QUEUE_LEN(i));
2032 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2033 + err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
2042 +static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
2044 + struct airoha_eth *eth = q->qdma->eth;
2046 + spin_lock_bh(&q->lock);
2047 + while (q->queued) {
2048 + struct airoha_queue_entry *e = &q->entry[q->tail];
2050 + dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
2052 + dev_kfree_skb_any(e->skb);
2055 + q->tail = (q->tail + 1) % q->ndesc;
2058 + spin_unlock_bh(&q->lock);
2061 +static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
2063 + struct airoha_eth *eth = qdma->eth;
2064 + dma_addr_t dma_addr;
2068 + size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
2069 + qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
2071 + if (!qdma->hfwd.desc)
2074 + airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
2076 + size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
2077 + qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
2079 + if (!qdma->hfwd.q)
2082 + airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
2084 + airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
2085 + HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
2086 + FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
2087 + airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
2088 + FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
2089 + airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
2090 + LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
2091 + HW_FWD_DESC_NUM_MASK,
2092 + FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
2095 + return read_poll_timeout(airoha_qdma_rr, status,
2096 + !(status & LMGR_INIT_START), USEC_PER_MSEC,
2097 + 30 * USEC_PER_MSEC, true, qdma,
2098 + REG_LMGR_INIT_CFG);
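
The function ends by polling REG_LMGR_INIT_CFG until the hardware clears LMGR_INIT_START, giving up after 30ms. A userspace model of the read_poll_timeout() semantics used here (the finish-on-third-read behaviour is fabricated for the demo):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static unsigned int fake_reg = 1;	/* bit0 stands in for LMGR_INIT_START */
static int polls;

static unsigned int reg_read(void)
{
	if (++polls == 3)	/* pretend hw finishes on the 3rd read */
		fake_reg = 0;
	return fake_reg;
}

/* Poll until the bit clears or timeout_us elapses; returns 0 or
 * -ETIMEDOUT, like read_poll_timeout() with sleep_before_read=true.
 */
static int poll_timeout(long sleep_us, long timeout_us)
{
	long waited = 0;

	for (;;) {
		usleep(sleep_us);
		waited += sleep_us;
		if (!(reg_read() & 1))	/* !(status & LMGR_INIT_START) */
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
	}
}

int main(void)
{
	printf("ret=%d polls=%d\n", poll_timeout(1000, 30 * 1000), polls);
	return 0;
}
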
2101 +static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
2103 + airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
2104 + airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
2106 + airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
2107 + PSE_BUF_ESTIMATE_EN_MASK);
2109 + airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
2110 + EGRESS_RATE_METER_EN_MASK |
2111 + EGRESS_RATE_METER_EQ_RATE_EN_MASK);
2112 + /* 2047us x 31 = 63.457ms */
2113 + airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
2114 + EGRESS_RATE_METER_WINDOW_SZ_MASK,
2115 + FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
2116 + airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
2117 + EGRESS_RATE_METER_TIMESLICE_MASK,
2118 + FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
2120 + /* ratelimit init */
2121 + airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
2122 + /* fast-tick 25us */
2123 + airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
2124 + FIELD_PREP(GLB_FAST_TICK_MASK, 25));
2125 + airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
2126 + FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
2128 + airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
2129 + airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
2130 + FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
2131 + airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
2132 + EGRESS_SLOW_TICK_RATIO_MASK,
2133 + FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
2135 + airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
2136 + airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
2137 + INGRESS_TRTCM_MODE_MASK);
2138 + airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
2139 + FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
2140 + airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
2141 + INGRESS_SLOW_TICK_RATIO_MASK,
2142 + FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
2144 + airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
2145 + airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
2146 + FIELD_PREP(SLA_FAST_TICK_MASK, 25));
2147 + airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
2148 + FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
2151 +static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
2155 + for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
2156 + /* Tx-cpu transferred count */
2157 + airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
2158 + airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
2159 + CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
2160 + CNTR_ALL_DSCP_RING_EN_MASK |
2161 + FIELD_PREP(CNTR_CHAN_MASK, i));
2162 + /* Tx-fwd transferred count */
2163 + airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
2164 +		airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
2165 + CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
2166 + CNTR_ALL_DSCP_RING_EN_MASK |
2167 + FIELD_PREP(CNTR_SRC_MASK, 1) |
2168 + FIELD_PREP(CNTR_CHAN_MASK, i));
2172 +static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
2176 + /* clear pending irqs */
2177 + for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
2178 + airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
2181 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
2182 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
2183 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
2185 + /* setup irq binding */
2186 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2187 + if (!qdma->q_tx[i].ndesc)
2190 + if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
2191 + airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
2192 + TX_RING_IRQ_BLOCKING_CFG_MASK);
2194 + airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
2195 + TX_RING_IRQ_BLOCKING_CFG_MASK);
2198 + airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
2199 + GLOBAL_CFG_RX_2B_OFFSET_MASK |
2200 + FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
2201 + GLOBAL_CFG_CPU_TXR_RR_MASK |
2202 + GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
2203 + GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
2204 + GLOBAL_CFG_MULTICAST_EN_MASK |
2205 + GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
2206 + GLOBAL_CFG_TX_WB_DONE_MASK |
2207 + FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
2209 + airoha_qdma_init_qos(qdma);
2211 + /* disable qdma rx delay interrupt */
2212 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2213 + if (!qdma->q_rx[i].ndesc)
2216 + airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
2217 + RX_DELAY_INT_MASK);
2220 + airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
2221 + TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
2222 + airoha_qdma_init_qos_stats(qdma);
2227 +static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
2229 + struct airoha_qdma *qdma = dev_instance;
2230 + u32 intr[ARRAY_SIZE(qdma->irqmask)];
2233 + for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
2234 + intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
2235 + intr[i] &= qdma->irqmask[i];
2236 + airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
2239 + if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
2242 + if (intr[1] & RX_DONE_INT_MASK) {
2243 + airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
2244 + RX_DONE_INT_MASK);
2246 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2247 + if (!qdma->q_rx[i].ndesc)
2250 + if (intr[1] & BIT(i))
2251 + napi_schedule(&qdma->q_rx[i].napi);
2255 + if (intr[0] & INT_TX_MASK) {
2256 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
2257 + if (!(intr[0] & TX_DONE_INT_MASK(i)))
2260 + airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
2261 + TX_DONE_INT_MASK(i));
2262 + napi_schedule(&qdma->q_tx_irq[i].napi);
2266 + return IRQ_HANDLED;
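
The handler is a mask-ack-dispatch pattern: read each status word, mask it against the software irqmask, ack what was taken (the status registers appear to be write-1-to-clear, given that the handler writes the pending bits straight back), then disable the source and leave the rest to NAPI. Reduced to a sketch with made-up register values:

#include <stdint.h>
#include <stdio.h>

static uint32_t status = 0x0c, enabled = 0x0f;

int main(void)
{
	uint32_t pending = status & enabled;	/* intr[i] &= irqmask[i] */

	status &= ~pending;	/* ack: assumed write-1-to-clear */
	enabled &= ~pending;	/* mask the source until NAPI is done */

	for (int i = 0; i < 32; i++)
		if (pending & (1u << i))
			printf("schedule NAPI for source %d\n", i);
	return 0;
}
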
2269 +static int airoha_qdma_init(struct platform_device *pdev,
2270 + struct airoha_eth *eth,
2271 + struct airoha_qdma *qdma)
2273 +	int err, id = qdma - &eth->qdma[0];
2276 + spin_lock_init(&qdma->irq_lock);
2279 + res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
2283 + qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
2284 + if (IS_ERR(qdma->regs))
2285 + return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
2286 + "failed to iomap qdma%d regs\n", id);
2288 + qdma->irq = platform_get_irq(pdev, 4 * id);
2289 + if (qdma->irq < 0)
2292 + err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
2293 + IRQF_SHARED, KBUILD_MODNAME, qdma);
2297 + err = airoha_qdma_init_rx(qdma);
2301 + err = airoha_qdma_init_tx(qdma);
2305 + err = airoha_qdma_init_hfwd_queues(qdma);
2309 + return airoha_qdma_hw_init(qdma);
2312 +static int airoha_hw_init(struct platform_device *pdev,
2313 + struct airoha_eth *eth)
2318 + err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
2323 + err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
2328 + err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
2333 + err = airoha_fe_init(eth);
2337 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
2338 +		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
2343 +	set_bit(DEV_STATE_INITIALIZED, &eth->state);
2348 +static void airoha_hw_cleanup(struct airoha_qdma *qdma)
2352 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2353 + if (!qdma->q_rx[i].ndesc)
2356 + netif_napi_del(&qdma->q_rx[i].napi);
2357 + airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
2358 + if (qdma->q_rx[i].page_pool)
2359 + page_pool_destroy(qdma->q_rx[i].page_pool);
2362 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2363 + netif_napi_del(&qdma->q_tx_irq[i].napi);
2365 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2366 + if (!qdma->q_tx[i].ndesc)
2369 + airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
2373 +static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
2377 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2378 + napi_enable(&qdma->q_tx_irq[i].napi);
2380 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2381 + if (!qdma->q_rx[i].ndesc)
2384 + napi_enable(&qdma->q_rx[i].napi);
2388 +static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
2392 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2393 + napi_disable(&qdma->q_tx_irq[i].napi);
2395 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2396 + if (!qdma->q_rx[i].ndesc)
2399 + napi_disable(&qdma->q_rx[i].napi);
2403 +static void airoha_update_hw_stats(struct airoha_gdm_port *port)
2405 + struct airoha_eth *eth = port->qdma->eth;
2408 + spin_lock(&port->stats.lock);
2409 + u64_stats_update_begin(&port->stats.syncp);
2412 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
2413 + port->stats.tx_ok_pkts += ((u64)val << 32);
2414 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
2415 + port->stats.tx_ok_pkts += val;
2417 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
2418 + port->stats.tx_ok_bytes += ((u64)val << 32);
2419 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
2420 + port->stats.tx_ok_bytes += val;
2422 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
2423 + port->stats.tx_drops += val;
2425 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
2426 + port->stats.tx_broadcast += val;
2428 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
2429 + port->stats.tx_multicast += val;
2431 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
2432 + port->stats.tx_len[i] += val;
2434 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
2435 + port->stats.tx_len[i] += ((u64)val << 32);
2436 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
2437 + port->stats.tx_len[i++] += val;
2439 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
2440 + port->stats.tx_len[i] += ((u64)val << 32);
2441 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
2442 + port->stats.tx_len[i++] += val;
2444 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
2445 + port->stats.tx_len[i] += ((u64)val << 32);
2446 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
2447 + port->stats.tx_len[i++] += val;
2449 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
2450 + port->stats.tx_len[i] += ((u64)val << 32);
2451 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
2452 + port->stats.tx_len[i++] += val;
2454 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
2455 + port->stats.tx_len[i] += ((u64)val << 32);
2456 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
2457 + port->stats.tx_len[i++] += val;
2459 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
2460 + port->stats.tx_len[i] += ((u64)val << 32);
2461 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
2462 + port->stats.tx_len[i++] += val;
2464 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
2465 + port->stats.tx_len[i++] += val;
2468 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
2469 + port->stats.rx_ok_pkts += ((u64)val << 32);
2470 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
2471 + port->stats.rx_ok_pkts += val;
2473 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
2474 + port->stats.rx_ok_bytes += ((u64)val << 32);
2475 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
2476 + port->stats.rx_ok_bytes += val;
2478 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
2479 + port->stats.rx_drops += val;
2481 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
2482 + port->stats.rx_broadcast += val;
2484 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
2485 + port->stats.rx_multicast += val;
2487 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
2488 + port->stats.rx_errors += val;
2490 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
2491 + port->stats.rx_crc_error += val;
2493 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
2494 + port->stats.rx_over_errors += val;
2496 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
2497 + port->stats.rx_fragment += val;
2499 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
2500 + port->stats.rx_jabber += val;
2503 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
2504 + port->stats.rx_len[i] += val;
2506 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
2507 + port->stats.rx_len[i] += ((u64)val << 32);
2508 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
2509 + port->stats.rx_len[i++] += val;
2511 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
2512 + port->stats.rx_len[i] += ((u64)val << 32);
2513 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
2514 + port->stats.rx_len[i++] += val;
2516 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
2517 + port->stats.rx_len[i] += ((u64)val << 32);
2518 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
2519 + port->stats.rx_len[i++] += val;
2521 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
2522 + port->stats.rx_len[i] += ((u64)val << 32);
2523 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
2524 + port->stats.rx_len[i++] += val;
2526 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
2527 + port->stats.rx_len[i] += ((u64)val << 32);
2528 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
2529 + port->stats.rx_len[i++] += val;
2531 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
2532 + port->stats.rx_len[i] += ((u64)val << 32);
2533 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
2534 + port->stats.rx_len[i++] += val;
2536 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
2537 + port->stats.rx_len[i++] += val;
2539 + /* reset mib counters */
2540 + airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
2541 + FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
2543 + u64_stats_update_end(&port->stats.syncp);
2544 + spin_unlock(&port->stats.lock);
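
Every MIB counter above is exposed as separate high/low 32-bit registers and accumulated into u64 software counters under the stats lock and u64_stats write sequence; the MIB-clear write at the end makes each read a delta. The accumulation itself, in isolation (register values made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tx_ok_pkts = 0;

	/* hardware exposes one 64-bit counter as two 32-bit halves */
	uint32_t hi = 0x1, lo = 0x2345;

	tx_ok_pkts += (uint64_t)hi << 32;	/* ..._CNT_H(port) */
	tx_ok_pkts += lo;			/* ..._CNT_L(port) */

	printf("0x%llx\n", (unsigned long long)tx_ok_pkts);
	return 0;
}
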
2547 +static int airoha_dev_open(struct net_device *dev)
2549 + struct airoha_gdm_port *port = netdev_priv(dev);
2550 + struct airoha_qdma *qdma = port->qdma;
2553 + netif_tx_start_all_queues(dev);
2554 + err = airoha_set_gdm_ports(qdma->eth, true);
2558 + if (netdev_uses_dsa(dev))
2559 + airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
2560 + GDM_STAG_EN_MASK);
2562 + airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
2563 + GDM_STAG_EN_MASK);
2565 + airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
2566 + GLOBAL_CFG_TX_DMA_EN_MASK |
2567 + GLOBAL_CFG_RX_DMA_EN_MASK);
2572 +static int airoha_dev_stop(struct net_device *dev)
2574 + struct airoha_gdm_port *port = netdev_priv(dev);
2575 + struct airoha_qdma *qdma = port->qdma;
2578 + netif_tx_disable(dev);
2579 + err = airoha_set_gdm_ports(qdma->eth, false);
2583 + airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
2584 + GLOBAL_CFG_TX_DMA_EN_MASK |
2585 + GLOBAL_CFG_RX_DMA_EN_MASK);
2587 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2588 + if (!qdma->q_tx[i].ndesc)
2591 + airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
2592 + netdev_tx_reset_subqueue(dev, i);
2598 +static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
2600 + struct airoha_gdm_port *port = netdev_priv(dev);
2603 + err = eth_mac_addr(dev, p);
2607 + airoha_set_macaddr(port, dev->dev_addr);
2612 +static int airoha_dev_init(struct net_device *dev)
2614 + struct airoha_gdm_port *port = netdev_priv(dev);
2616 + airoha_set_macaddr(port, dev->dev_addr);
2621 +static void airoha_dev_get_stats64(struct net_device *dev,
2622 + struct rtnl_link_stats64 *storage)
2624 + struct airoha_gdm_port *port = netdev_priv(dev);
2625 + unsigned int start;
2627 + airoha_update_hw_stats(port);
2629 + start = u64_stats_fetch_begin(&port->stats.syncp);
2630 + storage->rx_packets = port->stats.rx_ok_pkts;
2631 + storage->tx_packets = port->stats.tx_ok_pkts;
2632 + storage->rx_bytes = port->stats.rx_ok_bytes;
2633 + storage->tx_bytes = port->stats.tx_ok_bytes;
2634 + storage->multicast = port->stats.rx_multicast;
2635 + storage->rx_errors = port->stats.rx_errors;
2636 + storage->rx_dropped = port->stats.rx_drops;
2637 + storage->tx_dropped = port->stats.tx_drops;
2638 + storage->rx_crc_errors = port->stats.rx_crc_error;
2639 + storage->rx_over_errors = port->stats.rx_over_errors;
2640 + } while (u64_stats_fetch_retry(&port->stats.syncp, start));
2643 +static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
2644 + struct net_device *sb_dev)
2646 + struct airoha_gdm_port *port = netdev_priv(dev);
2647 + int queue, channel;
2649 +	/* For DSA devices select the QoS channel according to the DSA
2650 +	 * user port index; rely on the port id otherwise. Select the
2651 +	 * QoS queue based on the skb priority.
2652 +	 */
2653 + channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
2654 + channel = channel % AIROHA_NUM_QOS_CHANNELS;
2655 + queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
2656 + queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
2658 + return queue < dev->num_tx_queues ? queue : 0;
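
So the (channel, priority) pair is flattened into a single TX-queue index: channel = (DSA user port or port id) mod 4, queue = (priority - 1) mod 8, flat index = channel * 8 + queue. A worked example with the two counts copied from the driver defines:

#include <stdio.h>

#define NUM_QOS_CHANNELS 4	/* AIROHA_NUM_QOS_CHANNELS */
#define NUM_QOS_QUEUES   8	/* AIROHA_NUM_QOS_QUEUES */

int main(void)
{
	int port_id = 3, skb_priority = 5;

	int channel = port_id % NUM_QOS_CHANNELS;		/* -> 3 */
	int queue = (skb_priority - 1) % NUM_QOS_QUEUES;	/* -> 4 */
	int txq = channel * NUM_QOS_QUEUES + queue;		/* -> 28 */

	printf("txq=%d\n", txq);
	return 0;
}
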
2661 +static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
2662 + struct net_device *dev)
2664 + struct airoha_gdm_port *port = netdev_priv(dev);
2665 + u32 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
2666 + u32 msg0, msg1, len = skb_headlen(skb);
2667 + struct airoha_qdma *qdma = port->qdma;
2668 + struct netdev_queue *txq;
2669 + struct airoha_queue *q;
2670 + void *data = skb->data;
2675 + qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
2676 + msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
2677 + qid / AIROHA_NUM_QOS_QUEUES) |
2678 + FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
2679 + qid % AIROHA_NUM_QOS_QUEUES);
2680 + if (skb->ip_summed == CHECKSUM_PARTIAL)
2681 + msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
2682 + FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
2683 + FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
2685 + /* TSO: fill MSS info in tcp checksum field */
2686 + if (skb_is_gso(skb)) {
2687 + if (skb_cow_head(skb, 0))
2690 + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
2692 + __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
2694 + tcp_hdr(skb)->check = (__force __sum16)csum;
2695 + msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
2699 + fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
2700 + msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
2701 + FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
2703 + q = &qdma->q_tx[qid];
2704 + if (WARN_ON_ONCE(!q->ndesc))
2707 + spin_lock_bh(&q->lock);
2709 + txq = netdev_get_tx_queue(dev, qid);
2710 + if (q->queued + nr_frags > q->ndesc) {
2711 + /* not enough space in the queue */
2712 + netif_tx_stop_queue(txq);
2713 + spin_unlock_bh(&q->lock);
2714 + return NETDEV_TX_BUSY;
2718 + for (i = 0; i < nr_frags; i++) {
2719 + struct airoha_qdma_desc *desc = &q->desc[index];
2720 + struct airoha_queue_entry *e = &q->entry[index];
2721 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2725 + addr = dma_map_single(dev->dev.parent, data, len,
2727 + if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
2730 + index = (index + 1) % q->ndesc;
2732 + val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
2733 + if (i < nr_frags - 1)
2734 + val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
2735 + WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
2736 + WRITE_ONCE(desc->addr, cpu_to_le32(addr));
2737 + val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
2738 + WRITE_ONCE(desc->data, cpu_to_le32(val));
2739 + WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
2740 + WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
2741 + WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
2743 + e->skb = i ? NULL : skb;
2744 + e->dma_addr = addr;
2747 + data = skb_frag_address(frag);
2748 + len = skb_frag_size(frag);
2754 + skb_tx_timestamp(skb);
2755 + netdev_tx_sent_queue(txq, skb->len);
2757 + if (netif_xmit_stopped(txq) || !netdev_xmit_more())
2758 + airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
2759 + TX_RING_CPU_IDX_MASK,
2760 + FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
2762 + if (q->ndesc - q->queued < q->free_thr)
2763 + netif_tx_stop_queue(txq);
2765 + spin_unlock_bh(&q->lock);
2767 + return NETDEV_TX_OK;
2770 + for (i--; i >= 0; i--) {
2771 + index = (q->head + i) % q->ndesc;
2772 + dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
2773 + q->entry[index].dma_len, DMA_TO_DEVICE);
2776 + spin_unlock_bh(&q->lock);
2778 + dev_kfree_skb_any(skb);
2779 + dev->stats.tx_dropped++;
2781 + return NETDEV_TX_OK;
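
The ring-space policy above is the usual stop-early scheme: after queueing, if fewer than free_thr (1 + MAX_SKB_FRAGS) descriptors remain, stop the queue so a worst-case skb never fails halfway through mapping; the completion path wakes the queue once that headroom is back. The bookkeeping, modeled in userspace:

#include <stdbool.h>
#include <stdio.h>

#define NDESC     32
#define MAX_FRAGS 17			/* stand-in for MAX_SKB_FRAGS */
#define FREE_THR  (1 + MAX_FRAGS)	/* q->free_thr */

static int queued;
static bool stopped;

static bool xmit(int nr_frags)
{
	if (queued + nr_frags > NDESC)	/* should not happen if we       */
		return false;		/* stopped early: NETDEV_TX_BUSY */

	queued += nr_frags;
	if (NDESC - queued < FREE_THR)	/* stop while a worst-case skb  */
		stopped = true;		/* can still be refused cleanly */
	return true;
}

static void tx_complete(int n)
{
	queued -= n;
	if (stopped && NDESC - queued >= FREE_THR)
		stopped = false;	/* netif_tx_wake_queue() */
}

int main(void)
{
	xmit(10);
	xmit(10);	/* free = 12 < 18 -> stop */
	printf("queued=%d stopped=%d\n", queued, stopped);
	tx_complete(10);	/* free = 22 >= 18 -> wake */
	printf("queued=%d stopped=%d\n", queued, stopped);
	return 0;
}
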
2784 +static void airoha_ethtool_get_drvinfo(struct net_device *dev,
2785 + struct ethtool_drvinfo *info)
2787 + struct airoha_gdm_port *port = netdev_priv(dev);
2788 + struct airoha_eth *eth = port->qdma->eth;
2790 + strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
2791 + strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
2794 +static void airoha_ethtool_get_mac_stats(struct net_device *dev,
2795 + struct ethtool_eth_mac_stats *stats)
2797 + struct airoha_gdm_port *port = netdev_priv(dev);
2798 + unsigned int start;
2800 + airoha_update_hw_stats(port);
2802 + start = u64_stats_fetch_begin(&port->stats.syncp);
2803 + stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
2804 + stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
2805 + stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
2806 + } while (u64_stats_fetch_retry(&port->stats.syncp, start));
2809 +static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
2821 +airoha_ethtool_get_rmon_stats(struct net_device *dev,
2822 + struct ethtool_rmon_stats *stats,
2823 + const struct ethtool_rmon_hist_range **ranges)
2825 + struct airoha_gdm_port *port = netdev_priv(dev);
2826 + struct airoha_hw_stats *hw_stats = &port->stats;
2827 + unsigned int start;
2829 + BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2830 + ARRAY_SIZE(hw_stats->tx_len) + 1);
2831 + BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2832 + ARRAY_SIZE(hw_stats->rx_len) + 1);
2834 + *ranges = airoha_ethtool_rmon_ranges;
2835 + airoha_update_hw_stats(port);
2839 + start = u64_stats_fetch_begin(&port->stats.syncp);
2840 + stats->fragments = hw_stats->rx_fragment;
2841 + stats->jabbers = hw_stats->rx_jabber;
2842 + for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
2844 + stats->hist[i] = hw_stats->rx_len[i];
2845 + stats->hist_tx[i] = hw_stats->tx_len[i];
2847 + } while (u64_stats_fetch_retry(&port->stats.syncp, start));
2850 +static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
2851 + int channel, enum tx_sched_mode mode,
2852 + const u16 *weights, u8 n_weights)
2856 + for (i = 0; i < AIROHA_NUM_TX_RING; i++)
2857 + airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
2858 + TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
2860 + for (i = 0; i < n_weights; i++) {
2864 + airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
2865 + TWRR_RW_CMD_MASK |
2866 + FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
2867 + FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
2868 + FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
2869 + err = read_poll_timeout(airoha_qdma_rr, status,
2870 + status & TWRR_RW_CMD_DONE,
2871 + USEC_PER_MSEC, 10 * USEC_PER_MSEC,
2873 + REG_TXWRR_WEIGHT_CFG);
2878 + airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
2879 + CHAN_QOS_MODE_MASK(channel),
2880 + mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
2885 +static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
2888 + static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2890 + return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
2894 +static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
2896 + struct tc_ets_qopt_offload *opt)
2898 + struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
2899 + enum tx_sched_mode mode = TC_SCH_SP;
2900 + u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2901 + int i, nstrict = 0, nwrr, qidx;
2903 + if (p->bands > AIROHA_NUM_QOS_QUEUES)
2906 + for (i = 0; i < p->bands; i++) {
2907 + if (!p->quanta[i])
2911 + /* this configuration is not supported by the hw */
2912 + if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
2915 +	/* The EN7581 SoC supports fixed QoS band priority where WRR queues
2916 +	 * have the lowest priority with respect to SP ones,
2917 +	 * e.g.: WRR0, WRR1, ..., WRRm, SP0, SP1, ..., SPn
2919 + nwrr = p->bands - nstrict;
2920 + qidx = nstrict && nwrr ? nstrict : 0;
2921 + for (i = 1; i <= p->bands; i++) {
2922 + if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
2925 + qidx = i == nwrr ? 0 : qidx + 1;
2928 + for (i = 0; i < nwrr; i++)
2929 + w[i] = p->weights[nstrict + i];
2932 + mode = TC_SCH_WRR8;
2933 + else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
2934 + mode = nstrict + 1;
2936 + return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
2940 +static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
2942 + struct tc_ets_qopt_offload *opt)
2944 + u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
2945 + REG_CNTR_VAL(channel << 1));
2946 + u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
2947 + REG_CNTR_VAL((channel << 1) + 1));
2948 + u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
2949 + (fwd_tx_packets - port->fwd_tx_packets);
2950 + _bstats_update(opt->stats.bstats, 0, tx_packets);
2952 + port->cpu_tx_packets = cpu_tx_packets;
2953 + port->fwd_tx_packets = fwd_tx_packets;
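
The mode = nstrict + 1 encoding above suggests the hardware enumerates mixed SP/WRR configurations by strict-band count, which would also explain why nstrict == AIROHA_NUM_QOS_QUEUES - 1 is rejected earlier as unsupported; that reading is an inference from the arithmetic, not from a header visible here. The stats path, by contrast, is straightforward delta accounting: the REG_CNTR_VAL counters are free-running, so each TC_ETS_STATS query reports the difference against the values cached in the port at the previous query and then refreshes the cache. Sketched with made-up counter values:

#include <stdint.h>
#include <stdio.h>

/* Free-running hw counters for one channel (cpu and fwd paths). */
static uint64_t hw_cpu = 1000, hw_fwd = 500;

/* Values cached in struct airoha_gdm_port at the previous query. */
static uint64_t cached_cpu = 900, cached_fwd = 450;

int main(void)
{
	uint64_t tx_packets = (hw_cpu - cached_cpu) + (hw_fwd - cached_fwd);

	printf("delta tx_packets=%llu\n", (unsigned long long)tx_packets);

	/* remember the raw values for the next TC_ETS_STATS query */
	cached_cpu = hw_cpu;
	cached_fwd = hw_fwd;
	return 0;
}
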
2958 +static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
2959 + struct tc_ets_qopt_offload *opt)
2961 + int channel = TC_H_MAJ(opt->handle) >> 16;
2963 + if (opt->parent == TC_H_ROOT)
2966 + switch (opt->command) {
2967 + case TC_ETS_REPLACE:
2968 + return airoha_qdma_set_tx_ets_sched(port, channel, opt);
2969 + case TC_ETS_DESTROY:
2970 +		/* PRIO is the default qdisc scheduler */
2971 + return airoha_qdma_set_tx_prio_sched(port, channel);
2972 + case TC_ETS_STATS:
2973 + return airoha_qdma_get_tx_ets_stats(port, channel, opt);
2975 + return -EOPNOTSUPP;
2979 +static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
2980 + u32 addr, enum trtcm_param_type param,
2981 + enum trtcm_mode_type mode,
2982 + u32 *val_low, u32 *val_high)
2984 + u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
2985 + u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
2986 + FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
2987 + FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
2988 + FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
2990 + airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2991 + if (read_poll_timeout(airoha_qdma_rr, val,
2992 + val & TRTCM_PARAM_RW_DONE_MASK,
2993 + USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2994 + qdma, REG_TRTCM_CFG_PARAM(addr)))
2995 + return -ETIMEDOUT;
2997 + *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
2999 + *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
3004 +static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
3005 + u32 addr, enum trtcm_param_type param,
3006 + enum trtcm_mode_type mode, u32 val)
3008 + u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
3009 + u32 config = TRTCM_PARAM_RW_MASK |
3010 + FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
3011 + FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
3012 + FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
3013 + FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
3015 + airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
3016 + airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
3018 + return read_poll_timeout(airoha_qdma_rr, val,
3019 + val & TRTCM_PARAM_RW_DONE_MASK,
3020 + USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
3021 + qdma, REG_TRTCM_CFG_PARAM(addr));
3024 +static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
3025 + u32 addr, enum trtcm_mode_type mode,
3026 + bool enable, u32 enable_mask)
3030 + if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
3031 + mode, &val, NULL))
3034 + val = enable ? val | enable_mask : val & ~enable_mask;
3036 + return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
3040 +static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
3041 + int channel, u32 addr,
3042 + enum trtcm_mode_type mode,
3043 + u32 rate_val, u32 bucket_size)
3045 + u32 val, config, tick, unit, rate, rate_frac;
3048 + if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
3049 + mode, &config, NULL))
3052 + val = airoha_qdma_rr(qdma, addr);
3053 + tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
3054 + if (config & TRTCM_TICK_SEL)
3055 + tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
3059 + unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
3063 + rate = rate_val / unit;
3064 + rate_frac = rate_val % unit;
3065 + rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
3066 + rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
3067 + FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
3069 + err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
3070 + TRTCM_TOKEN_RATE_MODE, mode, rate);
3074 + val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
3075 + val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
3077 + return airoha_qdma_set_trtcm_param(qdma, channel, addr,
3078 + TRTCM_BUCKETSIZE_SHIFT_MODE,
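
The token-rate conversion above is worth unpacking: the meter tick programmed in airoha_qdma_init_qos() determines the unit (8000 / tick in bit mode, 1000000 / tick in packet mode), the requested rate splits into an integer token rate plus a fixed-point fraction, and the bucket size is stored as a clamped power-of-two shift. Worked through with the 25us egress fast tick and sample values (the rate and bucket numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tick_us = 25;		/* egress fast tick from init_qos */
	uint32_t rate_val = 100000;	/* requested rate, kbit/s */
	uint32_t bucket = 60000;	/* requested bucket size units */

	/* bit mode: one token-rate unit per (8000 / tick) kbit/s */
	uint32_t unit = 8000 / tick_us;	/* -> 320 */
	uint32_t rate = rate_val / unit;	/* integer part -> 312 */
	uint32_t frac = rate_val % unit;	/* remainder -> 160; the
						 * driver scales this into the
						 * TOKEN_RATE_FRACTION field
						 */

	/* bucket size is programmed as a power-of-two shift, like __fls() */
	uint32_t shift = 31 - __builtin_clz(bucket);	/* -> 15 */

	printf("rate=%u frac=%u/%u shift=%u\n", rate, frac, unit, shift);
	return 0;
}
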
3082 +static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
3083 + int channel, u32 rate,
3088 + for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
3089 + err = airoha_qdma_set_trtcm_config(port->qdma, channel,
3090 + REG_EGRESS_TRTCM_CFG, i,
3091 + !!rate, TRTCM_METER_MODE);
3095 + err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
3096 + REG_EGRESS_TRTCM_CFG,
3097 + i, rate, bucket_size);
3105 +static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
3106 + struct tc_htb_qopt_offload *opt)
3108 + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
3109 + u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
3110 + struct net_device *dev = port->dev;
3111 + int num_tx_queues = dev->real_num_tx_queues;
3114 + if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
3115 + NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
3119 + err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
3121 + NL_SET_ERR_MSG_MOD(opt->extack,
3122 + "failed configuring htb offload");
3126 + if (opt->command == TC_HTB_NODE_MODIFY)
3129 + err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
3131 + airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
3132 + NL_SET_ERR_MSG_MOD(opt->extack,
3133 + "failed setting real_num_tx_queues");
3137 + set_bit(channel, port->qos_sq_bmap);
3138 + opt->qid = AIROHA_NUM_TX_RING + channel;
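
One unit conversion above is easy to misread: tc hands over opt->rate in bytes per second, while the meter is programmed in kbit/s, hence div_u64(opt->rate, 1000) << 3. For example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate_bytes = 12500000;		/* 100 Mbit/s in bytes/s */
	uint32_t rate_kbps = (rate_bytes / 1000) << 3;

	printf("%u kbit/s\n", rate_kbps);	/* -> 100000 */
	return 0;
}
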
3143 +static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
3145 + struct net_device *dev = port->dev;
3147 + netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
3148 + airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
3149 + clear_bit(queue, port->qos_sq_bmap);
3152 +static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
3153 + struct tc_htb_qopt_offload *opt)
3155 + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
3157 + if (!test_bit(channel, port->qos_sq_bmap)) {
3158 + NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
3162 + airoha_tc_remove_htb_queue(port, channel);
3167 +static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
3171 + for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
3172 + airoha_tc_remove_htb_queue(port, q);
3177 +static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
3178 + struct tc_htb_qopt_offload *opt)
3180 + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
3182 + if (!test_bit(channel, port->qos_sq_bmap)) {
3183 + NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
3187 + opt->qid = channel;
3192 +static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
3193 + struct tc_htb_qopt_offload *opt)
3195 + switch (opt->command) {
3196 + case TC_HTB_CREATE:
3198 + case TC_HTB_DESTROY:
3199 + return airoha_tc_htb_destroy(port);
3200 + case TC_HTB_NODE_MODIFY:
3201 + case TC_HTB_LEAF_ALLOC_QUEUE:
3202 + return airoha_tc_htb_alloc_leaf_queue(port, opt);
3203 + case TC_HTB_LEAF_DEL:
3204 + case TC_HTB_LEAF_DEL_LAST:
3205 + case TC_HTB_LEAF_DEL_LAST_FORCE:
3206 + return airoha_tc_htb_delete_leaf_queue(port, opt);
3207 + case TC_HTB_LEAF_QUERY_QUEUE:
3208 + return airoha_tc_get_htb_get_leaf_queue(port, opt);
3210 + return -EOPNOTSUPP;
3216 +static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
3219 + struct airoha_gdm_port *port = netdev_priv(dev);
3222 + case TC_SETUP_QDISC_ETS:
3223 + return airoha_tc_setup_qdisc_ets(port, type_data);
3224 + case TC_SETUP_QDISC_HTB:
3225 + return airoha_tc_setup_qdisc_htb(port, type_data);
3227 + return -EOPNOTSUPP;
3231 +static const struct net_device_ops airoha_netdev_ops = {
3232 + .ndo_init = airoha_dev_init,
3233 + .ndo_open = airoha_dev_open,
3234 + .ndo_stop = airoha_dev_stop,
3235 + .ndo_select_queue = airoha_dev_select_queue,
3236 + .ndo_start_xmit = airoha_dev_xmit,
3237 + .ndo_get_stats64 = airoha_dev_get_stats64,
3238 + .ndo_set_mac_address = airoha_dev_set_macaddr,
3239 + .ndo_setup_tc = airoha_dev_tc_setup,
3242 +static const struct ethtool_ops airoha_ethtool_ops = {
3243 + .get_drvinfo = airoha_ethtool_get_drvinfo,
3244 + .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
3245 + .get_rmon_stats = airoha_ethtool_get_rmon_stats,
3248 +static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
3250 + const __be32 *id_ptr = of_get_property(np, "reg", NULL);
3251 + struct airoha_gdm_port *port;
3252 + struct airoha_qdma *qdma;
3253 + struct net_device *dev;
3258 + dev_err(eth->dev, "missing gdm port id\n");
3262 + id = be32_to_cpup(id_ptr);
3265 + if (!id || id > ARRAY_SIZE(eth->ports)) {
3266 + dev_err(eth->dev, "invalid gdm port id: %d\n", id);
3270 + if (eth->ports[index]) {
3271 + dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
3275 + dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
3276 + AIROHA_NUM_NETDEV_TX_RINGS,
3277 + AIROHA_NUM_RX_RING);
3279 + dev_err(eth->dev, "alloc_etherdev failed\n");
3283 +	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
3284 + dev->netdev_ops = &airoha_netdev_ops;
3285 + dev->ethtool_ops = &airoha_ethtool_ops;
3286 + dev->max_mtu = AIROHA_MAX_MTU;
3287 + dev->watchdog_timeo = 5 * HZ;
3288 + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
3289 + NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
3290 + NETIF_F_SG | NETIF_F_TSO |
3292 + dev->features |= dev->hw_features;
3293 + dev->dev.of_node = np;
3294 + dev->irq = qdma->irq;
3295 + SET_NETDEV_DEV(dev, eth->dev);
3297 + /* reserve hw queues for HTB offloading */
3298 + err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
3302 + err = of_get_ethdev_address(np, dev);
3304 + if (err == -EPROBE_DEFER)
3307 + eth_hw_addr_random(dev);
3308 + dev_info(eth->dev, "generated random MAC address %pM\n",
3312 + port = netdev_priv(dev);
3313 + u64_stats_init(&port->stats.syncp);
3314 + spin_lock_init(&port->stats.lock);
3315 + port->qdma = qdma;
3318 + eth->ports[index] = port;
3320 + return register_netdev(dev);
3323 +static int airoha_probe(struct platform_device *pdev)
3325 + struct device_node *np;
3326 + struct airoha_eth *eth;
3329 + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3333 + eth->dev = &pdev->dev;
3335 + err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
3337 + dev_err(eth->dev, "failed configuring DMA mask\n");
3341 + eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
3342 + if (IS_ERR(eth->fe_regs))
3343 + return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
3344 + "failed to iomap fe regs\n");
3346 + eth->rsts[0].id = "fe";
3347 + eth->rsts[1].id = "pdma";
3348 + eth->rsts[2].id = "qdma";
3349 + err = devm_reset_control_bulk_get_exclusive(eth->dev,
3350 + ARRAY_SIZE(eth->rsts),
3353 + dev_err(eth->dev, "failed to get bulk reset lines\n");
3357 + eth->xsi_rsts[0].id = "xsi-mac";
3358 + eth->xsi_rsts[1].id = "hsi0-mac";
3359 + eth->xsi_rsts[2].id = "hsi1-mac";
3360 + eth->xsi_rsts[3].id = "hsi-mac";
3361 + eth->xsi_rsts[4].id = "xfp-mac";
3362 + err = devm_reset_control_bulk_get_exclusive(eth->dev,
3363 + ARRAY_SIZE(eth->xsi_rsts),
3366 + dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
3370 + eth->napi_dev = alloc_netdev_dummy(0);
3371 + if (!eth->napi_dev)
3374 + /* Enable threaded NAPI by default */
3375 + eth->napi_dev->threaded = true;
3376 + strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
3377 + platform_set_drvdata(pdev, eth);
3379 + err = airoha_hw_init(pdev, eth);
3381 + goto error_hw_cleanup;
3383 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3384 +		airoha_qdma_start_napi(&eth->qdma[i]);
3386 + for_each_child_of_node(pdev->dev.of_node, np) {
3387 + if (!of_device_is_compatible(np, "airoha,eth-mac"))
3390 + if (!of_device_is_available(np))
3393 + err = airoha_alloc_gdm_port(eth, np);
3396 + goto error_napi_stop;
3403 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3404 +		airoha_qdma_stop_napi(&eth->qdma[i]);
3406 +	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3407 +		airoha_hw_cleanup(&eth->qdma[i]);
3409 + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3410 + struct airoha_gdm_port *port = eth->ports[i];
3412 + if (port && port->dev->reg_state == NETREG_REGISTERED)
3413 + unregister_netdev(port->dev);
3415 + free_netdev(eth->napi_dev);
3416 + platform_set_drvdata(pdev, NULL);
3421 +static void airoha_remove(struct platform_device *pdev)
3423 + struct airoha_eth *eth = platform_get_drvdata(pdev);
3426 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
3427 +		airoha_qdma_stop_napi(&eth->qdma[i]);
3428 +		airoha_hw_cleanup(&eth->qdma[i]);
3431 + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3432 + struct airoha_gdm_port *port = eth->ports[i];
3437 + airoha_dev_stop(port->dev);
3438 + unregister_netdev(port->dev);
3440 + free_netdev(eth->napi_dev);
3442 + platform_set_drvdata(pdev, NULL);
3445 +static const struct of_device_id of_airoha_match[] = {
3446 + { .compatible = "airoha,en7581-eth" },
3447 + { /* sentinel */ }
3449 +MODULE_DEVICE_TABLE(of, of_airoha_match);
3451 +static struct platform_driver airoha_driver = {
3452 + .probe = airoha_probe,
3453 + .remove_new = airoha_remove,
3455 + .name = KBUILD_MODNAME,
3456 + .of_match_table = of_airoha_match,
3459 +module_platform_driver(airoha_driver);
3461 +MODULE_LICENSE("GPL");
3462 +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
3463 +MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
3464 --- a/drivers/net/ethernet/mediatek/airoha_eth.c
3467 -// SPDX-License-Identifier: GPL-2.0-only
3469 - * Copyright (c) 2024 AIROHA Inc
3470 - * Author: Lorenzo Bianconi <lorenzo@kernel.org>
3472 -#include <linux/etherdevice.h>
3473 -#include <linux/iopoll.h>
3474 -#include <linux/kernel.h>
3475 -#include <linux/netdevice.h>
3476 -#include <linux/of.h>
3477 -#include <linux/of_net.h>
3478 -#include <linux/platform_device.h>
3479 -#include <linux/reset.h>
3480 -#include <linux/tcp.h>
3481 -#include <linux/u64_stats_sync.h>
3482 -#include <net/dsa.h>
3483 -#include <net/page_pool/helpers.h>
3484 -#include <net/pkt_cls.h>
3485 -#include <uapi/linux/ppp_defs.h>
3487 -#define AIROHA_MAX_NUM_GDM_PORTS 1
3488 -#define AIROHA_MAX_NUM_QDMA 2
3489 -#define AIROHA_MAX_NUM_RSTS 3
3490 -#define AIROHA_MAX_NUM_XSI_RSTS 5
3491 -#define AIROHA_MAX_MTU 2000
3492 -#define AIROHA_MAX_PACKET_SIZE 2048
3493 -#define AIROHA_NUM_QOS_CHANNELS 4
3494 -#define AIROHA_NUM_QOS_QUEUES 8
3495 -#define AIROHA_NUM_TX_RING 32
3496 -#define AIROHA_NUM_RX_RING 32
3497 -#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
3498 - AIROHA_NUM_QOS_CHANNELS)
3499 -#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
3500 -#define AIROHA_FE_MC_MAX_VLAN_PORT 16
3501 -#define AIROHA_NUM_TX_IRQ 2
3502 -#define HW_DSCP_NUM 2048
3503 -#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
3504 -#define TX_DSCP_NUM 1024
3505 -#define RX_DSCP_NUM(_n) \
3506 - ((_n) == 2 ? 128 : \
3507 - (_n) == 11 ? 128 : \
3508 - (_n) == 15 ? 128 : \
3509 - (_n) == 0 ? 1024 : 16)
3511 -#define PSE_RSV_PAGES 128
3512 -#define PSE_QUEUE_RSV_PAGES 64
3514 -#define QDMA_METER_IDX(_n) ((_n) & 0xff)
3515 -#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
3518 -#define PSE_BASE 0x0100
3519 -#define CSR_IFC_BASE 0x0200
3520 -#define CDM1_BASE 0x0400
3521 -#define GDM1_BASE 0x0500
3522 -#define PPE1_BASE 0x0c00
3524 -#define CDM2_BASE 0x1400
3525 -#define GDM2_BASE 0x1500
3527 -#define GDM3_BASE 0x1100
3528 -#define GDM4_BASE 0x2500
3530 -#define GDM_BASE(_n) \
3531 - ((_n) == 4 ? GDM4_BASE : \
3532 - (_n) == 3 ? GDM3_BASE : \
3533 - (_n) == 2 ? GDM2_BASE : GDM1_BASE)
3535 -#define REG_FE_DMA_GLO_CFG 0x0000
3536 -#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
3537 -#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
3539 -#define REG_FE_RST_GLO_CFG 0x0004
3540 -#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
3541 -#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
3542 -#define FE_RST_CORE_MASK BIT(0)
3544 -#define REG_FE_WAN_MAC_H 0x0030
3545 -#define REG_FE_LAN_MAC_H 0x0040
3547 -#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
3548 -#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
3550 -#define REG_FE_CDM1_OQ_MAP0 0x0050
3551 -#define REG_FE_CDM1_OQ_MAP1 0x0054
3552 -#define REG_FE_CDM1_OQ_MAP2 0x0058
3553 -#define REG_FE_CDM1_OQ_MAP3 0x005c
3555 -#define REG_FE_PCE_CFG 0x0070
3556 -#define PCE_DPI_EN_MASK BIT(2)
3557 -#define PCE_KA_EN_MASK BIT(1)
3558 -#define PCE_MC_EN_MASK BIT(0)
3560 -#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
3561 -#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
3562 -#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
3563 -#define PSE_CFG_WR_EN_MASK BIT(8)
3564 -#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
3566 -#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
3567 -#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
3569 -#define PSE_FQ_CFG 0x008c
3570 -#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
3572 -#define REG_FE_PSE_BUF_SET 0x0090
3573 -#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
3574 -#define PSE_ALLRSV_MASK GENMASK(14, 0)
3576 -#define REG_PSE_SHARE_USED_THD 0x0094
3577 -#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
3578 -#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
3580 -#define REG_GDM_MISC_CFG 0x0148
3581 -#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
3582 -#define GDM2_CHN_VLD_MODE_MASK BIT(5)
3584 -#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
3585 -#define FE_IFC_EN_MASK BIT(0)
3587 -#define REG_FE_VIP_PORT_EN 0x01f0
3588 -#define REG_FE_IFC_PORT_EN 0x01f4
3590 -#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
3591 -#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
3593 -#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
3594 -#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
3595 -#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
3597 -#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
3598 -#define PATN_FCPU_EN_MASK BIT(7)
3599 -#define PATN_SWP_EN_MASK BIT(6)
3600 -#define PATN_DP_EN_MASK BIT(5)
3601 -#define PATN_SP_EN_MASK BIT(4)
3602 -#define PATN_TYPE_MASK GENMASK(3, 1)
3603 -#define PATN_EN_MASK BIT(0)
3605 -#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
3606 -#define PATN_DP_MASK GENMASK(31, 16)
3607 -#define PATN_SP_MASK GENMASK(15, 0)
3609 -#define REG_CDM1_VLAN_CTRL CDM1_BASE
3610 -#define CDM1_VLAN_MASK GENMASK(31, 16)
3612 -#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
3613 -#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
3615 -#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
3616 -#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
3617 - GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
3619 -#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
3620 -#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
3621 -#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
3623 -#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
3624 -#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
3625 - GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
3627 -#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
3628 -#define GDM_DROP_CRC_ERR BIT(23)
3629 -#define GDM_IP4_CKSUM BIT(22)
3630 -#define GDM_TCP_CKSUM BIT(21)
3631 -#define GDM_UDP_CKSUM BIT(20)
3632 -#define GDM_UCFQ_MASK GENMASK(15, 12)
3633 -#define GDM_BCFQ_MASK GENMASK(11, 8)
3634 -#define GDM_MCFQ_MASK GENMASK(7, 4)
3635 -#define GDM_OCFQ_MASK GENMASK(3, 0)
3637 -#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
3638 -#define GDM_INGRESS_FC_EN_MASK BIT(1)
3639 -#define GDM_STAG_EN_MASK BIT(0)
3641 -#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
3642 -#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
3643 -#define GDM_LONG_LEN_MASK GENMASK(29, 16)
3645 -#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
3646 -#define FE_CPORT_PAD BIT(26)
3647 -#define FE_CPORT_PORT_XFC_MASK BIT(25)
3648 -#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
3650 -#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
3651 -#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
3652 -#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
3654 -#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
3655 -#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
3656 -#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
3657 -#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
3658 -#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
3659 -#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
3661 -#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
3662 -#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
3663 -#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
3664 -#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
3665 -#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
3666 -#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
3667 -#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
3668 -#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
3669 -#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
3670 -#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
3671 -#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
3672 -#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
3673 -#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
3674 -#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
3675 -#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
3677 -#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
3678 -#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
3679 -#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
3680 -#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
3681 -#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
3682 -#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
3683 -#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n)	(GDM_BASE(_n) + 0x164)
-#define REG_FE_GDM_RX_ETH_DROP_CNT(_n)		(GDM_BASE(_n) + 0x168)
-#define REG_FE_GDM_RX_ETH_BC_CNT(_n)		(GDM_BASE(_n) + 0x16c)
-#define REG_FE_GDM_RX_ETH_MC_CNT(_n)		(GDM_BASE(_n) + 0x170)
-#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n)	(GDM_BASE(_n) + 0x174)
-#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n)		(GDM_BASE(_n) + 0x178)
-#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n)	(GDM_BASE(_n) + 0x17c)
-#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n)		(GDM_BASE(_n) + 0x180)
-#define REG_FE_GDM_RX_ETH_LONG_CNT(_n)		(GDM_BASE(_n) + 0x184)
-#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n)		(GDM_BASE(_n) + 0x188)
-#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n)		(GDM_BASE(_n) + 0x18c)
-#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n)	(GDM_BASE(_n) + 0x190)
-#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n)	(GDM_BASE(_n) + 0x194)
-#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)	(GDM_BASE(_n) + 0x198)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)	(GDM_BASE(_n) + 0x19c)
-
-#define REG_PPE1_TB_HASH_CFG		(PPE1_BASE + 0x250)
-#define PPE1_SRAM_TABLE_EN_MASK		BIT(0)
-#define PPE1_SRAM_HASH1_EN_MASK		BIT(8)
-#define PPE1_DRAM_TABLE_EN_MASK		BIT(16)
-#define PPE1_DRAM_HASH1_EN_MASK		BIT(24)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x280)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)		(GDM_BASE(_n) + 0x284)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x288)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n)	(GDM_BASE(_n) + 0x28c)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x290)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n)		(GDM_BASE(_n) + 0x294)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x298)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n)	(GDM_BASE(_n) + 0x29c)
-#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n)		(GDM_BASE(_n) + 0x2b8)
-#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n)		(GDM_BASE(_n) + 0x2bc)
-#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n)	(GDM_BASE(_n) + 0x2c0)
-#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n)	(GDM_BASE(_n) + 0x2c4)
-#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n)	(GDM_BASE(_n) + 0x2c8)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n)	(GDM_BASE(_n) + 0x2cc)
-#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n)		(GDM_BASE(_n) + 0x2e8)
-#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n)		(GDM_BASE(_n) + 0x2ec)
-#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n)	(GDM_BASE(_n) + 0x2f0)
-#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n)	(GDM_BASE(_n) + 0x2f4)
-#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n)	(GDM_BASE(_n) + 0x2f8)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)	(GDM_BASE(_n) + 0x2fc)
-
-#define REG_GDM2_CHN_RLS		(GDM2_BASE + 0x20)
-#define MBI_RX_AGE_SEL_MASK		GENMASK(26, 25)
-#define MBI_TX_AGE_SEL_MASK		GENMASK(18, 17)
-
-#define REG_GDM3_FWD_CFG		GDM3_BASE
-#define GDM3_PAD_EN_MASK		BIT(28)
-
-#define REG_GDM4_FWD_CFG		GDM4_BASE
-#define GDM4_PAD_EN_MASK		BIT(28)
-#define GDM4_SPORT_OFFSET0_MASK		GENMASK(11, 8)
-
-#define REG_GDM4_SRC_PORT_SET		(GDM4_BASE + 0x23c)
-#define GDM4_SPORT_OFF2_MASK		GENMASK(19, 16)
-#define GDM4_SPORT_OFF1_MASK		GENMASK(15, 12)
-#define GDM4_SPORT_OFF0_MASK		GENMASK(11, 8)
-
-#define REG_IP_FRAG_FP			0x2010
-#define IP_ASSEMBLE_PORT_MASK		GENMASK(24, 21)
-#define IP_ASSEMBLE_NBQ_MASK		GENMASK(20, 16)
-#define IP_FRAGMENT_PORT_MASK		GENMASK(8, 5)
-#define IP_FRAGMENT_NBQ_MASK		GENMASK(4, 0)
-
-#define REG_MC_VLAN_EN			0x2100
-#define MC_VLAN_EN_MASK			BIT(0)
-
-#define REG_MC_VLAN_CFG			0x2104
-#define MC_VLAN_CFG_CMD_DONE_MASK	BIT(31)
-#define MC_VLAN_CFG_TABLE_ID_MASK	GENMASK(21, 16)
-#define MC_VLAN_CFG_PORT_ID_MASK	GENMASK(11, 8)
-#define MC_VLAN_CFG_TABLE_SEL_MASK	BIT(4)
-#define MC_VLAN_CFG_RW_MASK		BIT(0)
-
-#define REG_MC_VLAN_DATA		0x2108
-
-#define REG_CDM5_RX_OQ1_DROP_CNT	0x29d4
-
-#define REG_QDMA_GLOBAL_CFG			0x0004
-#define GLOBAL_CFG_RX_2B_OFFSET_MASK		BIT(31)
-#define GLOBAL_CFG_DMA_PREFERENCE_MASK		GENMASK(30, 29)
-#define GLOBAL_CFG_CPU_TXR_RR_MASK		BIT(28)
-#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK		BIT(27)
-#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK	BIT(26)
-#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK	BIT(25)
-#define GLOBAL_CFG_OAM_MODIFY_MASK		BIT(24)
-#define GLOBAL_CFG_RESET_MASK			BIT(23)
-#define GLOBAL_CFG_RESET_DONE_MASK		BIT(22)
-#define GLOBAL_CFG_MULTICAST_EN_MASK		BIT(21)
-#define GLOBAL_CFG_IRQ1_EN_MASK			BIT(20)
-#define GLOBAL_CFG_IRQ0_EN_MASK			BIT(19)
-#define GLOBAL_CFG_LOOPCNT_EN_MASK		BIT(18)
-#define GLOBAL_CFG_RD_BYPASS_WR_MASK		BIT(17)
-#define GLOBAL_CFG_QDMA_LOOPBACK_MASK		BIT(16)
-#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK		GENMASK(13, 8)
-#define GLOBAL_CFG_CHECK_DONE_MASK		BIT(7)
-#define GLOBAL_CFG_TX_WB_DONE_MASK		BIT(6)
-#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK		GENMASK(5, 4)
-#define GLOBAL_CFG_RX_DMA_BUSY_MASK		BIT(3)
-#define GLOBAL_CFG_RX_DMA_EN_MASK		BIT(2)
-#define GLOBAL_CFG_TX_DMA_BUSY_MASK		BIT(1)
-#define GLOBAL_CFG_TX_DMA_EN_MASK		BIT(0)
-
-#define REG_FWD_DSCP_BASE		0x0010
-#define REG_FWD_BUF_BASE		0x0014
-
-#define REG_HW_FWD_DSCP_CFG		0x0018
-#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK	GENMASK(29, 28)
-#define HW_FWD_DSCP_SCATTER_LEN_MASK	GENMASK(17, 16)
-#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK	GENMASK(15, 0)
-
-#define REG_INT_STATUS(_n)		\
-	(((_n) == 4) ? 0x0730 :		\
-	 ((_n) == 3) ? 0x0724 :		\
-	 ((_n) == 2) ? 0x0720 :		\
-	 ((_n) == 1) ? 0x0024 : 0x0020)
-
-#define REG_INT_ENABLE(_n)		\
-	(((_n) == 4) ? 0x0750 :		\
-	 ((_n) == 3) ? 0x0744 :		\
-	 ((_n) == 2) ? 0x0740 :		\
-	 ((_n) == 1) ? 0x002c : 0x0028)
-
-/* QDMA_CSR_INT_ENABLE1 */
-#define RX15_COHERENT_INT_MASK		BIT(31)
-#define RX14_COHERENT_INT_MASK		BIT(30)
-#define RX13_COHERENT_INT_MASK		BIT(29)
-#define RX12_COHERENT_INT_MASK		BIT(28)
-#define RX11_COHERENT_INT_MASK		BIT(27)
-#define RX10_COHERENT_INT_MASK		BIT(26)
-#define RX9_COHERENT_INT_MASK		BIT(25)
-#define RX8_COHERENT_INT_MASK		BIT(24)
-#define RX7_COHERENT_INT_MASK		BIT(23)
-#define RX6_COHERENT_INT_MASK		BIT(22)
-#define RX5_COHERENT_INT_MASK		BIT(21)
-#define RX4_COHERENT_INT_MASK		BIT(20)
-#define RX3_COHERENT_INT_MASK		BIT(19)
-#define RX2_COHERENT_INT_MASK		BIT(18)
-#define RX1_COHERENT_INT_MASK		BIT(17)
-#define RX0_COHERENT_INT_MASK		BIT(16)
-#define TX7_COHERENT_INT_MASK		BIT(15)
-#define TX6_COHERENT_INT_MASK		BIT(14)
-#define TX5_COHERENT_INT_MASK		BIT(13)
-#define TX4_COHERENT_INT_MASK		BIT(12)
-#define TX3_COHERENT_INT_MASK		BIT(11)
-#define TX2_COHERENT_INT_MASK		BIT(10)
-#define TX1_COHERENT_INT_MASK		BIT(9)
-#define TX0_COHERENT_INT_MASK		BIT(8)
-#define CNT_OVER_FLOW_INT_MASK		BIT(7)
-#define IRQ1_FULL_INT_MASK		BIT(5)
-#define IRQ1_INT_MASK			BIT(4)
-#define HWFWD_DSCP_LOW_INT_MASK		BIT(3)
-#define HWFWD_DSCP_EMPTY_INT_MASK	BIT(2)
-#define IRQ0_FULL_INT_MASK		BIT(1)
-#define IRQ0_INT_MASK			BIT(0)
-
-#define TX_DONE_INT_MASK(_n)					\
-	((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK		\
-	      : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_TX_MASK						\
-	(IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |			\
-	 IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_IDX0_MASK						\
-	(TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |	\
-	 TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |	\
-	 TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |	\
-	 TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |	\
-	 RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |	\
-	 RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |	\
-	 RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |	\
-	 RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |	\
-	 RX15_COHERENT_INT_MASK | INT_TX_MASK)
-
-/* QDMA_CSR_INT_ENABLE2 */
-#define RX15_NO_CPU_DSCP_INT_MASK	BIT(31)
-#define RX14_NO_CPU_DSCP_INT_MASK	BIT(30)
-#define RX13_NO_CPU_DSCP_INT_MASK	BIT(29)
-#define RX12_NO_CPU_DSCP_INT_MASK	BIT(28)
-#define RX11_NO_CPU_DSCP_INT_MASK	BIT(27)
-#define RX10_NO_CPU_DSCP_INT_MASK	BIT(26)
-#define RX9_NO_CPU_DSCP_INT_MASK	BIT(25)
-#define RX8_NO_CPU_DSCP_INT_MASK	BIT(24)
-#define RX7_NO_CPU_DSCP_INT_MASK	BIT(23)
-#define RX6_NO_CPU_DSCP_INT_MASK	BIT(22)
-#define RX5_NO_CPU_DSCP_INT_MASK	BIT(21)
-#define RX4_NO_CPU_DSCP_INT_MASK	BIT(20)
-#define RX3_NO_CPU_DSCP_INT_MASK	BIT(19)
-#define RX2_NO_CPU_DSCP_INT_MASK	BIT(18)
-#define RX1_NO_CPU_DSCP_INT_MASK	BIT(17)
-#define RX0_NO_CPU_DSCP_INT_MASK	BIT(16)
-#define RX15_DONE_INT_MASK		BIT(15)
-#define RX14_DONE_INT_MASK		BIT(14)
-#define RX13_DONE_INT_MASK		BIT(13)
-#define RX12_DONE_INT_MASK		BIT(12)
-#define RX11_DONE_INT_MASK		BIT(11)
-#define RX10_DONE_INT_MASK		BIT(10)
-#define RX9_DONE_INT_MASK		BIT(9)
-#define RX8_DONE_INT_MASK		BIT(8)
-#define RX7_DONE_INT_MASK		BIT(7)
-#define RX6_DONE_INT_MASK		BIT(6)
-#define RX5_DONE_INT_MASK		BIT(5)
-#define RX4_DONE_INT_MASK		BIT(4)
-#define RX3_DONE_INT_MASK		BIT(3)
-#define RX2_DONE_INT_MASK		BIT(2)
-#define RX1_DONE_INT_MASK		BIT(1)
-#define RX0_DONE_INT_MASK		BIT(0)
-
-#define RX_DONE_INT_MASK					\
-	(RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |		\
-	 RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |		\
-	 RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |		\
-	 RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |		\
-	 RX15_DONE_INT_MASK)
-#define INT_IDX1_MASK						\
-	(RX_DONE_INT_MASK |					\
-	 RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |	\
-	 RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |	\
-	 RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |	\
-	 RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |	\
-	 RX15_NO_CPU_DSCP_INT_MASK)
-
-/* QDMA_CSR_INT_ENABLE5 */
-#define TX31_COHERENT_INT_MASK		BIT(31)
-#define TX30_COHERENT_INT_MASK		BIT(30)
-#define TX29_COHERENT_INT_MASK		BIT(29)
-#define TX28_COHERENT_INT_MASK		BIT(28)
-#define TX27_COHERENT_INT_MASK		BIT(27)
-#define TX26_COHERENT_INT_MASK		BIT(26)
-#define TX25_COHERENT_INT_MASK		BIT(25)
-#define TX24_COHERENT_INT_MASK		BIT(24)
-#define TX23_COHERENT_INT_MASK		BIT(23)
-#define TX22_COHERENT_INT_MASK		BIT(22)
-#define TX21_COHERENT_INT_MASK		BIT(21)
-#define TX20_COHERENT_INT_MASK		BIT(20)
-#define TX19_COHERENT_INT_MASK		BIT(19)
-#define TX18_COHERENT_INT_MASK		BIT(18)
-#define TX17_COHERENT_INT_MASK		BIT(17)
-#define TX16_COHERENT_INT_MASK		BIT(16)
-#define TX15_COHERENT_INT_MASK		BIT(15)
-#define TX14_COHERENT_INT_MASK		BIT(14)
-#define TX13_COHERENT_INT_MASK		BIT(13)
-#define TX12_COHERENT_INT_MASK		BIT(12)
-#define TX11_COHERENT_INT_MASK		BIT(11)
-#define TX10_COHERENT_INT_MASK		BIT(10)
-#define TX9_COHERENT_INT_MASK		BIT(9)
-#define TX8_COHERENT_INT_MASK		BIT(8)
-
-#define INT_IDX4_MASK						\
-	(TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |	\
-	 TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |	\
-	 TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |	\
-	 TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |	\
-	 TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |	\
-	 TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |	\
-	 TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |	\
-	 TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |	\
-	 TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |	\
-	 TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |	\
-	 TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |	\
-	 TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-
-#define REG_TX_IRQ_BASE(_n)		((_n) ? 0x0048 : 0x0050)
-
-#define REG_TX_IRQ_CFG(_n)		((_n) ? 0x004c : 0x0054)
-#define TX_IRQ_THR_MASK			GENMASK(27, 16)
-#define TX_IRQ_DEPTH_MASK		GENMASK(11, 0)
-
-#define REG_IRQ_CLEAR_LEN(_n)		((_n) ? 0x0064 : 0x0058)
-#define IRQ_CLEAR_LEN_MASK		GENMASK(7, 0)
-
-#define REG_IRQ_STATUS(_n)		((_n) ? 0x0068 : 0x005c)
-#define IRQ_ENTRY_LEN_MASK		GENMASK(27, 16)
-#define IRQ_HEAD_IDX_MASK		GENMASK(11, 0)
-
-#define REG_TX_RING_BASE(_n)	\
-	(((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-
-#define REG_TX_RING_BLOCKING(_n)	\
-	(((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-
-#define TX_RING_IRQ_BLOCKING_MAP_MASK			BIT(6)
-#define TX_RING_IRQ_BLOCKING_CFG_MASK			BIT(4)
-#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK		BIT(2)
-#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK	BIT(1)
-#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK	BIT(0)
-
-#define REG_TX_CPU_IDX(_n)	\
-	(((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-
-#define TX_RING_CPU_IDX_MASK		GENMASK(15, 0)
-
-#define REG_TX_DMA_IDX(_n)	\
-	(((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-
-#define TX_RING_DMA_IDX_MASK		GENMASK(15, 0)
-
-#define IRQ_RING_IDX_MASK		GENMASK(20, 16)
-#define IRQ_DESC_IDX_MASK		GENMASK(15, 0)
-
-#define REG_RX_RING_BASE(_n)	\
-	(((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-
-#define REG_RX_RING_SIZE(_n)	\
-	(((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-
-#define RX_RING_THR_MASK		GENMASK(31, 16)
-#define RX_RING_SIZE_MASK		GENMASK(15, 0)
-
-#define REG_RX_CPU_IDX(_n)	\
-	(((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-
-#define RX_RING_CPU_IDX_MASK		GENMASK(15, 0)
-
-#define REG_RX_DMA_IDX(_n)	\
-	(((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-
-#define REG_RX_DELAY_INT_IDX(_n)	\
-	(((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-
-#define RX_DELAY_INT_MASK		GENMASK(15, 0)
-
-#define RX_RING_DMA_IDX_MASK		GENMASK(15, 0)
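[Editor note, not part of the patch: the per-ring control registers sit in two banks with a 0x20-byte stride, TX rings 0-7 at 0x0100 and rings 8 and up at 0x0b00, RX rings 0-15 at 0x0200 and rings 16 and up at 0x0e00. A minimal standalone sketch of the same address arithmetic:]

#include <stdio.h>

/* Same mapping as REG_TX_RING_BASE()/REG_RX_RING_BASE() above:
 * a 0x20-byte register stride per ring, split across two banks.
 */
static unsigned int tx_ring_base(unsigned int n)
{
	return n < 8 ? 0x0100 + (n << 5) : 0x0b00 + ((n - 8) << 5);
}

static unsigned int rx_ring_base(unsigned int n)
{
	return n < 16 ? 0x0200 + (n << 5) : 0x0e00 + ((n - 16) << 5);
}

int main(void)
{
	/* tx ring 7 -> 0x01e0, tx ring 8 -> 0x0b00,
	 * rx ring 15 -> 0x03e0, rx ring 16 -> 0x0e00
	 */
	printf("0x%04x 0x%04x 0x%04x 0x%04x\n",
	       tx_ring_base(7), tx_ring_base(8),
	       rx_ring_base(15), rx_ring_base(16));
	return 0;
}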
-#define REG_INGRESS_TRTCM_CFG		0x0070
-#define INGRESS_TRTCM_EN_MASK		BIT(31)
-#define INGRESS_TRTCM_MODE_MASK		BIT(30)
-#define INGRESS_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
-#define INGRESS_FAST_TICK_MASK		GENMASK(15, 0)
-
-#define REG_QUEUE_CLOSE_CFG(_n)		(0x00a0 + ((_n) & 0xfc))
-#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m)	BIT((_m) + (((_n) & 0x3) << 3))
-
-#define REG_TXQ_DIS_CFG_BASE(_n)	((_n) ? 0x20a0 : 0x00a0)
-#define REG_TXQ_DIS_CFG(_n, _m)		(REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-
-#define REG_CNTR_CFG(_n)		(0x0400 + ((_n) << 3))
-#define CNTR_EN_MASK			BIT(31)
-#define CNTR_ALL_CHAN_EN_MASK		BIT(30)
-#define CNTR_ALL_QUEUE_EN_MASK		BIT(29)
-#define CNTR_ALL_DSCP_RING_EN_MASK	BIT(28)
-#define CNTR_SRC_MASK			GENMASK(27, 24)
-#define CNTR_DSCP_RING_MASK		GENMASK(20, 16)
-#define CNTR_CHAN_MASK			GENMASK(7, 3)
-#define CNTR_QUEUE_MASK			GENMASK(2, 0)
-
-#define REG_CNTR_VAL(_n)		(0x0404 + ((_n) << 3))
-
-#define REG_LMGR_INIT_CFG		0x1000
-#define LMGR_INIT_START			BIT(31)
-#define LMGR_SRAM_MODE_MASK		BIT(30)
-#define HW_FWD_PKTSIZE_OVERHEAD_MASK	GENMASK(27, 20)
-#define HW_FWD_DESC_NUM_MASK		GENMASK(16, 0)
-
-#define REG_FWD_DSCP_LOW_THR		0x1004
-#define FWD_DSCP_LOW_THR_MASK		GENMASK(17, 0)
-
-#define REG_EGRESS_RATE_METER_CFG		0x100c
-#define EGRESS_RATE_METER_EN_MASK		BIT(31)
-#define EGRESS_RATE_METER_EQ_RATE_EN_MASK	BIT(17)
-#define EGRESS_RATE_METER_WINDOW_SZ_MASK	GENMASK(16, 12)
-#define EGRESS_RATE_METER_TIMESLICE_MASK	GENMASK(10, 0)
-
-#define REG_EGRESS_TRTCM_CFG		0x1010
-#define EGRESS_TRTCM_EN_MASK		BIT(31)
-#define EGRESS_TRTCM_MODE_MASK		BIT(30)
-#define EGRESS_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
-#define EGRESS_FAST_TICK_MASK		GENMASK(15, 0)
-
-#define TRTCM_PARAM_RW_MASK		BIT(31)
-#define TRTCM_PARAM_RW_DONE_MASK	BIT(30)
-#define TRTCM_PARAM_TYPE_MASK		GENMASK(29, 28)
-#define TRTCM_METER_GROUP_MASK		GENMASK(27, 26)
-#define TRTCM_PARAM_INDEX_MASK		GENMASK(23, 17)
-#define TRTCM_PARAM_RATE_TYPE_MASK	BIT(16)
-
-#define REG_TRTCM_CFG_PARAM(_n)		((_n) + 0x4)
-#define REG_TRTCM_DATA_LOW(_n)		((_n) + 0x8)
-#define REG_TRTCM_DATA_HIGH(_n)		((_n) + 0xc)
-
-#define REG_TXWRR_MODE_CFG		0x1020
-#define TWRR_WEIGHT_SCALE_MASK		BIT(31)
-#define TWRR_WEIGHT_BASE_MASK		BIT(3)
-
-#define REG_TXWRR_WEIGHT_CFG		0x1024
-#define TWRR_RW_CMD_MASK		BIT(31)
-#define TWRR_RW_CMD_DONE		BIT(30)
-#define TWRR_CHAN_IDX_MASK		GENMASK(23, 19)
-#define TWRR_QUEUE_IDX_MASK		GENMASK(18, 16)
-#define TWRR_VALUE_MASK			GENMASK(15, 0)
-
-#define REG_PSE_BUF_USAGE_CFG		0x1028
-#define PSE_BUF_ESTIMATE_EN_MASK	BIT(29)
-
-#define REG_CHAN_QOS_MODE(_n)		(0x1040 + ((_n) << 2))
-#define CHAN_QOS_MODE_MASK(_n)		GENMASK(2 + ((_n) << 2), (_n) << 2)
-
-#define REG_GLB_TRTCM_CFG		0x1080
-#define GLB_TRTCM_EN_MASK		BIT(31)
-#define GLB_TRTCM_MODE_MASK		BIT(30)
-#define GLB_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
-#define GLB_FAST_TICK_MASK		GENMASK(15, 0)
-
-#define REG_TXQ_CNGST_CFG		0x10a0
-#define TXQ_CNGST_DROP_EN		BIT(31)
-#define TXQ_CNGST_DEI_DROP_EN		BIT(30)
-
-#define REG_SLA_TRTCM_CFG		0x1150
-#define SLA_TRTCM_EN_MASK		BIT(31)
-#define SLA_TRTCM_MODE_MASK		BIT(30)
-#define SLA_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
-#define SLA_FAST_TICK_MASK		GENMASK(15, 0)
-
-#define QDMA_DESC_DONE_MASK		BIT(31)
-#define QDMA_DESC_DROP_MASK		BIT(30) /* tx: drop - rx: overflow */
-#define QDMA_DESC_MORE_MASK		BIT(29) /* more SG elements */
-#define QDMA_DESC_DEI_MASK		BIT(25)
-#define QDMA_DESC_NO_DROP_MASK		BIT(24)
-#define QDMA_DESC_LEN_MASK		GENMASK(15, 0)
-
-#define QDMA_DESC_NEXT_ID_MASK		GENMASK(15, 0)
-
-#define QDMA_ETH_TXMSG_MIC_IDX_MASK	BIT(30)
-#define QDMA_ETH_TXMSG_SP_TAG_MASK	GENMASK(29, 14)
-#define QDMA_ETH_TXMSG_ICO_MASK		BIT(13)
-#define QDMA_ETH_TXMSG_UCO_MASK		BIT(12)
-#define QDMA_ETH_TXMSG_TCO_MASK		BIT(11)
-#define QDMA_ETH_TXMSG_TSO_MASK		BIT(10)
-#define QDMA_ETH_TXMSG_FAST_MASK	BIT(9)
-#define QDMA_ETH_TXMSG_OAM_MASK		BIT(8)
-#define QDMA_ETH_TXMSG_CHAN_MASK	GENMASK(7, 3)
-#define QDMA_ETH_TXMSG_QUEUE_MASK	GENMASK(2, 0)
-
-#define QDMA_ETH_TXMSG_NO_DROP		BIT(31)
-#define QDMA_ETH_TXMSG_METER_MASK	GENMASK(30, 24)	/* 0x7f no meters */
-#define QDMA_ETH_TXMSG_FPORT_MASK	GENMASK(23, 20)
-#define QDMA_ETH_TXMSG_NBOQ_MASK	GENMASK(19, 15)
-#define QDMA_ETH_TXMSG_HWF_MASK		BIT(14)
-#define QDMA_ETH_TXMSG_HOP_MASK		BIT(13)
-#define QDMA_ETH_TXMSG_PTP_MASK		BIT(12)
-#define QDMA_ETH_TXMSG_ACNT_G1_MASK	GENMASK(10, 6)	/* 0x1f do not count */
-#define QDMA_ETH_TXMSG_ACNT_G0_MASK	GENMASK(5, 0)	/* 0x3f do not count */
-
-#define QDMA_ETH_RXMSG_DEI_MASK		BIT(31)
-#define QDMA_ETH_RXMSG_IP6_MASK		BIT(30)
-#define QDMA_ETH_RXMSG_IP4_MASK		BIT(29)
-#define QDMA_ETH_RXMSG_IP4F_MASK	BIT(28)
-#define QDMA_ETH_RXMSG_L4_VALID_MASK	BIT(27)
-#define QDMA_ETH_RXMSG_L4F_MASK		BIT(26)
-#define QDMA_ETH_RXMSG_SPORT_MASK	GENMASK(25, 21)
-#define QDMA_ETH_RXMSG_CRSN_MASK	GENMASK(20, 16)
-#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK	GENMASK(15, 0)
-
-struct airoha_qdma_desc {
-	__le32 rsv;
-	__le32 ctrl;
-	__le32 addr;
-	__le32 data;
-	__le32 msg0;
-	__le32 msg1;
-	__le32 msg2;
-	__le32 msg3;
-};
-
-#define QDMA_FWD_DESC_CTX_MASK		BIT(31)
-#define QDMA_FWD_DESC_RING_MASK		GENMASK(30, 28)
-#define QDMA_FWD_DESC_IDX_MASK		GENMASK(27, 16)
-#define QDMA_FWD_DESC_LEN_MASK		GENMASK(15, 0)
-
-#define QDMA_FWD_DESC_FIRST_IDX_MASK	GENMASK(15, 0)
-
-#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK	GENMASK(2, 0)
-
-struct airoha_qdma_fwd_desc {
-	__le32 addr;
-	__le32 ctrl0;
-	__le32 ctrl1;
-	__le32 ctrl2;
-	__le32 msg0;
-	__le32 msg1;
-	__le32 rsv0;
-	__le32 rsv1;
-};
-
-enum {
-	QDMA_INT_REG_IDX0,
-	QDMA_INT_REG_IDX1,
-	QDMA_INT_REG_IDX2,
-	QDMA_INT_REG_IDX3,
-	QDMA_INT_REG_IDX4,
-	QDMA_INT_REG_MAX
-};
-
-enum {
-	XSI_PCIE0_PORT,
-	XSI_PCIE1_PORT,
-	XSI_USB_PORT,
-	XSI_AE_PORT,
-	XSI_ETH_PORT,
-};
-
-enum {
-	XSI_PCIE0_VIP_PORT_MASK	= BIT(22),
-	XSI_PCIE1_VIP_PORT_MASK	= BIT(23),
-	XSI_USB_VIP_PORT_MASK	= BIT(25),
-	XSI_ETH_VIP_PORT_MASK	= BIT(24),
-};
-
-enum {
-	DEV_STATE_INITIALIZED,
-};
-
-enum {
-	CDM_CRSN_QSEL_Q1 = 1,
-	CDM_CRSN_QSEL_Q5 = 5,
-	CDM_CRSN_QSEL_Q6 = 6,
-	CDM_CRSN_QSEL_Q15 = 15,
-};
-
-enum {
-	CRSN_08 = 0x8,
-	CRSN_21 = 0x15,		/* KA */
-	CRSN_22 = 0x16,		/* hit bind and force route to CPU */
-	CRSN_24 = 0x18,
-	CRSN_25 = 0x19,
-};
-
-enum {
-	FE_PSE_PORT_CDM1,
-	FE_PSE_PORT_GDM1,
-	FE_PSE_PORT_GDM2,
-	FE_PSE_PORT_GDM3,
-	FE_PSE_PORT_PPE1,
-	FE_PSE_PORT_CDM2,
-	FE_PSE_PORT_CDM3,
-	FE_PSE_PORT_CDM4,
-	FE_PSE_PORT_PPE2,
-	FE_PSE_PORT_GDM4,
-	FE_PSE_PORT_CDM5,
-	FE_PSE_PORT_DROP = 0xf,
-};
-
-enum tx_sched_mode {
-	TC_SCH_WRR8,
-	TC_SCH_SP,
-	TC_SCH_WRR7,
-	TC_SCH_WRR6,
-	TC_SCH_WRR5,
-	TC_SCH_WRR4,
-	TC_SCH_WRR3,
-	TC_SCH_WRR2,
-};
-
-enum trtcm_param_type {
-	TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
-	TRTCM_TOKEN_RATE_MODE,
-	TRTCM_BUCKETSIZE_SHIFT_MODE,
-	TRTCM_BUCKET_COUNTER_MODE,
-};
-
-enum trtcm_mode_type {
-	TRTCM_COMMIT_MODE,
-	TRTCM_PEAK_MODE,
-};
-
-enum {
-	TRTCM_TICK_SEL = BIT(0),
-	TRTCM_PKT_MODE = BIT(1),
-	TRTCM_METER_MODE = BIT(2),
-};
-
-#define MIN_TOKEN_SIZE			4096
-#define MAX_TOKEN_SIZE_OFFSET		17
-#define TRTCM_TOKEN_RATE_MASK		GENMASK(23, 6)
-#define TRTCM_TOKEN_RATE_FRACTION_MASK	GENMASK(5, 0)
-
-struct airoha_queue_entry {
-	union {
-		void *buf;
-		struct sk_buff *skb;
-	};
-	dma_addr_t dma_addr;
-	u16 dma_len;
-};
-
-struct airoha_queue {
-	struct airoha_qdma *qdma;
-
-	/* protect concurrent queue accesses */
-	spinlock_t lock;
-	struct airoha_queue_entry *entry;
-	struct airoha_qdma_desc *desc;
-	u16 head;
-	u16 tail;
-
-	int queued;
-	int ndesc;
-	int free_thr;
-	int buf_size;
-
-	struct napi_struct napi;
-	struct page_pool *page_pool;
-};
-
-struct airoha_tx_irq_queue {
-	struct airoha_qdma *qdma;
-
-	struct napi_struct napi;
-
-	int size;
-	u32 *q;
-};
-
-struct airoha_hw_stats {
-	/* protect concurrent hw_stats accesses */
-	spinlock_t lock;
-	struct u64_stats_sync syncp;
-
-	/* get_stats64 */
-	u64 rx_ok_pkts;
-	u64 tx_ok_pkts;
-	u64 rx_ok_bytes;
-	u64 tx_ok_bytes;
-	u64 rx_multicast;
-	u64 rx_errors;
-	u64 rx_drops;
-	u64 tx_drops;
-	u64 rx_crc_error;
-	u64 rx_over_errors;
-	/* ethtool stats */
-	u64 tx_timeouts;
-	u64 rx_fifo_overflows;
-	u64 tx_fifo_underruns;
-	u64 tx_broadcast;
-	u64 tx_multicast;
-	u64 rx_len[7];
-	u64 tx_len[7];
-};
-
-struct airoha_qdma {
-	struct airoha_eth *eth;
-	void __iomem *regs;
-
-	/* protect concurrent irqmask accesses */
-	spinlock_t irq_lock;
-	u32 irqmask[QDMA_INT_REG_MAX];
-	int irq;
-
-	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-
-	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
-	/* descriptor and packet buffers for qdma hw forward */
-	struct {
-		void *desc;
-		void *q;
-	} hfwd;
-};
-
-struct airoha_gdm_port {
-	struct airoha_qdma *qdma;
-	struct net_device *dev;
-	int id;
-
-	struct airoha_hw_stats stats;
-
-	DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
-
-	/* qos stats counters */
-	u64 cpu_tx_packets;
-	u64 fwd_tx_packets;
-};
-
-struct airoha_eth {
-	struct device *dev;
-
-	unsigned long state;
-	void __iomem *fe_regs;
-
-	struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
-	struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-
-	struct net_device *napi_dev;
-
-	struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
-	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-};
-
-static u32 airoha_rr(void __iomem *base, u32 offset)
-{
-	return readl(base + offset);
-}
-
-static void airoha_wr(void __iomem *base, u32 offset, u32 val)
-{
-	writel(val, base + offset);
-}
-
-static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
-{
-	val |= (airoha_rr(base, offset) & ~mask);
-	airoha_wr(base, offset, val);
-
-	return val;
-}
-
-#define airoha_fe_rr(eth, offset)				\
-	airoha_rr((eth)->fe_regs, (offset))
-#define airoha_fe_wr(eth, offset, val)				\
-	airoha_wr((eth)->fe_regs, (offset), (val))
-#define airoha_fe_rmw(eth, offset, mask, val)			\
-	airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-#define airoha_fe_set(eth, offset, val)				\
-	airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-#define airoha_fe_clear(eth, offset, val)			\
-	airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-
-#define airoha_qdma_rr(qdma, offset)				\
-	airoha_rr((qdma)->regs, (offset))
-#define airoha_qdma_wr(qdma, offset, val)			\
-	airoha_wr((qdma)->regs, (offset), (val))
-#define airoha_qdma_rmw(qdma, offset, mask, val)		\
-	airoha_rmw((qdma)->regs, (offset), (mask), (val))
-#define airoha_qdma_set(qdma, offset, val)			\
-	airoha_rmw((qdma)->regs, (offset), 0, (val))
-#define airoha_qdma_clear(qdma, offset, val)			\
-	airoha_rmw((qdma)->regs, (offset), (val), 0)
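[Editor note, not part of the patch: the set/clear variants are thin wrappers around airoha_rmw(), "set" passes an empty clear mask and "clear" passes an empty set value. A minimal in-kernel sketch of how a masked field update composes with FIELD_PREP(); the helper name is hypothetical, the register and mask are the ones defined above:]

/* Hypothetical usage sketch: program the RX ring 0 size field while
 * leaving the threshold bits in the same register untouched.
 */
static void example_set_rx_ring_size(struct airoha_qdma *qdma, int ndesc)
{
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(0), RX_RING_SIZE_MASK,
			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
}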
-static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
-				    u32 clear, u32 set)
-{
-	unsigned long flags;
-
-	if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
-		return;
-
-	spin_lock_irqsave(&qdma->irq_lock, flags);
-
-	qdma->irqmask[index] &= ~clear;
-	qdma->irqmask[index] |= set;
-	airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
-	/* Read irq_enable register in order to guarantee the update above
-	 * completes in the spinlock critical section.
-	 */
-	airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
-
-	spin_unlock_irqrestore(&qdma->irq_lock, flags);
-}
-
-static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
-				   u32 mask)
-{
-	airoha_qdma_set_irqmask(qdma, index, 0, mask);
-}
-
-static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
-				    u32 mask)
-{
-	airoha_qdma_set_irqmask(qdma, index, mask, 0);
-}
-
-static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
-{
-	/* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
-	 * GDM{2,3,4} can be used as wan port connected to an external
-	 * phy module.
-	 */
-	return port->id == 1;
-}
-
-static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
-{
-	struct airoha_eth *eth = port->qdma->eth;
-	u32 val, reg;
-
-	reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
-					   : REG_FE_WAN_MAC_H;
-	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
-	airoha_fe_wr(eth, reg, val);
-
-	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
-	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
-	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
-}
-
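[Editor note, not part of the patch: airoha_set_macaddr() packs three MAC bytes per register, so for 00:11:22:33:44:55 the high register gets 0x001122 and LMIN/LMAX both get 0x334455. A standalone sketch of the same packing:]

#include <stdint.h>
#include <stdio.h>

/* Same byte packing as airoha_set_macaddr(): three bytes per register. */
int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t hi = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	uint32_t lo = (addr[3] << 16) | (addr[4] << 8) | addr[5];

	printf("MAC_H=0x%06x MAC_L=0x%06x\n", hi, lo); /* 0x001122 0x334455 */
	return 0;
}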
-static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
-					 u32 val)
-{
-	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
-		      FIELD_PREP(GDM_OCFQ_MASK, val));
-	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
-		      FIELD_PREP(GDM_MCFQ_MASK, val));
-	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
-		      FIELD_PREP(GDM_BCFQ_MASK, val));
-	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
-		      FIELD_PREP(GDM_UCFQ_MASK, val));
-}
-
-static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
-{
-	u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
-	u32 vip_port, cfg_addr;
-
-	switch (port) {
-	case XSI_PCIE0_PORT:
-		vip_port = XSI_PCIE0_VIP_PORT_MASK;
-		cfg_addr = REG_GDM_FWD_CFG(3);
-		break;
-	case XSI_PCIE1_PORT:
-		vip_port = XSI_PCIE1_VIP_PORT_MASK;
-		cfg_addr = REG_GDM_FWD_CFG(3);
-		break;
-	case XSI_USB_PORT:
-		vip_port = XSI_USB_VIP_PORT_MASK;
-		cfg_addr = REG_GDM_FWD_CFG(4);
-		break;
-	case XSI_ETH_PORT:
-		vip_port = XSI_ETH_VIP_PORT_MASK;
-		cfg_addr = REG_GDM_FWD_CFG(4);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (enable) {
-		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
-		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
-	} else {
-		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
-		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
-	}
-
-	airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-
-	return 0;
-}
-
-static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-{
-	const int port_list[] = {
-		XSI_PCIE0_PORT,
-		XSI_PCIE1_PORT,
-		XSI_USB_PORT,
-		XSI_ETH_PORT
-	};
-	int i, err;
-
-	for (i = 0; i < ARRAY_SIZE(port_list); i++) {
-		err = airoha_set_gdm_port(eth, port_list[i], enable);
-		if (err)
-			goto error;
-	}
-
-	return 0;
-
-error:
-	for (i--; i >= 0; i--)
-		airoha_set_gdm_port(eth, port_list[i], false);
-
-	return err;
-}
-
-static void airoha_fe_maccr_init(struct airoha_eth *eth)
-{
-	int p;
-
-	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
-		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
-			      GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
-			      GDM_DROP_CRC_ERR);
-		airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
-					    FE_PSE_PORT_CDM1);
-		airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
-			      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
-			      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
-			      FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
-	}
-
-	airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
-		      FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
-
-	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
-}
-
-static void airoha_fe_vip_setup(struct airoha_eth *eth)
-{
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
-
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
-		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-		     PATN_EN_MASK);
-
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
-		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-		     PATN_EN_MASK);
-
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
-		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-		     PATN_EN_MASK);
-
-	/* BOOTP (0x43) */
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
-		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
-		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-
-	/* BOOTP (0x44) */
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
-		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
-		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-
-	/* ISAKMP */
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
-		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
-		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
-		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-		     PATN_EN_MASK);
-
-	/* DHCPv6 */
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
-		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
-		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
-
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
-		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
-		     PATN_EN_MASK);
-
-	/* ETH->ETH_P_1905 (0x893a) */
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
-		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
-
-	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
-	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
-		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
-}
-
-static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
-					     u32 port, u32 queue)
-{
-	u32 val;
-
-	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
-		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
-		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
-		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
-	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
-
-	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
-}
-
-static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
-					      u32 port, u32 queue, u32 val)
-{
-	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
-		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
-	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
-		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
-		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
-		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
-		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
-		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
-}
-
-static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
-{
-	u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
-
-	return FIELD_GET(PSE_ALLRSV_MASK, val);
-}
-
-static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
-				    u32 port, u32 queue, u32 val)
-{
-	u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
-	u32 tmp, all_rsv, fq_limit;
-
-	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
-
-	/* modify all rsv */
-	all_rsv = airoha_fe_get_pse_all_rsv(eth);
-	all_rsv += (val - orig_val);
-	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
-		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
-
-	/* modify hthd */
-	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
-	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
-	tmp = fq_limit - all_rsv - 0x20;
-	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
-		      PSE_SHARE_USED_HTHD_MASK,
-		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
-
-	tmp = fq_limit - all_rsv - 0x100;
-	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
-		      PSE_SHARE_USED_MTHD_MASK,
-		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
-	tmp = (3 * tmp) >> 2;
-	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
-		      PSE_SHARE_USED_LTHD_MASK,
-		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
-
-	return 0;
-}
-
-static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
-{
-	const u32 pse_port_num_queues[] = {
-		[FE_PSE_PORT_CDM1] = 6,
-		[FE_PSE_PORT_GDM1] = 6,
-		[FE_PSE_PORT_GDM2] = 32,
-		[FE_PSE_PORT_GDM3] = 6,
-		[FE_PSE_PORT_PPE1] = 4,
-		[FE_PSE_PORT_CDM2] = 6,
-		[FE_PSE_PORT_CDM3] = 8,
-		[FE_PSE_PORT_CDM4] = 10,
-		[FE_PSE_PORT_PPE2] = 4,
-		[FE_PSE_PORT_GDM4] = 2,
-		[FE_PSE_PORT_CDM5] = 2,
-	};
-	u32 all_rsv;
-	int q;
-
-	all_rsv = airoha_fe_get_pse_all_rsv(eth);
-	/* hw misses PPE2 oq rsv */
-	all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
-	airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
-					 PSE_QUEUE_RSV_PAGES);
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
-					 PSE_QUEUE_RSV_PAGES);
-
-	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
-					 PSE_QUEUE_RSV_PAGES);
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
-		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
-			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
-						 PSE_QUEUE_RSV_PAGES);
-		else
-			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
-	}
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
-					 PSE_QUEUE_RSV_PAGES);
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
-
-	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
-					 PSE_QUEUE_RSV_PAGES);
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
-		if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
-			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
-						 PSE_QUEUE_RSV_PAGES);
-		else
-			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
-	}
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
-					 PSE_QUEUE_RSV_PAGES);
-
-	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
-		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
-					 PSE_QUEUE_RSV_PAGES);
-}
-
-static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
-{
-	int i;
-
-	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
-		int err, j;
-		u32 val;
-
-		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
-
-		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
-		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
-		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
-		err = read_poll_timeout(airoha_fe_rr, val,
-					val & MC_VLAN_CFG_CMD_DONE_MASK,
-					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
-					false, eth, REG_MC_VLAN_CFG);
-		if (err)
-			return err;
-
-		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
-			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
-
-			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
-			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
-			      MC_VLAN_CFG_RW_MASK;
-			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
-			err = read_poll_timeout(airoha_fe_rr, val,
-						val & MC_VLAN_CFG_CMD_DONE_MASK,
-						USEC_PER_MSEC,
-						5 * USEC_PER_MSEC, false, eth,
-						REG_MC_VLAN_CFG);
-			if (err)
-				return err;
-		}
-	}
-
-	return 0;
-}
-
-static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
-{
-	/* CDM1_CRSN_QSEL */
-	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
-		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
-		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
-				 CDM_CRSN_QSEL_Q1));
-	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
-		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
-		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
-				 CDM_CRSN_QSEL_Q1));
-	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
-		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
-		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
-				 CDM_CRSN_QSEL_Q1));
-	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
-		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
-		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
-				 CDM_CRSN_QSEL_Q6));
-	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
-		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
-		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
-				 CDM_CRSN_QSEL_Q1));
-	/* CDM2_CRSN_QSEL */
-	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
-		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
-		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
-				 CDM_CRSN_QSEL_Q1));
-	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
-		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
-		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
-				 CDM_CRSN_QSEL_Q1));
-	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
-		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
-		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
-				 CDM_CRSN_QSEL_Q1));
-	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
-		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
-		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
-				 CDM_CRSN_QSEL_Q6));
-	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
-		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
-		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
-				 CDM_CRSN_QSEL_Q1));
-}
-
-static int airoha_fe_init(struct airoha_eth *eth)
-{
-	airoha_fe_maccr_init(eth);
-
-	/* PSE IQ reserve */
-	airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
-		      FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
-	airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
-		      PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
-		      FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
-		      FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
-
-	/* enable FE copy engine for MC/KA/DPI */
-	airoha_fe_wr(eth, REG_FE_PCE_CFG,
-		     PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
-	/* set vip queue selection to ring 1 */
-	airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
-		      FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
-	airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
-		      FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
-	/* set GDM4 source interface offset to 8 */
-	airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
-		      GDM4_SPORT_OFF2_MASK |
-		      GDM4_SPORT_OFF1_MASK |
-		      GDM4_SPORT_OFF0_MASK,
-		      FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
-		      FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
-		      FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
-
-	/* set PSE Page as 128B */
-	airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
-		      FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
-		      FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
-		      FE_DMA_GLO_PG_SZ_MASK);
-	airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
-		     FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
-		     FE_RST_GDM4_MBI_ARB_MASK);
-	usleep_range(1000, 2000);
-
-	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
-	 * connect other rings to PSE Port0 OQ-0
-	 */
-	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
-	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
-	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
-	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
-
-	airoha_fe_vip_setup(eth);
-	airoha_fe_pse_ports_init(eth);
-
-	airoha_fe_set(eth, REG_GDM_MISC_CFG,
-		      GDM2_RDM_ACK_WAIT_PREF_MASK |
-		      GDM2_CHN_VLD_MODE_MASK);
-	airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
-		      FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
-
-	/* init fragment and assemble Force Port */
-	/* NPU Core-3, NPU Bridge Channel-3 */
-	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
-		      IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
-		      FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
-		      FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
-	/* QDMA LAN, RX Ring-22 */
-	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
-		      IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
-		      FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
-		      FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
-
-	airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
-	airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
-
-	airoha_fe_crsn_qsel_init(eth);
-
-	airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
-	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
-
-	/* default aging mode for mbi unlock issue */
-	airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
-		      MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
-		      FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
-		      FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
-
-	/* disable IFC by default */
-	airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
-
-	/* enable 1:N vlan action, init vlan table */
-	airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
-
-	return airoha_fe_mc_vlan_clear(eth);
-}
-
-static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
-{
-	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-	struct airoha_qdma *qdma = q->qdma;
-	struct airoha_eth *eth = qdma->eth;
-	int qid = q - &qdma->q_rx[0];
-	int nframes = 0;
-
-	while (q->queued < q->ndesc - 1) {
-		struct airoha_queue_entry *e = &q->entry[q->head];
-		struct airoha_qdma_desc *desc = &q->desc[q->head];
-		struct page *page;
-		int offset;
-		u32 val;
-
-		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
-						q->buf_size);
-		if (!page)
-			break;
-
-		q->head = (q->head + 1) % q->ndesc;
-		q->queued++;
-		nframes++;
-
-		e->buf = page_address(page) + offset;
-		e->dma_addr = page_pool_get_dma_addr(page) + offset;
-		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
-
-		dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
-					   dir);
-
-		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
-		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
-		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
-		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
-		WRITE_ONCE(desc->data, cpu_to_le32(val));
-		WRITE_ONCE(desc->msg0, 0);
-		WRITE_ONCE(desc->msg1, 0);
-		WRITE_ONCE(desc->msg2, 0);
-		WRITE_ONCE(desc->msg3, 0);
-
-		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
-				RX_RING_CPU_IDX_MASK,
-				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
-	}
-
-	return nframes;
-}
-
-static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
-				    struct airoha_qdma_desc *desc)
-{
-	u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
-
-	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
-	switch (sport) {
-	case 0x10 ... 0x13:
-		port = 0;
-		break;
-	case 0x2 ... 0x4:
-		port = sport - 1;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
-}
-
-static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
-{
-	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-	struct airoha_qdma *qdma = q->qdma;
-	struct airoha_eth *eth = qdma->eth;
-	int qid = q - &qdma->q_rx[0];
-	int done = 0;
-
-	while (done < budget) {
-		struct airoha_queue_entry *e = &q->entry[q->tail];
-		struct airoha_qdma_desc *desc = &q->desc[q->tail];
-		dma_addr_t dma_addr = le32_to_cpu(desc->addr);
-		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
-		struct sk_buff *skb;
-		int len, p;
-
-		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
-			break;
-
-		if (!dma_addr)
-			break;
-
-		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
-		if (!len)
-			break;
-
-		q->tail = (q->tail + 1) % q->ndesc;
-		q->queued--;
-
-		dma_sync_single_for_cpu(eth->dev, dma_addr,
-					SKB_WITH_OVERHEAD(q->buf_size), dir);
-
-		p = airoha_qdma_get_gdm_port(eth, desc);
-		if (p < 0 || !eth->ports[p]) {
-			page_pool_put_full_page(q->page_pool,
-						virt_to_head_page(e->buf),
-						true);
-			continue;
-		}
-
-		skb = napi_build_skb(e->buf, q->buf_size);
-		if (!skb) {
-			page_pool_put_full_page(q->page_pool,
-						virt_to_head_page(e->buf),
-						true);
-			break;
-		}
-
-		skb_reserve(skb, 2);
-		__skb_put(skb, len);
-		skb_mark_for_recycle(skb);
-		skb->dev = eth->ports[p]->dev;
-		skb->protocol = eth_type_trans(skb, skb->dev);
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		skb_record_rx_queue(skb, qid);
-		napi_gro_receive(&q->napi, skb);
-
-		done++;
-	}
-	airoha_qdma_fill_rx_queue(q);
-
-	return done;
-}
-
-static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
-{
-	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
-	int cur, done = 0;
-
-	do {
-		cur = airoha_qdma_rx_process(q, budget - done);
-		done += cur;
-	} while (cur && done < budget);
-
-	if (done < budget && napi_complete(napi))
-		airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
-				       RX_DONE_INT_MASK);
-
-	return done;
-}
-
-static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
-				     struct airoha_qdma *qdma, int ndesc)
-{
-	const struct page_pool_params pp_params = {
-		.order = 0,
-		.pool_size = 256,
-		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
-			 PP_FLAG_PAGE_FRAG,
-		.dma_dir = DMA_FROM_DEVICE,
-		.max_len = PAGE_SIZE,
-		.nid = NUMA_NO_NODE,
-		.dev = qdma->eth->dev,
-		.napi = &q->napi,
-	};
-	struct airoha_eth *eth = qdma->eth;
-	int qid = q - &qdma->q_rx[0], thr;
-	dma_addr_t dma_addr;
-
-	q->buf_size = PAGE_SIZE / 2;
-	q->ndesc = ndesc;
-	q->qdma = qdma;
-
-	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
-				GFP_KERNEL);
-	if (!q->entry)
-		return -ENOMEM;
-
-	q->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(q->page_pool)) {
-		int err = PTR_ERR(q->page_pool);
-
-		q->page_pool = NULL;
-		return err;
-	}
-
-	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
-				      &dma_addr, GFP_KERNEL);
-	if (!q->desc)
-		return -ENOMEM;
-
-	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
-
-	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
-	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
-			RX_RING_SIZE_MASK,
-			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
-
-	thr = clamp(ndesc >> 3, 1, 32);
-	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
-			FIELD_PREP(RX_RING_THR_MASK, thr));
-	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
-			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
-
-	airoha_qdma_fill_rx_queue(q);
-
-	return 0;
-}
-
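[Editor note, not part of the patch: the RX interrupt threshold is one eighth of the ring size, clamped to the range [1, 32], so ndesc = 16 gives thr = 2 while ndesc = 1024 would clamp to 32. A standalone sketch of the same computation:]

#include <stdio.h>

/* Same policy as the RX_RING_THR_MASK setup above. */
static int rx_ring_thr(int ndesc)
{
	int thr = ndesc >> 3;

	return thr < 1 ? 1 : (thr > 32 ? 32 : thr);
}

int main(void)
{
	printf("%d %d %d\n", rx_ring_thr(4), rx_ring_thr(16),
	       rx_ring_thr(1024)); /* 1 2 32 */
	return 0;
}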
-static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
-{
-	struct airoha_eth *eth = q->qdma->eth;
-
-	while (q->queued) {
-		struct airoha_queue_entry *e = &q->entry[q->tail];
-		struct page *page = virt_to_head_page(e->buf);
-
-		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
-					page_pool_get_dma_dir(q->page_pool));
-		page_pool_put_full_page(q->page_pool, page, false);
-		q->tail = (q->tail + 1) % q->ndesc;
-		q->queued--;
-	}
-}
-
-static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-		int err;
-
-		if (!(RX_DONE_INT_MASK & BIT(i))) {
-			/* rx-queue not binded to irq */
-			continue;
-		}
-
-		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
-						RX_DSCP_NUM(i));
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
-{
-	struct airoha_tx_irq_queue *irq_q;
-	int id, done = 0, irq_queued;
-	struct airoha_qdma *qdma;
-	struct airoha_eth *eth;
-	u32 status, head;
-
-	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-	qdma = irq_q->qdma;
-	id = irq_q - &qdma->q_tx_irq[0];
-	eth = qdma->eth;
-
-	status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
-	head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
-	head = head % irq_q->size;
-	irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-
-	while (irq_queued > 0 && done < budget) {
-		u32 qid, val = irq_q->q[head];
-		struct airoha_qdma_desc *desc;
-		struct airoha_queue_entry *e;
-		struct airoha_queue *q;
-		u32 index, desc_ctrl;
-		struct sk_buff *skb;
-
-		if (val == 0xff)
-			break;
-
-		irq_q->q[head] = 0xff; /* mark as done */
-		head = (head + 1) % irq_q->size;
-		irq_queued--;
-		done++;
-
-		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
-		if (qid >= ARRAY_SIZE(qdma->q_tx))
-			continue;
-
-		q = &qdma->q_tx[qid];
-		if (!q->ndesc)
-			continue;
-
-		index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
-		if (index >= q->ndesc)
-			continue;
-
-		spin_lock_bh(&q->lock);
-
-		if (!q->queued)
-			goto unlock;
-
-		desc = &q->desc[index];
-		desc_ctrl = le32_to_cpu(desc->ctrl);
-
-		if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
-		    !(desc_ctrl & QDMA_DESC_DROP_MASK))
-			goto unlock;
-
-		e = &q->entry[index];
-		skb = e->skb;
-
-		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
-				 DMA_TO_DEVICE);
-		memset(e, 0, sizeof(*e));
-		WRITE_ONCE(desc->msg0, 0);
-		WRITE_ONCE(desc->msg1, 0);
-		q->queued--;
-
-		/* completion ring can report out-of-order indexes if hw QoS
-		 * is enabled and packets with different priority are queued
-		 * to same DMA ring. Take into account possible out-of-order
-		 * reports incrementing DMA ring tail pointer
-		 */
-		while (q->tail != q->head && !q->entry[q->tail].dma_addr)
-			q->tail = (q->tail + 1) % q->ndesc;
-
-		if (skb) {
-			u16 queue = skb_get_queue_mapping(skb);
-			struct netdev_queue *txq;
-
-			txq = netdev_get_tx_queue(skb->dev, queue);
-			netdev_tx_completed_queue(txq, 1, skb->len);
-			if (netif_tx_queue_stopped(txq) &&
-			    q->ndesc - q->queued >= q->free_thr)
-				netif_tx_wake_queue(txq);
-
-			dev_kfree_skb_any(skb);
-		}
-unlock:
-		spin_unlock_bh(&q->lock);
-	}
-
-	if (done) {
-		int i, len = done >> 7;
-
-		for (i = 0; i < len; i++)
-			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
-					IRQ_CLEAR_LEN_MASK, 0x80);
-		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
-				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
-	}
-
-	if (done < budget && napi_complete(napi))
-		airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
-				       TX_DONE_INT_MASK(id));
-
-	return done;
-}
-
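[Editor note, not part of the patch: IRQ_CLEAR_LEN_MASK is only 8 bits wide, so the poll above returns completed entries to the hardware in chunks, done >> 7 full writes of 0x80 followed by the remainder done & 0x7f. For done = 300 that is two writes of 128 entries and one of 44. A standalone sketch of the same chunking:]

#include <stdio.h>

/* Same chunking as the REG_IRQ_CLEAR_LEN writes above. */
int main(void)
{
	int done = 300, i, len = done >> 7;

	for (i = 0; i < len; i++)
		printf("clear 0x80 (128 entries)\n");
	printf("clear 0x%02x (%d entries)\n", done & 0x7f, done & 0x7f);
	return 0;
}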
-static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
-				     struct airoha_qdma *qdma, int size)
-{
-	struct airoha_eth *eth = qdma->eth;
-	int i, qid = q - &qdma->q_tx[0];
-	dma_addr_t dma_addr;
-
-	spin_lock_init(&q->lock);
-	q->ndesc = size;
-	q->qdma = qdma;
-	q->free_thr = 1 + MAX_SKB_FRAGS;
-
-	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
-				GFP_KERNEL);
-	if (!q->entry)
-		return -ENOMEM;
-
-	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
-				      &dma_addr, GFP_KERNEL);
-	if (!q->desc)
-		return -ENOMEM;
-
-	for (i = 0; i < q->ndesc; i++) {
-		u32 val;
-
-		val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
-		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
-	}
-
-	/* xmit ring drop default setting */
-	airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
-			TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
-
-	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
-	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
-			FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
-	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
-			FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
-
-	return 0;
-}
-
-static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
-				   struct airoha_qdma *qdma, int size)
-{
-	int id = irq_q - &qdma->q_tx_irq[0];
-	struct airoha_eth *eth = qdma->eth;
-	dma_addr_t dma_addr;
-
-	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
-			  airoha_qdma_tx_napi_poll);
-	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
-				       &dma_addr, GFP_KERNEL);
-	if (!irq_q->q)
-		return -ENOMEM;
-
-	memset(irq_q->q, 0xff, size * sizeof(u32));
-	irq_q->size = size;
-	irq_q->qdma = qdma;
-
-	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
-	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
-			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
-	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
-			FIELD_PREP(TX_IRQ_THR_MASK, 1));
-
-	return 0;
-}
-
-static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
-{
-	int i, err;
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
-					      IRQ_QUEUE_LEN(i));
-		if (err)
-			return err;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
-						TX_DSCP_NUM);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
-{
-	struct airoha_eth *eth = q->qdma->eth;
-
-	spin_lock_bh(&q->lock);
-	while (q->queued) {
-		struct airoha_queue_entry *e = &q->entry[q->tail];
-
-		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
-				 DMA_TO_DEVICE);
-		dev_kfree_skb_any(e->skb);
-		e->skb = NULL;
-
-		q->tail = (q->tail + 1) % q->ndesc;
-		q->queued--;
-	}
-	spin_unlock_bh(&q->lock);
-}
-
-static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
-{
-	struct airoha_eth *eth = qdma->eth;
-	dma_addr_t dma_addr;
-	u32 status;
-	int size;
-
-	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-	qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-					      GFP_KERNEL);
-	if (!qdma->hfwd.desc)
-		return -ENOMEM;
-
-	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
-
-	size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-	qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-					   GFP_KERNEL);
-	if (!qdma->hfwd.q)
-		return -ENOMEM;
-
-	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
-
-	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
-			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
-			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
-	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
-			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
-	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
-			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
-			HW_FWD_DESC_NUM_MASK,
-			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
-			LMGR_INIT_START);
-
-	return read_poll_timeout(airoha_qdma_rr, status,
-				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
-				 30 * USEC_PER_MSEC, true, qdma,
-				 REG_LMGR_INIT_CFG);
-}
-
-static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
-{
-	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
-	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
-
-	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
-			  PSE_BUF_ESTIMATE_EN_MASK);
-
-	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
-			EGRESS_RATE_METER_EN_MASK |
-			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
-	/* 2047us x 31 = 63.457ms */
-	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
-			EGRESS_RATE_METER_WINDOW_SZ_MASK,
-			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
-	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
-			EGRESS_RATE_METER_TIMESLICE_MASK,
-			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
-
-	/* ratelimit init */
-	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
-	/* fast-tick 25us */
-	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
-			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
-	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
-			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
-
-	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
-	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
-			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
-	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
-			EGRESS_SLOW_TICK_RATIO_MASK,
-			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
-
-	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
-	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
-			  INGRESS_TRTCM_MODE_MASK);
-	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
-			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
-	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
-			INGRESS_SLOW_TICK_RATIO_MASK,
-			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
-
-	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
-	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
-			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
-	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
-			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
-}
-
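[Editor note, not part of the patch: the tick configuration above composes as fast-tick times slow-tick ratio. With a 25us fast tick and a ratio of 40 the slow tick is 1ms, and the ingress meter's 125us x 8 yields the same 1ms; the window comment checks out as 2047us x 31 = 63457us. A standalone sketch of the arithmetic:]

#include <stdio.h>

/* Arithmetic behind the QoS tick/window comments above. */
int main(void)
{
	printf("glb slow tick: %d us\n", 25 * 40);     /* 1000 */
	printf("ingress slow tick: %d us\n", 125 * 8); /* 1000 */
	printf("meter window: %d us\n", 2047 * 31);    /* 63457 */
	return 0;
}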
-static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
-{
-	int i;
-
-	for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
-		/* Tx-cpu transferred count */
-		airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
-		airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
-			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
-			       CNTR_ALL_DSCP_RING_EN_MASK |
-			       FIELD_PREP(CNTR_CHAN_MASK, i));
-		/* Tx-fwd transferred count */
-		airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
-		airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
-			       CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
-			       CNTR_ALL_DSCP_RING_EN_MASK |
-			       FIELD_PREP(CNTR_SRC_MASK, 1) |
-			       FIELD_PREP(CNTR_CHAN_MASK, i));
-	}
-}
-
-static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
-{
-	int i;
-
-	/* clear pending irqs */
-	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
-		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-
-	/* setup irqs */
-	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
-	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
-	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
-
-	/* setup irq binding */
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-		if (!qdma->q_tx[i].ndesc)
-			continue;
-
-		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
-			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
-					TX_RING_IRQ_BLOCKING_CFG_MASK);
-		else
-			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
-					  TX_RING_IRQ_BLOCKING_CFG_MASK);
-	}
-
-	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
-		       GLOBAL_CFG_RX_2B_OFFSET_MASK |
-		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
-		       GLOBAL_CFG_CPU_TXR_RR_MASK |
-		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
-		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
-		       GLOBAL_CFG_MULTICAST_EN_MASK |
-		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
-		       GLOBAL_CFG_TX_WB_DONE_MASK |
-		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
-
-	airoha_qdma_init_qos(qdma);
-
-	/* disable qdma rx delay interrupt */
-	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-		if (!qdma->q_rx[i].ndesc)
-			continue;
-
-		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
-				  RX_DELAY_INT_MASK);
-	}
-
-	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
-			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
-	airoha_qdma_init_qos_stats(qdma);
-
-	return 0;
-}
-
-static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
-{
-	struct airoha_qdma *qdma = dev_instance;
-	u32 intr[ARRAY_SIZE(qdma->irqmask)];
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
-		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
-		intr[i] &= qdma->irqmask[i];
-		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
-	}
-
-	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
-		return IRQ_NONE;
-
-	if (intr[1] & RX_DONE_INT_MASK) {
-		airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
-					RX_DONE_INT_MASK);
-
-		for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-			if (!qdma->q_rx[i].ndesc)
-				continue;
-
-			if (intr[1] & BIT(i))
-				napi_schedule(&qdma->q_rx[i].napi);
-		}
-	}
-
-	if (intr[0] & INT_TX_MASK) {
-		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-			if (!(intr[0] & TX_DONE_INT_MASK(i)))
-				continue;
-
-			airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
-						TX_DONE_INT_MASK(i));
-			napi_schedule(&qdma->q_tx_irq[i].napi);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
-static int airoha_qdma_init(struct platform_device *pdev,
-			    struct airoha_eth *eth,
-			    struct airoha_qdma *qdma)
-{
-	int err, id = qdma - &eth->qdma[0];
-	const char *res;
-
-	spin_lock_init(&qdma->irq_lock);
-	qdma->eth = eth;
-
-	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
-	if (!res)
-		return -ENOMEM;
-
-	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
-	if (IS_ERR(qdma->regs))
-		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
-				     "failed to iomap qdma%d regs\n", id);
-
-	qdma->irq = platform_get_irq(pdev, 4 * id);
-	if (qdma->irq < 0)
-		return qdma->irq;
-
-	err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
-			       IRQF_SHARED, KBUILD_MODNAME, qdma);
-	if (err)
-		return err;
-
-	err = airoha_qdma_init_rx(qdma);
-	if (err)
-		return err;
-
-	err = airoha_qdma_init_tx(qdma);
-	if (err)
-		return err;
-
-	err = airoha_qdma_init_hfwd_queues(qdma);
-	if (err)
-		return err;
-
-	return airoha_qdma_hw_init(qdma);
-}
-
-static int airoha_hw_init(struct platform_device *pdev,
-			  struct airoha_eth *eth)
-{
-	int err, i;
-
-	/* disable xsi */
-	err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
-					eth->xsi_rsts);
-	if (err)
-		return err;
-
-	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
-	if (err)
-		return err;
-
-	msleep(20);
-	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
-	if (err)
-		return err;
-
-	msleep(20);
-	err = airoha_fe_init(eth);
-	if (err)
-		return err;
-
-	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
-		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
-		if (err)
-			return err;
-	}
-
-	set_bit(DEV_STATE_INITIALIZED, &eth->state);
-
-	return 0;
-}
-
-static void airoha_hw_cleanup(struct airoha_qdma *qdma)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-		if (!qdma->q_rx[i].ndesc)
-			continue;
-
-		netif_napi_del(&qdma->q_rx[i].napi);
-		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
-		if (qdma->q_rx[i].page_pool)
-			page_pool_destroy(qdma->q_rx[i].page_pool);
-	}
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-		netif_napi_del(&qdma->q_tx_irq[i].napi);
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-		if (!qdma->q_tx[i].ndesc)
-			continue;
-
-		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
-	}
-}
-
-static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-		napi_enable(&qdma->q_tx_irq[i].napi);
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-		if (!qdma->q_rx[i].ndesc)
-			continue;
-
-		napi_enable(&qdma->q_rx[i].napi);
-	}
-}
-
-static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
-		napi_disable(&qdma->q_tx_irq[i].napi);
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
-		if (!qdma->q_rx[i].ndesc)
-			continue;
-
-		napi_disable(&qdma->q_rx[i].napi);
-	}
-}
-static void airoha_update_hw_stats(struct airoha_gdm_port *port)
-{
-	struct airoha_eth *eth = port->qdma->eth;
-	u32 val, i = 0;
-
-	spin_lock(&port->stats.lock);
-	u64_stats_update_begin(&port->stats.syncp);
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
-	port->stats.tx_ok_pkts += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
-	port->stats.tx_ok_pkts += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
-	port->stats.tx_ok_bytes += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
-	port->stats.tx_ok_bytes += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
-	port->stats.tx_drops += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
-	port->stats.tx_broadcast += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
-	port->stats.tx_multicast += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
-	port->stats.tx_len[i] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
-	port->stats.tx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
-	port->stats.tx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
-	port->stats.tx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
-	port->stats.tx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
-	port->stats.tx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
-	port->stats.tx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
-	port->stats.tx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
-	port->stats.tx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
-	port->stats.tx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
-	port->stats.tx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
-	port->stats.tx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
-	port->stats.tx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
-	port->stats.tx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
-	port->stats.rx_ok_pkts += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
-	port->stats.rx_ok_pkts += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
-	port->stats.rx_ok_bytes += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
-	port->stats.rx_ok_bytes += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
-	port->stats.rx_drops += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
-	port->stats.rx_broadcast += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
-	port->stats.rx_multicast += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
-	port->stats.rx_errors += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
-	port->stats.rx_crc_error += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
-	port->stats.rx_over_errors += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
-	port->stats.rx_fragment += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
-	port->stats.rx_jabber += val;
-
-	i = 0;
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
-	port->stats.rx_len[i] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
-	port->stats.rx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
-	port->stats.rx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
-	port->stats.rx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
-	port->stats.rx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
-	port->stats.rx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
-	port->stats.rx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
-	port->stats.rx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
-	port->stats.rx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
-	port->stats.rx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
-	port->stats.rx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
-	port->stats.rx_len[i] += ((u64)val << 32);
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
-	port->stats.rx_len[i++] += val;
-
-	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
-	port->stats.rx_len[i++] += val;
-
-	/* reset mib counters */
-	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
-		      FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
-
-	u64_stats_update_end(&port->stats.syncp);
-	spin_unlock(&port->stats.lock);
-}
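/* [Editorial sketch, not part of the patch] The MIB counters above are
 * 64 bits wide but exposed as split high/low 32-bit registers: the driver
 * reads the _H register, shifts it up, then adds the _L part. Modeled here
 * with plain variables (reg_hi/reg_lo are stand-ins, not real registers):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t read_split_counter(uint32_t reg_hi, uint32_t reg_lo)
{
	uint64_t val = (uint64_t)reg_hi << 32; /* high word first */

	return val + reg_lo;
}

int main(void)
{
	/* e.g. hw reports 0x00000001_00000010 packets */
	printf("%llu\n",
	       (unsigned long long)read_split_counter(0x1, 0x10));
	return 0;
}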
-static int airoha_dev_open(struct net_device *dev)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_qdma *qdma = port->qdma;
-	int err;
-
-	netif_tx_start_all_queues(dev);
-	err = airoha_set_gdm_ports(qdma->eth, true);
-	if (err)
-		return err;
-
-	if (netdev_uses_dsa(dev))
-		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
-			      GDM_STAG_EN_MASK);
-	else
-		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
-				GDM_STAG_EN_MASK);
-
-	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
-			GLOBAL_CFG_TX_DMA_EN_MASK |
-			GLOBAL_CFG_RX_DMA_EN_MASK);
-
-	return 0;
-}
-static int airoha_dev_stop(struct net_device *dev)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_qdma *qdma = port->qdma;
-	int i, err;
-
-	netif_tx_disable(dev);
-	err = airoha_set_gdm_ports(qdma->eth, false);
-	if (err)
-		return err;
-
-	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
-			  GLOBAL_CFG_TX_DMA_EN_MASK |
-			  GLOBAL_CFG_RX_DMA_EN_MASK);
-
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-		if (!qdma->q_tx[i].ndesc)
-			continue;
-
-		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
-		netdev_tx_reset_subqueue(dev, i);
-	}
-
-	return 0;
-}
-static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	int err;
-
-	err = eth_mac_addr(dev, p);
-	if (err)
-		return err;
-
-	airoha_set_macaddr(port, dev->dev_addr);
-
-	return 0;
-}
-
-static int airoha_dev_init(struct net_device *dev)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-
-	airoha_set_macaddr(port, dev->dev_addr);
-
-	return 0;
-}
-static void airoha_dev_get_stats64(struct net_device *dev,
-				   struct rtnl_link_stats64 *storage)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	unsigned int start;
-
-	airoha_update_hw_stats(port);
-	do {
-		start = u64_stats_fetch_begin(&port->stats.syncp);
-		storage->rx_packets = port->stats.rx_ok_pkts;
-		storage->tx_packets = port->stats.tx_ok_pkts;
-		storage->rx_bytes = port->stats.rx_ok_bytes;
-		storage->tx_bytes = port->stats.tx_ok_bytes;
-		storage->multicast = port->stats.rx_multicast;
-		storage->rx_errors = port->stats.rx_errors;
-		storage->rx_dropped = port->stats.rx_drops;
-		storage->tx_dropped = port->stats.tx_drops;
-		storage->rx_crc_errors = port->stats.rx_crc_error;
-		storage->rx_over_errors = port->stats.rx_over_errors;
-	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
-}
-static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
-				   struct net_device *sb_dev)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	int queue, channel;
-
-	/* For dsa device select QoS channel according to the dsa user port
-	 * index, rely on port id otherwise. Select QoS queue based on the
-	 * skb priority.
-	 */
-	channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
-	channel = channel % AIROHA_NUM_QOS_CHANNELS;
-	queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
-	queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
-
-	return queue < dev->num_tx_queues ? queue : 0;
-}
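/* [Editorial sketch, not part of the patch] A self-contained model of the
 * queue-selection arithmetic above. The channel/queue counts below are
 * assumptions for illustration (the real AIROHA_NUM_QOS_CHANNELS and
 * AIROHA_NUM_QOS_QUEUES values live in the driver): with 8 channels and
 * 8 queues, port id 3 and skb->priority 2 select channel 3 and QoS queue
 * (2 - 1) % 8 = 1, i.e. tx queue 3 * 8 + 1 = 25.
 */
#include <stdio.h>

#define NUM_QOS_CHANNELS 8	/* assumption, for illustration only */
#define NUM_QOS_QUEUES   8	/* assumption, for illustration only */

static unsigned int select_queue(unsigned int port_id, unsigned int priority,
				 unsigned int num_tx_queues)
{
	unsigned int channel = port_id % NUM_QOS_CHANNELS;
	unsigned int queue = (priority - 1) % NUM_QOS_QUEUES;

	queue = channel * NUM_QOS_QUEUES + queue;
	return queue < num_tx_queues ? queue : 0;
}

int main(void)
{
	printf("%u\n", select_queue(3, 2, 64)); /* prints 25 */
	return 0;
}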
-static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
-				   struct net_device *dev)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	u32 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
-	u32 msg0, msg1, len = skb_headlen(skb);
-	struct airoha_qdma *qdma = port->qdma;
-	struct netdev_queue *txq;
-	struct airoha_queue *q;
-	void *data = skb->data;
-	int i, qid;
-	u16 index;
-	u8 fport;
-
-	qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
-	msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
-			  qid / AIROHA_NUM_QOS_QUEUES) |
-	       FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
-			  qid % AIROHA_NUM_QOS_QUEUES);
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
-			FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
-			FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
-
-	/* TSO: fill MSS info in tcp checksum field */
-	if (skb_is_gso(skb)) {
-		if (skb_cow_head(skb, 0))
-			goto error;
-
-		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
-						 SKB_GSO_TCPV6)) {
-			__be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
-
-			tcp_hdr(skb)->check = (__force __sum16)csum;
-			msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
-		}
-	}
-
-	fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
-	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
-	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
-
-	q = &qdma->q_tx[qid];
-	if (WARN_ON_ONCE(!q->ndesc))
-		goto error;
-
-	spin_lock_bh(&q->lock);
-
-	txq = netdev_get_tx_queue(dev, qid);
-	if (q->queued + nr_frags > q->ndesc) {
-		/* not enough space in the queue */
-		netif_tx_stop_queue(txq);
-		spin_unlock_bh(&q->lock);
-		return NETDEV_TX_BUSY;
-	}
-
-	index = q->head;
-	for (i = 0; i < nr_frags; i++) {
-		struct airoha_qdma_desc *desc = &q->desc[index];
-		struct airoha_queue_entry *e = &q->entry[index];
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		dma_addr_t addr;
-		u32 val;
-
-		addr = dma_map_single(dev->dev.parent, data, len,
-				      DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
-			goto error_unmap;
-
-		index = (index + 1) % q->ndesc;
-
-		val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
-		if (i < nr_frags - 1)
-			val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
-		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
-		WRITE_ONCE(desc->addr, cpu_to_le32(addr));
-		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
-		WRITE_ONCE(desc->data, cpu_to_le32(val));
-		WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
-		WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
-		WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
-
-		e->skb = i ? NULL : skb;
-		e->dma_addr = addr;
-		e->dma_len = len;
-
-		data = skb_frag_address(frag);
-		len = skb_frag_size(frag);
-	}
-
-	q->head = index;
-	q->queued += i;
-
-	skb_tx_timestamp(skb);
-	netdev_tx_sent_queue(txq, skb->len);
-
-	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
-		airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
-				TX_RING_CPU_IDX_MASK,
-				FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
-
-	if (q->ndesc - q->queued < q->free_thr)
-		netif_tx_stop_queue(txq);
-
-	spin_unlock_bh(&q->lock);
-
-	return NETDEV_TX_OK;
-
-error_unmap:
-	for (i--; i >= 0; i--) {
-		index = (q->head + i) % q->ndesc;
-		dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
-				 q->entry[index].dma_len, DMA_TO_DEVICE);
-	}
-
-	spin_unlock_bh(&q->lock);
-error:
-	dev_kfree_skb_any(skb);
-	dev->stats.tx_dropped++;
-
-	return NETDEV_TX_OK;
-}
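/* [Editorial sketch, not part of the patch] The error path above walks the
 * already-filled ring slots backwards from the saved head and unmaps them.
 * A compact model of that head/wrap arithmetic, independent of the DMA API
 * (ring size and fill count are arbitrary example values):
 */
#include <stdio.h>

#define NDESC 8

int main(void)
{
	unsigned int head = 6, filled = 3; /* slots 6, 7 and 0 were used */

	for (int i = filled - 1; i >= 0; i--) {
		unsigned int index = (head + i) % NDESC;

		printf("unmap slot %u\n", index); /* prints 0, 7, 6 */
	}
	return 0;
}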
-static void airoha_ethtool_get_drvinfo(struct net_device *dev,
-				       struct ethtool_drvinfo *info)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_eth *eth = port->qdma->eth;
-
-	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
-	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
-}
-
-static void airoha_ethtool_get_mac_stats(struct net_device *dev,
-					 struct ethtool_eth_mac_stats *stats)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	unsigned int start;
-
-	airoha_update_hw_stats(port);
-	do {
-		start = u64_stats_fetch_begin(&port->stats.syncp);
-		stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
-		stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
-		stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
-	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
-}
-static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
-	{    0,    64 },
-	{   65,   127 },
-	{  128,   255 },
-	{  256,   511 },
-	{  512,  1023 },
-	{ 1024,  1518 },
-	{ 1519, 10239 },
-	{},
-};
-static void
-airoha_ethtool_get_rmon_stats(struct net_device *dev,
-			      struct ethtool_rmon_stats *stats,
-			      const struct ethtool_rmon_hist_range **ranges)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_hw_stats *hw_stats = &port->stats;
-	unsigned int start;
-
-	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
-		     ARRAY_SIZE(hw_stats->tx_len) + 1);
-	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
-		     ARRAY_SIZE(hw_stats->rx_len) + 1);
-
-	*ranges = airoha_ethtool_rmon_ranges;
-	airoha_update_hw_stats(port);
-	do {
-		unsigned int i;
-
-		start = u64_stats_fetch_begin(&port->stats.syncp);
-		stats->fragments = hw_stats->rx_fragment;
-		stats->jabbers = hw_stats->rx_jabber;
-		for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
-		     i++) {
-			stats->hist[i] = hw_stats->rx_len[i];
-			stats->hist_tx[i] = hw_stats->tx_len[i];
-		}
-	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
-}
-static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
-					 int channel, enum tx_sched_mode mode,
-					 const u16 *weights, u8 n_weights)
-{
-	int i;
-
-	for (i = 0; i < AIROHA_NUM_TX_RING; i++)
-		airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
-				  TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
-
-	for (i = 0; i < n_weights; i++) {
-		u32 status;
-		int err;
-
-		airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
-			       TWRR_RW_CMD_MASK |
-			       FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
-			       FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
-			       FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
-		err = read_poll_timeout(airoha_qdma_rr, status,
-					status & TWRR_RW_CMD_DONE,
-					USEC_PER_MSEC, 10 * USEC_PER_MSEC,
-					true, port->qdma,
-					REG_TXWRR_WEIGHT_CFG);
-		if (err)
-			return err;
-	}
-
-	airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
-			CHAN_QOS_MODE_MASK(channel),
-			mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
-
-	return 0;
-}
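/* [Editorial sketch, not part of the patch] read_poll_timeout() above waits
 * for the hw to clear the WRR write command. A user-space sketch of the same
 * poll-with-bounded-retries idiom; fake_reg_read() is a stand-in for
 * airoha_qdma_rr(), and the DONE bit value is assumed for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define TWRR_RW_CMD_DONE 0x80000000u	/* assumed completion bit */

static int polls;

static uint32_t fake_reg_read(void)
{
	/* pretend the hardware completes after three reads */
	return ++polls >= 3 ? TWRR_RW_CMD_DONE : 0;
}

static int poll_done(int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (fake_reg_read() & TWRR_RW_CMD_DONE)
			return 0;	/* done */
	}
	return -1;			/* would be -ETIMEDOUT */
}

int main(void)
{
	printf("%d\n", poll_done(10)); /* prints 0 */
	return 0;
}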
-static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
-					 int channel)
-{
-	static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
-
-	return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
-					     ARRAY_SIZE(w));
-}
-static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
-					int channel,
-					struct tc_ets_qopt_offload *opt)
-{
-	struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
-	enum tx_sched_mode mode = TC_SCH_SP;
-	u16 w[AIROHA_NUM_QOS_QUEUES] = {};
-	int i, nstrict = 0, nwrr, qidx;
-
-	if (p->bands > AIROHA_NUM_QOS_QUEUES)
-		return -EINVAL;
-
-	for (i = 0; i < p->bands; i++) {
-		if (!p->quanta[i])
-			nstrict++;
-	}
-
-	/* this configuration is not supported by the hw */
-	if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
-		return -EINVAL;
-
-	/* EN7581 SoC supports fixed QoS band priority where WRR queues have
-	 * lowest priorities with respect to SP ones.
-	 * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
-	 */
-	nwrr = p->bands - nstrict;
-	qidx = nstrict && nwrr ? nstrict : 0;
-	for (i = 1; i <= p->bands; i++) {
-		if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
-			return -EINVAL;
-
-		qidx = i == nwrr ? 0 : qidx + 1;
-	}
-
-	for (i = 0; i < nwrr; i++)
-		w[i] = p->weights[nstrict + i];
-
-	if (!nstrict)
-		mode = TC_SCH_WRR8;
-	else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
-		mode = nstrict + 1;
-
-	return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
-					     p->bands);
-}
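/* [Editorial sketch, not part of the patch] A runnable model of the band
 * layout check above: with nwrr WRR bands followed by nstrict strict bands,
 * the priomap must map band i (1-based, modulo the queue count) onto the
 * expected queue index. The queue count and the priomap below are example
 * values, not taken from the patch.
 */
#include <stdio.h>

#define NUM_QOS_QUEUES 8	/* assumption, for illustration only */

static int check_priomap(const int *priomap, int bands, int nstrict)
{
	int nwrr = bands - nstrict;
	int qidx = nstrict && nwrr ? nstrict : 0;

	for (int i = 1; i <= bands; i++) {
		if (priomap[i % NUM_QOS_QUEUES] != qidx)
			return -1;	/* layout not supported */
		qidx = i == nwrr ? 0 : qidx + 1;
	}
	return 0;
}

int main(void)
{
	/* 2 WRR bands + 2 strict bands: one layout the check accepts */
	int priomap[NUM_QOS_QUEUES] = { 0, 2, 3, 0, 1 };

	printf("%d\n", check_priomap(priomap, 4, 2)); /* prints 0 */
	return 0;
}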
-static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
-					int channel,
-					struct tc_ets_qopt_offload *opt)
-{
-	u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
-					    REG_CNTR_VAL(channel << 1));
-	u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
-					    REG_CNTR_VAL((channel << 1) + 1));
-	u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
-			 (fwd_tx_packets - port->fwd_tx_packets);
-	_bstats_update(opt->stats.bstats, 0, tx_packets);
-
-	port->cpu_tx_packets = cpu_tx_packets;
-	port->fwd_tx_packets = fwd_tx_packets;
-
-	return 0;
-}
-static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
-				     struct tc_ets_qopt_offload *opt)
-{
-	int channel = TC_H_MAJ(opt->handle) >> 16;
-
-	if (opt->parent == TC_H_ROOT)
-		return -EINVAL;
-
-	switch (opt->command) {
-	case TC_ETS_REPLACE:
-		return airoha_qdma_set_tx_ets_sched(port, channel, opt);
-	case TC_ETS_DESTROY:
-		/* PRIO is default qdisc scheduler */
-		return airoha_qdma_set_tx_prio_sched(port, channel);
-	case TC_ETS_STATS:
-		return airoha_qdma_get_tx_ets_stats(port, channel, opt);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
-					u32 addr, enum trtcm_param_type param,
-					enum trtcm_mode_type mode,
-					u32 *val_low, u32 *val_high)
-{
-	u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
-	u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
-			  FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
-			  FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
-			  FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
-
-	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
-	if (read_poll_timeout(airoha_qdma_rr, val,
-			      val & TRTCM_PARAM_RW_DONE_MASK,
-			      USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
-			      qdma, REG_TRTCM_CFG_PARAM(addr)))
-		return -ETIMEDOUT;
-
-	*val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
-	if (val_high)
-		*val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
-
-	return 0;
-}
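/* [Editorial sketch, not part of the patch] The meter parameters above are
 * not memory-mapped directly: the driver programs an index/command register,
 * polls a DONE bit, then reads the data registers. A tiny model of such
 * indirect register access; the two-entry table, bit layout and names are
 * illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t param_table[2] = { 0x1111, 0x2222 }; /* "hw" storage */
static uint32_t cfg_reg, data_reg;

#define RW_DONE 0x1u

static void hw_tick(void)
{
	/* the "hardware" completes the pending command */
	data_reg = param_table[(cfg_reg >> 1) & 0x1];
	cfg_reg |= RW_DONE;
}

int main(void)
{
	cfg_reg = 1 << 1;	/* select parameter index 1 */
	hw_tick();		/* in the driver: read_poll_timeout() */
	if (cfg_reg & RW_DONE)
		printf("0x%x\n", data_reg); /* prints 0x2222 */
	return 0;
}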
-static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
-					u32 addr, enum trtcm_param_type param,
-					enum trtcm_mode_type mode, u32 val)
-{
-	u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
-	u32 config = TRTCM_PARAM_RW_MASK |
-		     FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
-		     FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
-		     FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
-		     FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
-
-	airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
-	airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
-
-	return read_poll_timeout(airoha_qdma_rr, val,
-				 val & TRTCM_PARAM_RW_DONE_MASK,
-				 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
-				 qdma, REG_TRTCM_CFG_PARAM(addr));
-}
-static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
-					u32 addr, enum trtcm_mode_type mode,
-					bool enable, u32 enable_mask)
-{
-	u32 val;
-
-	if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-					mode, &val, NULL))
-		return -EINVAL;
-
-	val = enable ? val | enable_mask : val & ~enable_mask;
-
-	return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-					   mode, val);
-}
-static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
-					      int channel, u32 addr,
-					      enum trtcm_mode_type mode,
-					      u32 rate_val, u32 bucket_size)
-{
-	u32 val, config, tick, unit, rate, rate_frac;
-	int err;
-
-	if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
-					mode, &config, NULL))
-		return -EINVAL;
-
-	val = airoha_qdma_rr(qdma, addr);
-	tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
-	if (config & TRTCM_TICK_SEL)
-		tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
-	if (!tick)
-		return -EINVAL;
-
-	unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
-	if (!unit)
-		return -EINVAL;
-
-	rate = rate_val / unit;
-	rate_frac = rate_val % unit;
-	rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
-	rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
-	       FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
-
-	err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
-					  TRTCM_TOKEN_RATE_MODE, mode, rate);
-	if (err)
-		return err;
-
-	val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
-	val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
-
-	return airoha_qdma_set_trtcm_param(qdma, channel, addr,
-					   TRTCM_BUCKETSIZE_SHIFT_MODE,
-					   mode, val);
-}
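/* [Editorial sketch, not part of the patch] Worked example of the token-rate
 * split above, assuming a tick of 50 us and byte mode, so
 * unit = 8000 / 50 = 160: a requested rate of 1000 yields 1000 / 160 = 6
 * whole units plus a 40/160 remainder scaled into the fractional field.
 * The 16-bit fractional scale below is an assumption, not the real layout.
 */
#include <stdio.h>
#include <stdint.h>

#define TOKEN_RATE_MAX 0xffffu	/* assumed fractional scale */

int main(void)
{
	uint32_t tick = 50, rate_val = 1000;
	uint32_t unit = 8000 / tick;		/* 160 */
	uint32_t rate = rate_val / unit;	/* 6 */
	uint32_t frac = rate_val % unit;	/* 40 */

	/* scale the remainder into the fractional register field */
	frac = frac * TOKEN_RATE_MAX / unit;
	printf("rate=%u frac=0x%x\n", rate, frac);
	return 0;
}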
-static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
-					 int channel, u32 rate,
-					 u32 bucket_size)
-{
-	int i, err;
-
-	for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
-		err = airoha_qdma_set_trtcm_config(port->qdma, channel,
-						   REG_EGRESS_TRTCM_CFG, i,
-						   !!rate, TRTCM_METER_MODE);
-		if (err)
-			return err;
-
-		err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
-							 REG_EGRESS_TRTCM_CFG,
-							 i, rate, bucket_size);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
-					  struct tc_htb_qopt_offload *opt)
-{
-	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-	u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
-	struct net_device *dev = port->dev;
-	int num_tx_queues = dev->real_num_tx_queues;
-	int err;
-
-	if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
-		NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
-		return -EINVAL;
-	}
-
-	err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
-	if (err) {
-		NL_SET_ERR_MSG_MOD(opt->extack,
-				   "failed configuring htb offload");
-		return err;
-	}
-
-	if (opt->command == TC_HTB_NODE_MODIFY)
-		return 0;
-
-	err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
-	if (err) {
-		airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
-		NL_SET_ERR_MSG_MOD(opt->extack,
-				   "failed setting real_num_tx_queues");
-		return err;
-	}
-
-	set_bit(channel, port->qos_sq_bmap);
-	opt->qid = AIROHA_NUM_TX_RING + channel;
-
-	return 0;
-}
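/* [Editorial sketch, not part of the patch] opt->rate arrives from the tc
 * layer in bytes per second; the "div_u64(opt->rate, 1000) << 3" above
 * converts it to kbit/s (divide by 1000, multiply by 8). A quick check of
 * that conversion with an arbitrary 1 Gbit/s example:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate_bps = 125000000;		/* 125 MB/s, i.e. 1 Gbit/s */
	uint64_t rate_kbps = (rate_bps / 1000) << 3;

	printf("%llu kbit/s\n", (unsigned long long)rate_kbps); /* 1000000 */
	return 0;
}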
-static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
-{
-	struct net_device *dev = port->dev;
-
-	netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
-	airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
-	clear_bit(queue, port->qos_sq_bmap);
-}
-static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
-					   struct tc_htb_qopt_offload *opt)
-{
-	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-
-	if (!test_bit(channel, port->qos_sq_bmap)) {
-		NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
-		return -EINVAL;
-	}
-
-	airoha_tc_remove_htb_queue(port, channel);
-
-	return 0;
-}
-
-static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
-{
-	int q;
-
-	for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
-		airoha_tc_remove_htb_queue(port, q);
-
-	return 0;
-}
-
-static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
-					    struct tc_htb_qopt_offload *opt)
-{
-	u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
-
-	if (!test_bit(channel, port->qos_sq_bmap)) {
-		NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
-		return -EINVAL;
-	}
-
-	opt->qid = channel;
-
-	return 0;
-}
-static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
-				     struct tc_htb_qopt_offload *opt)
-{
-	switch (opt->command) {
-	case TC_HTB_CREATE:
-		break;
-	case TC_HTB_DESTROY:
-		return airoha_tc_htb_destroy(port);
-	case TC_HTB_NODE_MODIFY:
-	case TC_HTB_LEAF_ALLOC_QUEUE:
-		return airoha_tc_htb_alloc_leaf_queue(port, opt);
-	case TC_HTB_LEAF_DEL:
-	case TC_HTB_LEAF_DEL_LAST:
-	case TC_HTB_LEAF_DEL_LAST_FORCE:
-		return airoha_tc_htb_delete_leaf_queue(port, opt);
-	case TC_HTB_LEAF_QUERY_QUEUE:
-		return airoha_tc_get_htb_get_leaf_queue(port, opt);
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	return 0;
-}
-static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
-			       void *type_data)
-{
-	struct airoha_gdm_port *port = netdev_priv(dev);
-
-	switch (type) {
-	case TC_SETUP_QDISC_ETS:
-		return airoha_tc_setup_qdisc_ets(port, type_data);
-	case TC_SETUP_QDISC_HTB:
-		return airoha_tc_setup_qdisc_htb(port, type_data);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-static const struct net_device_ops airoha_netdev_ops = {
-	.ndo_init		= airoha_dev_init,
-	.ndo_open		= airoha_dev_open,
-	.ndo_stop		= airoha_dev_stop,
-	.ndo_select_queue	= airoha_dev_select_queue,
-	.ndo_start_xmit		= airoha_dev_xmit,
-	.ndo_get_stats64	= airoha_dev_get_stats64,
-	.ndo_set_mac_address	= airoha_dev_set_macaddr,
-	.ndo_setup_tc		= airoha_dev_tc_setup,
-};
-
-static const struct ethtool_ops airoha_ethtool_ops = {
-	.get_drvinfo		= airoha_ethtool_get_drvinfo,
-	.get_eth_mac_stats	= airoha_ethtool_get_mac_stats,
-	.get_rmon_stats		= airoha_ethtool_get_rmon_stats,
-};
-static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
-{
-	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
-	struct airoha_gdm_port *port;
-	struct airoha_qdma *qdma;
-	struct net_device *dev;
-	int err, index;
-	u32 id;
-
-	if (!id_ptr) {
-		dev_err(eth->dev, "missing gdm port id\n");
-		return -EINVAL;
-	}
-
-	id = be32_to_cpup(id_ptr);
-	index = id - 1;
-
-	if (!id || id > ARRAY_SIZE(eth->ports)) {
-		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
-		return -EINVAL;
-	}
-
-	if (eth->ports[index]) {
-		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
-		return -EINVAL;
-	}
-
-	dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
-				      AIROHA_NUM_NETDEV_TX_RINGS,
-				      AIROHA_NUM_RX_RING);
-	if (!dev) {
-		dev_err(eth->dev, "alloc_etherdev failed\n");
-		return -ENOMEM;
-	}
-
-	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
-	dev->netdev_ops = &airoha_netdev_ops;
-	dev->ethtool_ops = &airoha_ethtool_ops;
-	dev->max_mtu = AIROHA_MAX_MTU;
-	dev->watchdog_timeo = 5 * HZ;
-	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-			   NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
-			   NETIF_F_SG | NETIF_F_TSO |
-			   NETIF_F_HW_TC;
-	dev->features |= dev->hw_features;
-	dev->dev.of_node = np;
-	dev->irq = qdma->irq;
-	SET_NETDEV_DEV(dev, eth->dev);
-
-	/* reserve hw queues for HTB offloading */
-	err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
-	if (err)
-		return err;
-
-	err = of_get_ethdev_address(np, dev);
-	if (err) {
-		if (err == -EPROBE_DEFER)
-			return err;
-
-		eth_hw_addr_random(dev);
-		dev_info(eth->dev, "generated random MAC address %pM\n",
-			 dev->dev_addr);
-	}
-
-	port = netdev_priv(dev);
-	u64_stats_init(&port->stats.syncp);
-	spin_lock_init(&port->stats.lock);
-	port->qdma = qdma;
-	port->dev = dev;
-	port->id = id;
-	eth->ports[index] = port;
-
-	return register_netdev(dev);
-}
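/* [Editorial sketch, not part of the patch] GDM ports are 1-based in the DT
 * "reg" property, while eth->ports[] and the QDMA engines are 0-based; ports
 * are spread across the available engines with a simple modulo, as above.
 * A model of that mapping, with AIROHA_MAX_NUM_QDMA assumed to be 2:
 */
#include <stdio.h>

#define MAX_NUM_QDMA 2	/* assumption, for illustration only */

int main(void)
{
	for (unsigned int id = 1; id <= 4; id++) {
		unsigned int index = id - 1;

		printf("gdm port %u -> qdma%u\n", id, index % MAX_NUM_QDMA);
	}
	return 0;
}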
-static int airoha_probe(struct platform_device *pdev)
-{
-	struct device_node *np;
-	struct airoha_eth *eth;
-	int i, err;
-
-	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
-	if (!eth)
-		return -ENOMEM;
-
-	eth->dev = &pdev->dev;
-
-	err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
-	if (err) {
-		dev_err(eth->dev, "failed configuring DMA mask\n");
-		return err;
-	}
-
-	eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
-	if (IS_ERR(eth->fe_regs))
-		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
-				     "failed to iomap fe regs\n");
-
-	eth->rsts[0].id = "fe";
-	eth->rsts[1].id = "pdma";
-	eth->rsts[2].id = "qdma";
-	err = devm_reset_control_bulk_get_exclusive(eth->dev,
-						    ARRAY_SIZE(eth->rsts),
-						    eth->rsts);
-	if (err) {
-		dev_err(eth->dev, "failed to get bulk reset lines\n");
-		return err;
-	}
-
-	eth->xsi_rsts[0].id = "xsi-mac";
-	eth->xsi_rsts[1].id = "hsi0-mac";
-	eth->xsi_rsts[2].id = "hsi1-mac";
-	eth->xsi_rsts[3].id = "hsi-mac";
-	eth->xsi_rsts[4].id = "xfp-mac";
-	err = devm_reset_control_bulk_get_exclusive(eth->dev,
-						    ARRAY_SIZE(eth->xsi_rsts),
-						    eth->xsi_rsts);
-	if (err) {
-		dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
-		return err;
-	}
-
-	eth->napi_dev = alloc_netdev_dummy(0);
-	if (!eth->napi_dev)
-		return -ENOMEM;
-
-	/* Enable threaded NAPI by default */
-	eth->napi_dev->threaded = true;
-	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
-	platform_set_drvdata(pdev, eth);
-
-	err = airoha_hw_init(pdev, eth);
-	if (err)
-		goto error_hw_cleanup;
-
-	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-		airoha_qdma_start_napi(&eth->qdma[i]);
-
-	for_each_child_of_node(pdev->dev.of_node, np) {
-		if (!of_device_is_compatible(np, "airoha,eth-mac"))
-			continue;
-
-		if (!of_device_is_available(np))
-			continue;
-
-		err = airoha_alloc_gdm_port(eth, np);
-		if (err) {
-			of_node_put(np);
-			goto error_napi_stop;
-		}
-	}
-
-	return 0;
-
-error_napi_stop:
-	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-		airoha_qdma_stop_napi(&eth->qdma[i]);
-error_hw_cleanup:
-	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-		airoha_hw_cleanup(&eth->qdma[i]);
-
-	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-		struct airoha_gdm_port *port = eth->ports[i];
-
-		if (port && port->dev->reg_state == NETREG_REGISTERED)
-			unregister_netdev(port->dev);
-	}
-
-	free_netdev(eth->napi_dev);
-	platform_set_drvdata(pdev, NULL);
-
-	return err;
-}
-static void airoha_remove(struct platform_device *pdev)
-{
-	struct airoha_eth *eth = platform_get_drvdata(pdev);
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
-		airoha_qdma_stop_napi(&eth->qdma[i]);
-		airoha_hw_cleanup(&eth->qdma[i]);
-	}
-
-	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-		struct airoha_gdm_port *port = eth->ports[i];
-
-		if (!port)
-			continue;
-
-		airoha_dev_stop(port->dev);
-		unregister_netdev(port->dev);
-	}
-	free_netdev(eth->napi_dev);
-
-	platform_set_drvdata(pdev, NULL);
-}
-static const struct of_device_id of_airoha_match[] = {
-	{ .compatible = "airoha,en7581-eth" },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, of_airoha_match);
-
-static struct platform_driver airoha_driver = {
-	.probe = airoha_probe,
-	.remove_new = airoha_remove,
-	.driver = {
-		.name = KBUILD_MODNAME,
-		.of_match_table = of_airoha_match,
-	},
-};
-module_platform_driver(airoha_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");