1 From fb3dda82fd38ca42140f29b3082324dcdc128293 Mon Sep 17 00:00:00 2001
2 From: Lorenzo Bianconi <lorenzo@kernel.org>
3 Date: Fri, 28 Feb 2025 11:54:09 +0100
4 Subject: [PATCH 01/15] net: airoha: Move airoha_eth driver in a dedicated
5 folder
6
7 The airoha_eth driver shares no codebase with the mtk_eth_soc one.
8 Moreover, the upcoming features (flowtable hw offloading, PCS, ...) will
9 not reuse any code from the MediaTek driver. Move the Airoha driver
10 into a dedicated folder.
11
12 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
13 Signed-off-by: Paolo Abeni <pabeni@redhat.com>
14 ---
15 drivers/net/ethernet/Kconfig | 2 ++
16 drivers/net/ethernet/Makefile | 1 +
17 drivers/net/ethernet/airoha/Kconfig | 18 ++++++++++++++++++
18 drivers/net/ethernet/airoha/Makefile | 6 ++++++
19 .../ethernet/{mediatek => airoha}/airoha_eth.c | 0
20 drivers/net/ethernet/mediatek/Kconfig | 8 --------
21 drivers/net/ethernet/mediatek/Makefile | 1 -
22 7 files changed, 27 insertions(+), 9 deletions(-)
23 create mode 100644 drivers/net/ethernet/airoha/Kconfig
24 create mode 100644 drivers/net/ethernet/airoha/Makefile
25 rename drivers/net/ethernet/{mediatek => airoha}/airoha_eth.c (100%)
26
27 --- a/drivers/net/ethernet/Kconfig
28 +++ b/drivers/net/ethernet/Kconfig
29 @@ -23,6 +23,8 @@ source "drivers/net/ethernet/actions/Kco
30 source "drivers/net/ethernet/adaptec/Kconfig"
31 source "drivers/net/ethernet/aeroflex/Kconfig"
32 source "drivers/net/ethernet/agere/Kconfig"
33 +source "drivers/net/ethernet/airoha/Kconfig"
34 +source "drivers/net/ethernet/mellanox/Kconfig"
35 source "drivers/net/ethernet/alacritech/Kconfig"
36 source "drivers/net/ethernet/allwinner/Kconfig"
37 source "drivers/net/ethernet/alteon/Kconfig"
38 --- a/drivers/net/ethernet/Makefile
39 +++ b/drivers/net/ethernet/Makefile
40 @@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adap
41 obj-$(CONFIG_GRETH) += aeroflex/
42 obj-$(CONFIG_NET_VENDOR_ADI) += adi/
43 obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
44 +obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
45 obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
46 obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
47 obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
48 --- /dev/null
49 +++ b/drivers/net/ethernet/airoha/Kconfig
50 @@ -0,0 +1,18 @@
51 +# SPDX-License-Identifier: GPL-2.0-only
52 +config NET_VENDOR_AIROHA
53 + bool "Airoha devices"
54 + depends on ARCH_AIROHA || COMPILE_TEST
55 + help
56 + If you have an Airoha SoC with ethernet, say Y.
57 +
58 +if NET_VENDOR_AIROHA
59 +
60 +config NET_AIROHA
61 + tristate "Airoha SoC Gigabit Ethernet support"
62 + depends on NET_DSA || !NET_DSA
63 + select PAGE_POOL
64 + help
65 + This driver supports the gigabit ethernet MACs in the
66 + Airoha SoC family.
67 +
68 +endif #NET_VENDOR_AIROHA
69 --- /dev/null
70 +++ b/drivers/net/ethernet/airoha/Makefile
71 @@ -0,0 +1,6 @@
72 +# SPDX-License-Identifier: GPL-2.0-only
73 +#
74 +# Makefile for the Airoha SoCs built-in ethernet macs
75 +#
76 +
77 +obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
78 --- a/drivers/net/ethernet/mediatek/Kconfig
79 +++ b/drivers/net/ethernet/mediatek/Kconfig
80 @@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
81
82 if NET_VENDOR_MEDIATEK
83
84 -config NET_AIROHA
85 - tristate "Airoha SoC Gigabit Ethernet support"
86 - depends on NET_DSA || !NET_DSA
87 - select PAGE_POOL
88 - help
89 - This driver supports the gigabit ethernet MACs in the
90 - Airoha SoC family.
91 -
92 config NET_MEDIATEK_SOC_WED
93 depends on ARCH_MEDIATEK || COMPILE_TEST
94 def_bool NET_MEDIATEK_SOC != n
95 --- a/drivers/net/ethernet/mediatek/Makefile
96 +++ b/drivers/net/ethernet/mediatek/Makefile
97 @@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) +
98 endif
99 obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
100 obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
101 -obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
102 --- /dev/null
103 +++ b/drivers/net/ethernet/airoha/airoha_eth.c
104 @@ -0,0 +1,3359 @@
105 +// SPDX-License-Identifier: GPL-2.0-only
106 +/*
107 + * Copyright (c) 2024 AIROHA Inc
108 + * Author: Lorenzo Bianconi <lorenzo@kernel.org>
109 + */
110 +#include <linux/etherdevice.h>
111 +#include <linux/iopoll.h>
112 +#include <linux/kernel.h>
113 +#include <linux/netdevice.h>
114 +#include <linux/of.h>
115 +#include <linux/of_net.h>
116 +#include <linux/platform_device.h>
117 +#include <linux/reset.h>
118 +#include <linux/tcp.h>
119 +#include <linux/u64_stats_sync.h>
120 +#include <net/dsa.h>
121 +#include <net/page_pool/helpers.h>
122 +#include <net/pkt_cls.h>
123 +#include <uapi/linux/ppp_defs.h>
124 +
125 +#define AIROHA_MAX_NUM_GDM_PORTS 1
126 +#define AIROHA_MAX_NUM_QDMA 2
127 +#define AIROHA_MAX_NUM_RSTS 3
128 +#define AIROHA_MAX_NUM_XSI_RSTS 5
129 +#define AIROHA_MAX_MTU 2000
130 +#define AIROHA_MAX_PACKET_SIZE 2048
131 +#define AIROHA_NUM_QOS_CHANNELS 4
132 +#define AIROHA_NUM_QOS_QUEUES 8
133 +#define AIROHA_NUM_TX_RING 32
134 +#define AIROHA_NUM_RX_RING 32
135 +#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
136 + AIROHA_NUM_QOS_CHANNELS)
137 +#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
138 +#define AIROHA_FE_MC_MAX_VLAN_PORT 16
139 +#define AIROHA_NUM_TX_IRQ 2
140 +#define HW_DSCP_NUM 2048
141 +#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
142 +#define TX_DSCP_NUM 1024
143 +#define RX_DSCP_NUM(_n) \
144 + ((_n) == 2 ? 128 : \
145 + (_n) == 11 ? 128 : \
146 + (_n) == 15 ? 128 : \
147 + (_n) == 0 ? 1024 : 16)
148 +
149 +#define PSE_RSV_PAGES 128
150 +#define PSE_QUEUE_RSV_PAGES 64
151 +
152 +#define QDMA_METER_IDX(_n) ((_n) & 0xff)
153 +#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
154 +
155 +/* FE */
156 +#define PSE_BASE 0x0100
157 +#define CSR_IFC_BASE 0x0200
158 +#define CDM1_BASE 0x0400
159 +#define GDM1_BASE 0x0500
160 +#define PPE1_BASE 0x0c00
161 +
162 +#define CDM2_BASE 0x1400
163 +#define GDM2_BASE 0x1500
164 +
165 +#define GDM3_BASE 0x1100
166 +#define GDM4_BASE 0x2500
167 +
168 +#define GDM_BASE(_n) \
169 + ((_n) == 4 ? GDM4_BASE : \
170 + (_n) == 3 ? GDM3_BASE : \
171 + (_n) == 2 ? GDM2_BASE : GDM1_BASE)
172 +
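The four GDM frame-engine blocks do not sit at a uniform stride (GDM3 is below GDM2), so GDM_BASE() resolves the instance through a ternary chain rather than arithmetic. A minimal standalone sketch of the mapping (userspace harness, not part of the patch; the macros mirror the ones above):

#include <stdio.h>

#define GDM1_BASE 0x0500
#define GDM2_BASE 0x1500
#define GDM3_BASE 0x1100
#define GDM4_BASE 0x2500

#define GDM_BASE(_n)         \
    ((_n) == 4 ? GDM4_BASE : \
     (_n) == 3 ? GDM3_BASE : \
     (_n) == 2 ? GDM2_BASE : GDM1_BASE)

int main(void)
{
    for (int n = 1; n <= 4; n++)
        printf("GDM%d base: 0x%04x\n", n, GDM_BASE(n));
    return 0; /* prints 0x0500, 0x1500, 0x1100, 0x2500 */
}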
173 +#define REG_FE_DMA_GLO_CFG 0x0000
174 +#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
175 +#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
176 +
177 +#define REG_FE_RST_GLO_CFG 0x0004
178 +#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
179 +#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
180 +#define FE_RST_CORE_MASK BIT(0)
181 +
182 +#define REG_FE_WAN_MAC_H 0x0030
183 +#define REG_FE_LAN_MAC_H 0x0040
184 +
185 +#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
186 +#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
187 +
188 +#define REG_FE_CDM1_OQ_MAP0 0x0050
189 +#define REG_FE_CDM1_OQ_MAP1 0x0054
190 +#define REG_FE_CDM1_OQ_MAP2 0x0058
191 +#define REG_FE_CDM1_OQ_MAP3 0x005c
192 +
193 +#define REG_FE_PCE_CFG 0x0070
194 +#define PCE_DPI_EN_MASK BIT(2)
195 +#define PCE_KA_EN_MASK BIT(1)
196 +#define PCE_MC_EN_MASK BIT(0)
197 +
198 +#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
199 +#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
200 +#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
201 +#define PSE_CFG_WR_EN_MASK BIT(8)
202 +#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
203 +
204 +#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
205 +#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
206 +
207 +#define PSE_FQ_CFG 0x008c
208 +#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
209 +
210 +#define REG_FE_PSE_BUF_SET 0x0090
211 +#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
212 +#define PSE_ALLRSV_MASK GENMASK(14, 0)
213 +
214 +#define REG_PSE_SHARE_USED_THD 0x0094
215 +#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
216 +#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
217 +
218 +#define REG_GDM_MISC_CFG 0x0148
219 +#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
220 +#define GDM2_CHN_VLD_MODE_MASK BIT(5)
221 +
222 +#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
223 +#define FE_IFC_EN_MASK BIT(0)
224 +
225 +#define REG_FE_VIP_PORT_EN 0x01f0
226 +#define REG_FE_IFC_PORT_EN 0x01f4
227 +
228 +#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
229 +#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
230 +
231 +#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
232 +#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
233 +#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
234 +
235 +#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
236 +#define PATN_FCPU_EN_MASK BIT(7)
237 +#define PATN_SWP_EN_MASK BIT(6)
238 +#define PATN_DP_EN_MASK BIT(5)
239 +#define PATN_SP_EN_MASK BIT(4)
240 +#define PATN_TYPE_MASK GENMASK(3, 1)
241 +#define PATN_EN_MASK BIT(0)
242 +
243 +#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
244 +#define PATN_DP_MASK GENMASK(31, 16)
245 +#define PATN_SP_MASK GENMASK(15, 0)
246 +
247 +#define REG_CDM1_VLAN_CTRL CDM1_BASE
248 +#define CDM1_VLAN_MASK GENMASK(31, 16)
249 +
250 +#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
251 +#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
252 +
253 +#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
254 +#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
255 + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
256 +
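Each 32-bit CRSN_QSEL register packs four 5-bit queue-select fields, one byte lane per CPU reason code: the register index is crsn >> 2 and CDM1_CRSN_QSEL_REASON_MASK() picks the lane via crsn % 4 (airoha_fe_crsn_qsel_init() further down uses exactly this split). A standalone sketch, with a simplified userspace GENMASK() stand-in:

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
    GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))

int main(void)
{
    unsigned int crsn = 0x16; /* CRSN_22: hit bind, force route to CPU */

    printf("register: REG_CDM1_CRSN_QSEL(%u)\n", crsn >> 2);
    printf("field mask: 0x%08x\n", CDM1_CRSN_QSEL_REASON_MASK(crsn));
    /* -> register index 5, mask 0x001f0000 (bits 20:16) */
    return 0;
}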
257 +#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
258 +#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
259 +#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
260 +
261 +#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
262 +#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
263 + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
264 +
265 +#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
266 +#define GDM_DROP_CRC_ERR BIT(23)
267 +#define GDM_IP4_CKSUM BIT(22)
268 +#define GDM_TCP_CKSUM BIT(21)
269 +#define GDM_UDP_CKSUM BIT(20)
270 +#define GDM_UCFQ_MASK GENMASK(15, 12)
271 +#define GDM_BCFQ_MASK GENMASK(11, 8)
272 +#define GDM_MCFQ_MASK GENMASK(7, 4)
273 +#define GDM_OCFQ_MASK GENMASK(3, 0)
274 +
275 +#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
276 +#define GDM_INGRESS_FC_EN_MASK BIT(1)
277 +#define GDM_STAG_EN_MASK BIT(0)
278 +
279 +#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
280 +#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
281 +#define GDM_LONG_LEN_MASK GENMASK(29, 16)
282 +
283 +#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
284 +#define FE_CPORT_PAD BIT(26)
285 +#define FE_CPORT_PORT_XFC_MASK BIT(25)
286 +#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
287 +
288 +#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
289 +#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
290 +#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
291 +
292 +#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
293 +#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
294 +#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
295 +#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
296 +#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
297 +#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
298 +
299 +#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
300 +#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
301 +#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
302 +#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
303 +#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
304 +#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
305 +#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
306 +#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
307 +#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
308 +#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
309 +#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
310 +#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
311 +#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
312 +#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
313 +#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
314 +
315 +#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
316 +#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
317 +#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
318 +#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
319 +#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
320 +#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
321 +#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
322 +#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
323 +#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
324 +#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
325 +#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
326 +#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
327 +#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
328 +#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
329 +#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
330 +#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
331 +#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
332 +#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
333 +#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
334 +#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
335 +#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
336 +#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
337 +
338 +#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
339 +#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
340 +#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
341 +#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
342 +#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
343 +
344 +#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
345 +#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
346 +#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
347 +#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
348 +
349 +#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
350 +#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
351 +#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
352 +#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
353 +#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
354 +#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
355 +#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
356 +#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
357 +#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
358 +#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
359 +#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
360 +#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
361 +#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
362 +#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
363 +#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
364 +#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
365 +
366 +#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
367 +#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
368 +#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
369 +
370 +#define REG_GDM3_FWD_CFG GDM3_BASE
371 +#define GDM3_PAD_EN_MASK BIT(28)
372 +
373 +#define REG_GDM4_FWD_CFG GDM4_BASE
374 +#define GDM4_PAD_EN_MASK BIT(28)
375 +#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
376 +
377 +#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
378 +#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
379 +#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
380 +#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
381 +
382 +#define REG_IP_FRAG_FP 0x2010
383 +#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
384 +#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
385 +#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
386 +#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
387 +
388 +#define REG_MC_VLAN_EN 0x2100
389 +#define MC_VLAN_EN_MASK BIT(0)
390 +
391 +#define REG_MC_VLAN_CFG 0x2104
392 +#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
393 +#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
394 +#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
395 +#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
396 +#define MC_VLAN_CFG_RW_MASK BIT(0)
397 +
398 +#define REG_MC_VLAN_DATA 0x2108
399 +
400 +#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
401 +
402 +/* QDMA */
403 +#define REG_QDMA_GLOBAL_CFG 0x0004
404 +#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
405 +#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
406 +#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
407 +#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
408 +#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
409 +#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
410 +#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
411 +#define GLOBAL_CFG_RESET_MASK BIT(23)
412 +#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
413 +#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
414 +#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
415 +#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
416 +#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
417 +#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
418 +#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
419 +#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
420 +#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
421 +#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
422 +#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
423 +#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
424 +#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
425 +#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
426 +#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
427 +
428 +#define REG_FWD_DSCP_BASE 0x0010
429 +#define REG_FWD_BUF_BASE 0x0014
430 +
431 +#define REG_HW_FWD_DSCP_CFG 0x0018
432 +#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
433 +#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
434 +#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
435 +
436 +#define REG_INT_STATUS(_n) \
437 + (((_n) == 4) ? 0x0730 : \
438 + ((_n) == 3) ? 0x0724 : \
439 + ((_n) == 2) ? 0x0720 : \
440 + ((_n) == 1) ? 0x0024 : 0x0020)
441 +
442 +#define REG_INT_ENABLE(_n) \
443 + (((_n) == 4) ? 0x0750 : \
444 + ((_n) == 3) ? 0x0744 : \
445 + ((_n) == 2) ? 0x0740 : \
446 + ((_n) == 1) ? 0x002c : 0x0028)
447 +
448 +/* QDMA_CSR_INT_ENABLE1 */
449 +#define RX15_COHERENT_INT_MASK BIT(31)
450 +#define RX14_COHERENT_INT_MASK BIT(30)
451 +#define RX13_COHERENT_INT_MASK BIT(29)
452 +#define RX12_COHERENT_INT_MASK BIT(28)
453 +#define RX11_COHERENT_INT_MASK BIT(27)
454 +#define RX10_COHERENT_INT_MASK BIT(26)
455 +#define RX9_COHERENT_INT_MASK BIT(25)
456 +#define RX8_COHERENT_INT_MASK BIT(24)
457 +#define RX7_COHERENT_INT_MASK BIT(23)
458 +#define RX6_COHERENT_INT_MASK BIT(22)
459 +#define RX5_COHERENT_INT_MASK BIT(21)
460 +#define RX4_COHERENT_INT_MASK BIT(20)
461 +#define RX3_COHERENT_INT_MASK BIT(19)
462 +#define RX2_COHERENT_INT_MASK BIT(18)
463 +#define RX1_COHERENT_INT_MASK BIT(17)
464 +#define RX0_COHERENT_INT_MASK BIT(16)
465 +#define TX7_COHERENT_INT_MASK BIT(15)
466 +#define TX6_COHERENT_INT_MASK BIT(14)
467 +#define TX5_COHERENT_INT_MASK BIT(13)
468 +#define TX4_COHERENT_INT_MASK BIT(12)
469 +#define TX3_COHERENT_INT_MASK BIT(11)
470 +#define TX2_COHERENT_INT_MASK BIT(10)
471 +#define TX1_COHERENT_INT_MASK BIT(9)
472 +#define TX0_COHERENT_INT_MASK BIT(8)
473 +#define CNT_OVER_FLOW_INT_MASK BIT(7)
474 +#define IRQ1_FULL_INT_MASK BIT(5)
475 +#define IRQ1_INT_MASK BIT(4)
476 +#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
477 +#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
478 +#define IRQ0_FULL_INT_MASK BIT(1)
479 +#define IRQ0_INT_MASK BIT(0)
480 +
481 +#define TX_DONE_INT_MASK(_n) \
482 + ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
483 + : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
484 +
485 +#define INT_TX_MASK \
486 + (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
487 + IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
488 +
489 +#define INT_IDX0_MASK \
490 + (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
491 + TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
492 + TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
493 + TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
494 + RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
495 + RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
496 + RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
497 + RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
498 + RX15_COHERENT_INT_MASK | INT_TX_MASK)
499 +
500 +/* QDMA_CSR_INT_ENABLE2 */
501 +#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
502 +#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
503 +#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
504 +#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
505 +#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
506 +#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
507 +#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
508 +#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
509 +#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
510 +#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
511 +#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
512 +#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
513 +#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
514 +#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
515 +#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
516 +#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
517 +#define RX15_DONE_INT_MASK BIT(15)
518 +#define RX14_DONE_INT_MASK BIT(14)
519 +#define RX13_DONE_INT_MASK BIT(13)
520 +#define RX12_DONE_INT_MASK BIT(12)
521 +#define RX11_DONE_INT_MASK BIT(11)
522 +#define RX10_DONE_INT_MASK BIT(10)
523 +#define RX9_DONE_INT_MASK BIT(9)
524 +#define RX8_DONE_INT_MASK BIT(8)
525 +#define RX7_DONE_INT_MASK BIT(7)
526 +#define RX6_DONE_INT_MASK BIT(6)
527 +#define RX5_DONE_INT_MASK BIT(5)
528 +#define RX4_DONE_INT_MASK BIT(4)
529 +#define RX3_DONE_INT_MASK BIT(3)
530 +#define RX2_DONE_INT_MASK BIT(2)
531 +#define RX1_DONE_INT_MASK BIT(1)
532 +#define RX0_DONE_INT_MASK BIT(0)
533 +
534 +#define RX_DONE_INT_MASK \
535 + (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
536 + RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
537 + RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
538 + RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
539 + RX15_DONE_INT_MASK)
540 +#define INT_IDX1_MASK \
541 + (RX_DONE_INT_MASK | \
542 + RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
543 + RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
544 + RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
545 + RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
546 + RX15_NO_CPU_DSCP_INT_MASK)
547 +
548 +/* QDMA_CSR_INT_ENABLE5 */
549 +#define TX31_COHERENT_INT_MASK BIT(31)
550 +#define TX30_COHERENT_INT_MASK BIT(30)
551 +#define TX29_COHERENT_INT_MASK BIT(29)
552 +#define TX28_COHERENT_INT_MASK BIT(28)
553 +#define TX27_COHERENT_INT_MASK BIT(27)
554 +#define TX26_COHERENT_INT_MASK BIT(26)
555 +#define TX25_COHERENT_INT_MASK BIT(25)
556 +#define TX24_COHERENT_INT_MASK BIT(24)
557 +#define TX23_COHERENT_INT_MASK BIT(23)
558 +#define TX22_COHERENT_INT_MASK BIT(22)
559 +#define TX21_COHERENT_INT_MASK BIT(21)
560 +#define TX20_COHERENT_INT_MASK BIT(20)
561 +#define TX19_COHERENT_INT_MASK BIT(19)
562 +#define TX18_COHERENT_INT_MASK BIT(18)
563 +#define TX17_COHERENT_INT_MASK BIT(17)
564 +#define TX16_COHERENT_INT_MASK BIT(16)
565 +#define TX15_COHERENT_INT_MASK BIT(15)
566 +#define TX14_COHERENT_INT_MASK BIT(14)
567 +#define TX13_COHERENT_INT_MASK BIT(13)
568 +#define TX12_COHERENT_INT_MASK BIT(12)
569 +#define TX11_COHERENT_INT_MASK BIT(11)
570 +#define TX10_COHERENT_INT_MASK BIT(10)
571 +#define TX9_COHERENT_INT_MASK BIT(9)
572 +#define TX8_COHERENT_INT_MASK BIT(8)
573 +
574 +#define INT_IDX4_MASK \
575 + (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
576 + TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
577 + TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
578 + TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
579 + TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
580 + TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
581 + TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
582 + TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
583 + TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
584 + TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
585 + TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
586 + TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
587 +
588 +#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
589 +
590 +#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
591 +#define TX_IRQ_THR_MASK GENMASK(27, 16)
592 +#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
593 +
594 +#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
595 +#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
596 +
597 +#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
598 +#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
599 +#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
600 +
601 +#define REG_TX_RING_BASE(_n) \
602 + (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
603 +
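The TX ring registers are split across two banks: rings 0-7 start at 0x0100 and rings 8-31 at 0x0b00, both with a 32-byte per-ring stride. A standalone sketch (harness only, macro copied from above):

#include <stdio.h>

#define REG_TX_RING_BASE(_n) \
    (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))

int main(void)
{
    printf("ring 0:  0x%04x\n", REG_TX_RING_BASE(0));  /* 0x0100 */
    printf("ring 7:  0x%04x\n", REG_TX_RING_BASE(7));  /* 0x01e0 */
    printf("ring 8:  0x%04x\n", REG_TX_RING_BASE(8));  /* 0x0b00 */
    printf("ring 31: 0x%04x\n", REG_TX_RING_BASE(31)); /* 0x0de0 */
    return 0;
}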
604 +#define REG_TX_RING_BLOCKING(_n) \
605 + (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
606 +
607 +#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
608 +#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
609 +#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
610 +#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
611 +#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
612 +
613 +#define REG_TX_CPU_IDX(_n) \
614 + (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
615 +
616 +#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
617 +
618 +#define REG_TX_DMA_IDX(_n) \
619 + (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
620 +
621 +#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
622 +
623 +#define IRQ_RING_IDX_MASK GENMASK(20, 16)
624 +#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
625 +
626 +#define REG_RX_RING_BASE(_n) \
627 + (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
628 +
629 +#define REG_RX_RING_SIZE(_n) \
630 + (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
631 +
632 +#define RX_RING_THR_MASK GENMASK(31, 16)
633 +#define RX_RING_SIZE_MASK GENMASK(15, 0)
634 +
635 +#define REG_RX_CPU_IDX(_n) \
636 + (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
637 +
638 +#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
639 +
640 +#define REG_RX_DMA_IDX(_n) \
641 + (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
642 +
643 +#define REG_RX_DELAY_INT_IDX(_n) \
644 + (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
645 +
646 +#define RX_DELAY_INT_MASK GENMASK(15, 0)
647 +
648 +#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
649 +
650 +#define REG_INGRESS_TRTCM_CFG 0x0070
651 +#define INGRESS_TRTCM_EN_MASK BIT(31)
652 +#define INGRESS_TRTCM_MODE_MASK BIT(30)
653 +#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
654 +#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
655 +
656 +#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
657 +#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
658 +
659 +#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
660 +#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
661 +
662 +#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
663 +#define CNTR_EN_MASK BIT(31)
664 +#define CNTR_ALL_CHAN_EN_MASK BIT(30)
665 +#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
666 +#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
667 +#define CNTR_SRC_MASK GENMASK(27, 24)
668 +#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
669 +#define CNTR_CHAN_MASK GENMASK(7, 3)
670 +#define CNTR_QUEUE_MASK GENMASK(2, 0)
671 +
672 +#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
673 +
674 +#define REG_LMGR_INIT_CFG 0x1000
675 +#define LMGR_INIT_START BIT(31)
676 +#define LMGR_SRAM_MODE_MASK BIT(30)
677 +#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
678 +#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
679 +
680 +#define REG_FWD_DSCP_LOW_THR 0x1004
681 +#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
682 +
683 +#define REG_EGRESS_RATE_METER_CFG 0x100c
684 +#define EGRESS_RATE_METER_EN_MASK BIT(31)
685 +#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
686 +#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
687 +#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
688 +
689 +#define REG_EGRESS_TRTCM_CFG 0x1010
690 +#define EGRESS_TRTCM_EN_MASK BIT(31)
691 +#define EGRESS_TRTCM_MODE_MASK BIT(30)
692 +#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
693 +#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
694 +
695 +#define TRTCM_PARAM_RW_MASK BIT(31)
696 +#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
697 +#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
698 +#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
699 +#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
700 +#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
701 +
702 +#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
703 +#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
704 +#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
705 +
706 +#define REG_TXWRR_MODE_CFG 0x1020
707 +#define TWRR_WEIGHT_SCALE_MASK BIT(31)
708 +#define TWRR_WEIGHT_BASE_MASK BIT(3)
709 +
710 +#define REG_TXWRR_WEIGHT_CFG 0x1024
711 +#define TWRR_RW_CMD_MASK BIT(31)
712 +#define TWRR_RW_CMD_DONE BIT(30)
713 +#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
714 +#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
715 +#define TWRR_VALUE_MASK GENMASK(15, 0)
716 +
717 +#define REG_PSE_BUF_USAGE_CFG 0x1028
718 +#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
719 +
720 +#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
721 +#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
722 +
723 +#define REG_GLB_TRTCM_CFG 0x1080
724 +#define GLB_TRTCM_EN_MASK BIT(31)
725 +#define GLB_TRTCM_MODE_MASK BIT(30)
726 +#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
727 +#define GLB_FAST_TICK_MASK GENMASK(15, 0)
728 +
729 +#define REG_TXQ_CNGST_CFG 0x10a0
730 +#define TXQ_CNGST_DROP_EN BIT(31)
731 +#define TXQ_CNGST_DEI_DROP_EN BIT(30)
732 +
733 +#define REG_SLA_TRTCM_CFG 0x1150
734 +#define SLA_TRTCM_EN_MASK BIT(31)
735 +#define SLA_TRTCM_MODE_MASK BIT(30)
736 +#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
737 +#define SLA_FAST_TICK_MASK GENMASK(15, 0)
738 +
739 +/* CTRL */
740 +#define QDMA_DESC_DONE_MASK BIT(31)
741 +#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
742 +#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
743 +#define QDMA_DESC_DEI_MASK BIT(25)
744 +#define QDMA_DESC_NO_DROP_MASK BIT(24)
745 +#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
746 +/* DATA */
747 +#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
748 +/* TX MSG0 */
749 +#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
750 +#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
751 +#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
752 +#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
753 +#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
754 +#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
755 +#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
756 +#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
757 +#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
758 +#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
759 +/* TX MSG1 */
760 +#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
761 +#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
762 +#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
763 +#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
764 +#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
765 +#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
766 +#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
767 +#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
768 +#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
769 +
770 +/* RX MSG1 */
771 +#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
772 +#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
773 +#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
774 +#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
775 +#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
776 +#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
777 +#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
778 +#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
779 +#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
780 +
781 +struct airoha_qdma_desc {
782 + __le32 rsv;
783 + __le32 ctrl;
784 + __le32 addr;
785 + __le32 data;
786 + __le32 msg0;
787 + __le32 msg1;
788 + __le32 msg2;
789 + __le32 msg3;
790 +};
791 +
792 +/* CTRL0 */
793 +#define QDMA_FWD_DESC_CTX_MASK BIT(31)
794 +#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
795 +#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
796 +#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
797 +/* CTRL1 */
798 +#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
799 +/* CTRL2 */
800 +#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
801 +
802 +struct airoha_qdma_fwd_desc {
803 + __le32 addr;
804 + __le32 ctrl0;
805 + __le32 ctrl1;
806 + __le32 ctrl2;
807 + __le32 msg0;
808 + __le32 msg1;
809 + __le32 rsv0;
810 + __le32 rsv1;
811 +};
812 +
813 +enum {
814 + QDMA_INT_REG_IDX0,
815 + QDMA_INT_REG_IDX1,
816 + QDMA_INT_REG_IDX2,
817 + QDMA_INT_REG_IDX3,
818 + QDMA_INT_REG_IDX4,
819 + QDMA_INT_REG_MAX
820 +};
821 +
822 +enum {
823 + XSI_PCIE0_PORT,
824 + XSI_PCIE1_PORT,
825 + XSI_USB_PORT,
826 + XSI_AE_PORT,
827 + XSI_ETH_PORT,
828 +};
829 +
830 +enum {
831 + XSI_PCIE0_VIP_PORT_MASK = BIT(22),
832 + XSI_PCIE1_VIP_PORT_MASK = BIT(23),
833 + XSI_USB_VIP_PORT_MASK = BIT(25),
834 + XSI_ETH_VIP_PORT_MASK = BIT(24),
835 +};
836 +
837 +enum {
838 + DEV_STATE_INITIALIZED,
839 +};
840 +
841 +enum {
842 + CDM_CRSN_QSEL_Q1 = 1,
843 + CDM_CRSN_QSEL_Q5 = 5,
844 + CDM_CRSN_QSEL_Q6 = 6,
845 + CDM_CRSN_QSEL_Q15 = 15,
846 +};
847 +
848 +enum {
849 + CRSN_08 = 0x8,
850 + CRSN_21 = 0x15, /* KA */
851 + CRSN_22 = 0x16, /* hit bind and force route to CPU */
852 + CRSN_24 = 0x18,
853 + CRSN_25 = 0x19,
854 +};
855 +
856 +enum {
857 + FE_PSE_PORT_CDM1,
858 + FE_PSE_PORT_GDM1,
859 + FE_PSE_PORT_GDM2,
860 + FE_PSE_PORT_GDM3,
861 + FE_PSE_PORT_PPE1,
862 + FE_PSE_PORT_CDM2,
863 + FE_PSE_PORT_CDM3,
864 + FE_PSE_PORT_CDM4,
865 + FE_PSE_PORT_PPE2,
866 + FE_PSE_PORT_GDM4,
867 + FE_PSE_PORT_CDM5,
868 + FE_PSE_PORT_DROP = 0xf,
869 +};
870 +
871 +enum tx_sched_mode {
872 + TC_SCH_WRR8,
873 + TC_SCH_SP,
874 + TC_SCH_WRR7,
875 + TC_SCH_WRR6,
876 + TC_SCH_WRR5,
877 + TC_SCH_WRR4,
878 + TC_SCH_WRR3,
879 + TC_SCH_WRR2,
880 +};
881 +
882 +enum trtcm_param_type {
883 + TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
884 + TRTCM_TOKEN_RATE_MODE,
885 + TRTCM_BUCKETSIZE_SHIFT_MODE,
886 + TRTCM_BUCKET_COUNTER_MODE,
887 +};
888 +
889 +enum trtcm_mode_type {
890 + TRTCM_COMMIT_MODE,
891 + TRTCM_PEAK_MODE,
892 +};
893 +
894 +enum trtcm_param {
895 + TRTCM_TICK_SEL = BIT(0),
896 + TRTCM_PKT_MODE = BIT(1),
897 + TRTCM_METER_MODE = BIT(2),
898 +};
899 +
900 +#define MIN_TOKEN_SIZE 4096
901 +#define MAX_TOKEN_SIZE_OFFSET 17
902 +#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
903 +#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
904 +
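The two token-rate masks suggest a fixed-point layout: an integer part in bits 23:6 and a 6-bit fraction in bits 5:0. Assuming that reading (the bit split is in the patch; the integer.fraction interpretation is an assumption here), encoding a rate is a multiply by 2^6:

#include <stdio.h>

#define TRTCM_TOKEN_RATE_FRACTION_BITS 6

/* Hedged sketch: pack a rate into the assumed 18.6 fixed-point field. */
static unsigned int encode_token_rate(double rate)
{
    return (unsigned int)(rate * (1 << TRTCM_TOKEN_RATE_FRACTION_BITS));
}

int main(void)
{
    printf("rate 10.5 -> 0x%x\n", encode_token_rate(10.5)); /* 0x2a0 */
    return 0;
}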
905 +struct airoha_queue_entry {
906 + union {
907 + void *buf;
908 + struct sk_buff *skb;
909 + };
910 + dma_addr_t dma_addr;
911 + u16 dma_len;
912 +};
913 +
914 +struct airoha_queue {
915 + struct airoha_qdma *qdma;
916 +
917 + /* protect concurrent queue accesses */
918 + spinlock_t lock;
919 + struct airoha_queue_entry *entry;
920 + struct airoha_qdma_desc *desc;
921 + u16 head;
922 + u16 tail;
923 +
924 + int queued;
925 + int ndesc;
926 + int free_thr;
927 + int buf_size;
928 +
929 + struct napi_struct napi;
930 + struct page_pool *page_pool;
931 +};
932 +
933 +struct airoha_tx_irq_queue {
934 + struct airoha_qdma *qdma;
935 +
936 + struct napi_struct napi;
937 +
938 + int size;
939 + u32 *q;
940 +};
941 +
942 +struct airoha_hw_stats {
943 + /* protect concurrent hw_stats accesses */
944 + spinlock_t lock;
945 + struct u64_stats_sync syncp;
946 +
947 + /* get_stats64 */
948 + u64 rx_ok_pkts;
949 + u64 tx_ok_pkts;
950 + u64 rx_ok_bytes;
951 + u64 tx_ok_bytes;
952 + u64 rx_multicast;
953 + u64 rx_errors;
954 + u64 rx_drops;
955 + u64 tx_drops;
956 + u64 rx_crc_error;
957 + u64 rx_over_errors;
958 + /* ethtool stats */
959 + u64 tx_broadcast;
960 + u64 tx_multicast;
961 + u64 tx_len[7];
962 + u64 rx_broadcast;
963 + u64 rx_fragment;
964 + u64 rx_jabber;
965 + u64 rx_len[7];
966 +};
967 +
968 +struct airoha_qdma {
969 + struct airoha_eth *eth;
970 + void __iomem *regs;
971 +
972 + /* protect concurrent irqmask accesses */
973 + spinlock_t irq_lock;
974 + u32 irqmask[QDMA_INT_REG_MAX];
975 + int irq;
976 +
977 + struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
978 +
979 + struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
980 + struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
981 +
982 + /* descriptor and packet buffers for qdma hw forward */
983 + struct {
984 + void *desc;
985 + void *q;
986 + } hfwd;
987 +};
988 +
989 +struct airoha_gdm_port {
990 + struct airoha_qdma *qdma;
991 + struct net_device *dev;
992 + int id;
993 +
994 + struct airoha_hw_stats stats;
995 +
996 + DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
997 +
998 + /* qos stats counters */
999 + u64 cpu_tx_packets;
1000 + u64 fwd_tx_packets;
1001 +};
1002 +
1003 +struct airoha_eth {
1004 + struct device *dev;
1005 +
1006 + unsigned long state;
1007 + void __iomem *fe_regs;
1008 +
1009 + struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
1010 + struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
1011 +
1012 + struct net_device *napi_dev;
1013 +
1014 + struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
1015 + struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
1016 +};
1017 +
1018 +static u32 airoha_rr(void __iomem *base, u32 offset)
1019 +{
1020 + return readl(base + offset);
1021 +}
1022 +
1023 +static void airoha_wr(void __iomem *base, u32 offset, u32 val)
1024 +{
1025 + writel(val, base + offset);
1026 +}
1027 +
1028 +static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
1029 +{
1030 + val |= (airoha_rr(base, offset) & ~mask);
1031 + airoha_wr(base, offset, val);
1032 +
1033 + return val;
1034 +}
1035 +
1036 +#define airoha_fe_rr(eth, offset) \
1037 + airoha_rr((eth)->fe_regs, (offset))
1038 +#define airoha_fe_wr(eth, offset, val) \
1039 + airoha_wr((eth)->fe_regs, (offset), (val))
1040 +#define airoha_fe_rmw(eth, offset, mask, val) \
1041 + airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
1042 +#define airoha_fe_set(eth, offset, val) \
1043 + airoha_rmw((eth)->fe_regs, (offset), 0, (val))
1044 +#define airoha_fe_clear(eth, offset, val) \
1045 + airoha_rmw((eth)->fe_regs, (offset), (val), 0)
1046 +
1047 +#define airoha_qdma_rr(qdma, offset) \
1048 + airoha_rr((qdma)->regs, (offset))
1049 +#define airoha_qdma_wr(qdma, offset, val) \
1050 + airoha_wr((qdma)->regs, (offset), (val))
1051 +#define airoha_qdma_rmw(qdma, offset, mask, val) \
1052 + airoha_rmw((qdma)->regs, (offset), (mask), (val))
1053 +#define airoha_qdma_set(qdma, offset, val) \
1054 + airoha_rmw((qdma)->regs, (offset), 0, (val))
1055 +#define airoha_qdma_clear(qdma, offset, val) \
1056 + airoha_rmw((qdma)->regs, (offset), (val), 0)
1057 +
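Everything below configures registers through this one read-modify-write primitive plus FIELD_PREP() to place a value inside its mask. A standalone sketch of the pattern, with a u32 array standing in for the MMIO region and simplified GENMASK()/FIELD_PREP() stand-ins:

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
    (((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

/* Same semantics as airoha_rmw(): clear the field, OR in the new value. */
static unsigned int rmw(unsigned int *base, unsigned int offset,
                        unsigned int mask, unsigned int val)
{
    unsigned int v = (base[offset / 4] & ~mask) | val;

    base[offset / 4] = v;
    return v;
}

#define GDM_LONG_LEN_MASK GENMASK(29, 16)

int main(void)
{
    unsigned int regs[8] = { [5] = 0xdeadbeef }; /* fake MMIO, LEN_CFG at 0x14 */

    rmw(regs, 0x14, GDM_LONG_LEN_MASK, FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
    printf("0x%08x\n", regs[5]); /* 0xcfa4beef: bits 29:16 now hold 4004 */
    return 0;
}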
1058 +static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
1059 + u32 clear, u32 set)
1060 +{
1061 + unsigned long flags;
1062 +
1063 + if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
1064 + return;
1065 +
1066 + spin_lock_irqsave(&qdma->irq_lock, flags);
1067 +
1068 + qdma->irqmask[index] &= ~clear;
1069 + qdma->irqmask[index] |= set;
1070 + airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
1071 + /* Read irq_enable register in order to guarantee the update above
1072 + * completes in the spinlock critical section.
1073 + */
1074 + airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
1075 +
1076 + spin_unlock_irqrestore(&qdma->irq_lock, flags);
1077 +}
1078 +
1079 +static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
1080 + u32 mask)
1081 +{
1082 + airoha_qdma_set_irqmask(qdma, index, 0, mask);
1083 +}
1084 +
1085 +static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
1086 + u32 mask)
1087 +{
1088 + airoha_qdma_set_irqmask(qdma, index, mask, 0);
1089 +}
1090 +
1091 +static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
1092 +{
1093 + /* GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
1094 + * GDM{2,3,4} can be used as WAN ports connected to an external
1095 + * PHY module.
1096 + */
1097 + return port->id == 1;
1098 +}
1099 +
1100 +static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
1101 +{
1102 + struct airoha_eth *eth = port->qdma->eth;
1103 + u32 val, reg;
1104 +
1105 + reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
1106 + : REG_FE_WAN_MAC_H;
1107 + val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
1108 + airoha_fe_wr(eth, reg, val);
1109 +
1110 + val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
1111 + airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
1112 + airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
1113 +}
1114 +
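The packing above puts the first three octets of the MAC address into the high register and mirrors the last three into both low registers (LMIN and LMAX). A standalone check of the shifts:

#include <stdio.h>

int main(void)
{
    const unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    unsigned int hi = (addr[0] << 16) | (addr[1] << 8) | addr[2];
    unsigned int lo = (addr[3] << 16) | (addr[4] << 8) | addr[5];

    printf("MAC_H=0x%06x LMIN=LMAX=0x%06x\n", hi, lo);
    /* -> MAC_H=0x001122 LMIN=LMAX=0x334455 */
    return 0;
}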
1115 +static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
1116 + u32 val)
1117 +{
1118 + airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
1119 + FIELD_PREP(GDM_OCFQ_MASK, val));
1120 + airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
1121 + FIELD_PREP(GDM_MCFQ_MASK, val));
1122 + airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
1123 + FIELD_PREP(GDM_BCFQ_MASK, val));
1124 + airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
1125 + FIELD_PREP(GDM_UCFQ_MASK, val));
1126 +}
1127 +
1128 +static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
1129 +{
1130 + u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
1131 + u32 vip_port, cfg_addr;
1132 +
1133 + switch (port) {
1134 + case XSI_PCIE0_PORT:
1135 + vip_port = XSI_PCIE0_VIP_PORT_MASK;
1136 + cfg_addr = REG_GDM_FWD_CFG(3);
1137 + break;
1138 + case XSI_PCIE1_PORT:
1139 + vip_port = XSI_PCIE1_VIP_PORT_MASK;
1140 + cfg_addr = REG_GDM_FWD_CFG(3);
1141 + break;
1142 + case XSI_USB_PORT:
1143 + vip_port = XSI_USB_VIP_PORT_MASK;
1144 + cfg_addr = REG_GDM_FWD_CFG(4);
1145 + break;
1146 + case XSI_ETH_PORT:
1147 + vip_port = XSI_ETH_VIP_PORT_MASK;
1148 + cfg_addr = REG_GDM_FWD_CFG(4);
1149 + break;
1150 + default:
1151 + return -EINVAL;
1152 + }
1153 +
1154 + if (enable) {
1155 + airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
1156 + airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
1157 + } else {
1158 + airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
1159 + airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
1160 + }
1161 +
1162 + airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
1163 +
1164 + return 0;
1165 +}
1166 +
1167 +static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
1168 +{
1169 + const int port_list[] = {
1170 + XSI_PCIE0_PORT,
1171 + XSI_PCIE1_PORT,
1172 + XSI_USB_PORT,
1173 + XSI_ETH_PORT
1174 + };
1175 + int i, err;
1176 +
1177 + for (i = 0; i < ARRAY_SIZE(port_list); i++) {
1178 + err = airoha_set_gdm_port(eth, port_list[i], enable);
1179 + if (err)
1180 + goto error;
1181 + }
1182 +
1183 + return 0;
1184 +
1185 +error:
1186 + for (i--; i >= 0; i--)
1187 + airoha_set_gdm_port(eth, port_list[i], false);
1188 +
1189 + return err;
1190 +}
1191 +
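airoha_set_gdm_ports() uses the common unwind-on-error idiom: if enabling port i fails, ports i-1..0 are disabled again in reverse order. A minimal standalone sketch with a fake step that fails partway through:

#include <stdio.h>

static int enable_step(int i)   { return i == 2 ? -1 : 0; }
static void disable_step(int i) { printf("rollback step %d\n", i); }

int main(void)
{
    int i, err = 0;

    for (i = 0; i < 4; i++) {
        err = enable_step(i);
        if (err)
            break;
    }
    if (err)
        for (i--; i >= 0; i--) /* undo only what succeeded */
            disable_step(i);
    return err;
}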
1192 +static void airoha_fe_maccr_init(struct airoha_eth *eth)
1193 +{
1194 + int p;
1195 +
1196 + for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
1197 + airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
1198 + GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
1199 + GDM_DROP_CRC_ERR);
1200 + airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
1201 + FE_PSE_PORT_CDM1);
1202 + airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
1203 + GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
1204 + FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
1205 + FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
1206 + }
1207 +
1208 + airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
1209 + FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
1210 +
1211 + airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
1212 +}
1213 +
1214 +static void airoha_fe_vip_setup(struct airoha_eth *eth)
1215 +{
1216 + airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
1217 + airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
1218 +
1219 + airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
1220 + airoha_fe_wr(eth, REG_FE_VIP_EN(4),
1221 + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
1222 + PATN_EN_MASK);
1223 +
1224 + airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
1225 + airoha_fe_wr(eth, REG_FE_VIP_EN(6),
1226 + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
1227 + PATN_EN_MASK);
1228 +
1229 + airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
1230 + airoha_fe_wr(eth, REG_FE_VIP_EN(7),
1231 + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
1232 + PATN_EN_MASK);
1233 +
1234 + /* BOOTP (0x43) */
1235 + airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
1236 + airoha_fe_wr(eth, REG_FE_VIP_EN(8),
1237 + PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
1238 + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
1239 +
1240 + /* BOOTP (0x44) */
1241 + airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
1242 + airoha_fe_wr(eth, REG_FE_VIP_EN(9),
1243 + PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
1244 + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
1245 +
1246 + /* ISAKMP */
1247 + airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
1248 + airoha_fe_wr(eth, REG_FE_VIP_EN(10),
1249 + PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
1250 + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
1251 +
1252 + airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
1253 + airoha_fe_wr(eth, REG_FE_VIP_EN(11),
1254 + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
1255 + PATN_EN_MASK);
1256 +
1257 + /* DHCPv6 */
1258 + airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
1259 + airoha_fe_wr(eth, REG_FE_VIP_EN(12),
1260 + PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
1261 + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
1262 +
1263 + airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
1264 + airoha_fe_wr(eth, REG_FE_VIP_EN(19),
1265 + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
1266 + PATN_EN_MASK);
1267 +
1268 + /* ETH->ETH_P_1905 (0x893a) */
1269 + airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
1270 + airoha_fe_wr(eth, REG_FE_VIP_EN(20),
1271 + PATN_FCPU_EN_MASK | PATN_EN_MASK);
1272 +
1273 + airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
1274 + airoha_fe_wr(eth, REG_FE_VIP_EN(21),
1275 + PATN_FCPU_EN_MASK | PATN_EN_MASK);
1276 +}
1277 +
1278 +static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
1279 + u32 port, u32 queue)
1280 +{
1281 + u32 val;
1282 +
1283 + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
1284 + PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
1285 + FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
1286 + FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
1287 + val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
1288 +
1289 + return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
1290 +}
1291 +
1292 +static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
1293 + u32 port, u32 queue, u32 val)
1294 +{
1295 + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
1296 + FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
1297 + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
1298 + PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
1299 + PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
1300 + FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
1301 + FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
1302 + PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
1303 +}
1304 +
1305 +static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
1306 +{
1307 + u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
1308 +
1309 + return FIELD_GET(PSE_ALLRSV_MASK, val);
1310 +}
1311 +
1312 +static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
1313 + u32 port, u32 queue, u32 val)
1314 +{
1315 + u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
1316 + u32 tmp, all_rsv, fq_limit;
1317 +
1318 + airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
1319 +
1320 + /* modify all rsv */
1321 + all_rsv = airoha_fe_get_pse_all_rsv(eth);
1322 + all_rsv += (val - orig_val);
1323 + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
1324 + FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
1325 +
1326 + /* modify hthd */
1327 + tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
1328 + fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
1329 + tmp = fq_limit - all_rsv - 0x20;
1330 + airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
1331 + PSE_SHARE_USED_HTHD_MASK,
1332 + FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
1333 +
1334 + tmp = fq_limit - all_rsv - 0x100;
1335 + airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
1336 + PSE_SHARE_USED_MTHD_MASK,
1337 + FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
1338 + tmp = (3 * tmp) >> 2;
1339 + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
1340 + PSE_SHARE_USED_LTHD_MASK,
1341 + FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
1342 +
1343 + return 0;
1344 +}
1345 +
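All three shared-pool thresholds derive from the free-queue limit minus the total reservation: the high threshold is fq_limit - all_rsv - 0x20, the medium threshold subtracts 0x100 instead, and the low threshold is 3/4 of the medium one. A worked example with made-up inputs (both values are read from hardware in the driver):

#include <stdio.h>

int main(void)
{
    unsigned int fq_limit = 0x1000; /* hypothetical PSE_FQ_LIMIT value */
    unsigned int all_rsv = 0x200;   /* hypothetical total reserved pages */
    unsigned int hthd, mthd, lthd;

    hthd = fq_limit - all_rsv - 0x20;  /* high threshold */
    mthd = fq_limit - all_rsv - 0x100; /* medium threshold */
    lthd = (3 * mthd) >> 2;            /* low = 3/4 of medium */

    printf("hthd=0x%x mthd=0x%x lthd=0x%x\n", hthd, mthd, lthd);
    /* -> hthd=0xde0 mthd=0xd00 lthd=0x9c0 */
    return 0;
}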
1346 +static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
1347 +{
1348 + const u32 pse_port_num_queues[] = {
1349 + [FE_PSE_PORT_CDM1] = 6,
1350 + [FE_PSE_PORT_GDM1] = 6,
1351 + [FE_PSE_PORT_GDM2] = 32,
1352 + [FE_PSE_PORT_GDM3] = 6,
1353 + [FE_PSE_PORT_PPE1] = 4,
1354 + [FE_PSE_PORT_CDM2] = 6,
1355 + [FE_PSE_PORT_CDM3] = 8,
1356 + [FE_PSE_PORT_CDM4] = 10,
1357 + [FE_PSE_PORT_PPE2] = 4,
1358 + [FE_PSE_PORT_GDM4] = 2,
1359 + [FE_PSE_PORT_CDM5] = 2,
1360 + };
1361 + u32 all_rsv;
1362 + int q;
1363 +
1364 + all_rsv = airoha_fe_get_pse_all_rsv(eth);
1365 + /* hw misses PPE2 oq rsv */
1366 + all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
1367 + airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
1368 +
1369 + /* CMD1 */
1370 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
1371 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
1372 + PSE_QUEUE_RSV_PAGES);
1373 + /* GMD1 */
1374 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
1375 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
1376 + PSE_QUEUE_RSV_PAGES);
1377 + /* GMD2 */
1378 + for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
1379 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
1380 + /* GMD3 */
1381 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
1382 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
1383 + PSE_QUEUE_RSV_PAGES);
1384 + /* PPE1 */
1385 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
1386 + if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
1387 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
1388 + PSE_QUEUE_RSV_PAGES);
1389 + else
1390 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
1391 + }
1392 + /* CDM2 */
1393 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
1394 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
1395 + PSE_QUEUE_RSV_PAGES);
1396 + /* CDM3 */
1397 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
1398 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
1399 + /* CDM4 */
1400 + for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
1401 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
1402 + PSE_QUEUE_RSV_PAGES);
1403 + /* PPE2 */
1404 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
1405 + if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
1406 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
1407 + PSE_QUEUE_RSV_PAGES);
1408 + else
1409 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
1410 + }
1411 + /* GMD4 */
1412 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
1413 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
1414 + PSE_QUEUE_RSV_PAGES);
1415 + /* CDM5 */
1416 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
1417 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
1418 + PSE_QUEUE_RSV_PAGES);
1419 +}
1420 +
1421 +static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
1422 +{
1423 + int i;
1424 +
1425 + for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
1426 + int err, j;
1427 + u32 val;
1428 +
1429 + airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
1430 +
1431 + val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
1432 + MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
1433 + airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
1434 + err = read_poll_timeout(airoha_fe_rr, val,
1435 + val & MC_VLAN_CFG_CMD_DONE_MASK,
1436 + USEC_PER_MSEC, 5 * USEC_PER_MSEC,
1437 + false, eth, REG_MC_VLAN_CFG);
1438 + if (err)
1439 + return err;
1440 +
1441 + for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
1442 + airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
1443 +
1444 + val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
1445 + FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
1446 + MC_VLAN_CFG_RW_MASK;
1447 + airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
1448 + err = read_poll_timeout(airoha_fe_rr, val,
1449 + val & MC_VLAN_CFG_CMD_DONE_MASK,
1450 + USEC_PER_MSEC,
1451 + 5 * USEC_PER_MSEC, false, eth,
1452 + REG_MC_VLAN_CFG);
1453 + if (err)
1454 + return err;
1455 + }
1456 + }
1457 +
1458 + return 0;
1459 +}
1460 +
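The VLAN table writes are command/handshake style: write the data and command registers, then poll MC_VLAN_CFG_CMD_DONE_MASK with read_poll_timeout(). A userspace approximation of that bounded polling loop (the kernel helper also tracks elapsed time with ktime; this harness fakes a register that completes on the third read):

#include <stdio.h>
#include <unistd.h>

#define CMD_DONE (1u << 31)

static int reads;

static unsigned int fake_read(void)
{
    return ++reads >= 3 ? CMD_DONE : 0; /* completes on the third read */
}

static int poll_done(unsigned int sleep_us, unsigned int timeout_us)
{
    for (unsigned int waited = 0; ; waited += sleep_us) {
        if (fake_read() & CMD_DONE)
            return 0;
        if (waited >= timeout_us)
            return -1; /* -ETIMEDOUT in the kernel version */
        usleep(sleep_us);
    }
}

int main(void)
{
    int ret = poll_done(1000, 5000);

    printf("poll result: %d after %d reads\n", ret, reads);
    return 0;
}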
1461 +static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
1462 +{
1463 + /* CDM1_CRSN_QSEL */
1464 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
1465 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
1466 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
1467 + CDM_CRSN_QSEL_Q1));
1468 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
1469 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
1470 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
1471 + CDM_CRSN_QSEL_Q1));
1472 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
1473 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
1474 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
1475 + CDM_CRSN_QSEL_Q1));
1476 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
1477 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
1478 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
1479 + CDM_CRSN_QSEL_Q6));
1480 + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
1481 + CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
1482 + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
1483 + CDM_CRSN_QSEL_Q1));
1484 + /* CDM2_CRSN_QSEL */
1485 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
1486 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
1487 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
1488 + CDM_CRSN_QSEL_Q1));
1489 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
1490 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
1491 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
1492 + CDM_CRSN_QSEL_Q1));
1493 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
1494 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
1495 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
1496 + CDM_CRSN_QSEL_Q1));
1497 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
1498 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
1499 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
1500 + CDM_CRSN_QSEL_Q6));
1501 + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
1502 + CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
1503 + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
1504 + CDM_CRSN_QSEL_Q1));
1505 +}
1506 +
1507 +static int airoha_fe_init(struct airoha_eth *eth)
1508 +{
1509 + airoha_fe_maccr_init(eth);
1510 +
1511 + /* PSE IQ reserve */
1512 + airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
1513 + FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
1514 + airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
1515 + PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
1516 + FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
1517 + FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
1518 +
1519 + /* enable FE copy engine for MC/KA/DPI */
1520 + airoha_fe_wr(eth, REG_FE_PCE_CFG,
1521 + PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
1522 + /* set vip queue selection to ring 1 */
1523 + airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
1524 + FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
1525 + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
1526 + FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
1527 + /* set GDM4 source interface offset to 8 */
1528 + airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
1529 + GDM4_SPORT_OFF2_MASK |
1530 + GDM4_SPORT_OFF1_MASK |
1531 + GDM4_SPORT_OFF0_MASK,
1532 + FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
1533 + FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
1534 + FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
1535 +
1536 + /* set PSE Page as 128B */
1537 + airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
1538 + FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
1539 + FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
1540 + FE_DMA_GLO_PG_SZ_MASK);
1541 + airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
1542 + FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
1543 + FE_RST_GDM4_MBI_ARB_MASK);
1544 + usleep_range(1000, 2000);
1545 +
1546 + /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
1547 + * connect other rings to PSE Port0 OQ-0
1548 + */
1549 + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
1550 + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
1551 + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
1552 + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
1553 +
1554 + airoha_fe_vip_setup(eth);
1555 + airoha_fe_pse_ports_init(eth);
1556 +
1557 + airoha_fe_set(eth, REG_GDM_MISC_CFG,
1558 + GDM2_RDM_ACK_WAIT_PREF_MASK |
1559 + GDM2_CHN_VLD_MODE_MASK);
1560 + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
1561 + FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
1562 +
1563 + /* init fragment and assemble Force Port */
1564 + /* NPU Core-3, NPU Bridge Channel-3 */
1565 + airoha_fe_rmw(eth, REG_IP_FRAG_FP,
1566 + IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
1567 + FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
1568 + FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
1569 + /* QDMA LAN, RX Ring-22 */
1570 + airoha_fe_rmw(eth, REG_IP_FRAG_FP,
1571 + IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
1572 + FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
1573 + FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
1574 +
1575 + airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
1576 + airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
1577 +
1578 + airoha_fe_crsn_qsel_init(eth);
1579 +
1580 + airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
1581 + airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
1582 +
1583 + /* default aging mode for mbi unlock issue */
1584 + airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
1585 + MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
1586 + FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
1587 + FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
1588 +
1589 + /* disable IFC by default */
1590 + airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
1591 +
1592 + /* enable 1:N vlan action, init vlan table */
1593 + airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
1594 +
1595 + return airoha_fe_mc_vlan_clear(eth);
1596 +}
1597 +
1598 +static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
1599 +{
1600 + enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
1601 + struct airoha_qdma *qdma = q->qdma;
1602 + struct airoha_eth *eth = qdma->eth;
1603 + int qid = q - &qdma->q_rx[0];
1604 + int nframes = 0;
1605 +
1606 + while (q->queued < q->ndesc - 1) {
1607 + struct airoha_queue_entry *e = &q->entry[q->head];
1608 + struct airoha_qdma_desc *desc = &q->desc[q->head];
1609 + struct page *page;
1610 + int offset;
1611 + u32 val;
1612 +
1613 + page = page_pool_dev_alloc_frag(q->page_pool, &offset,
1614 + q->buf_size);
1615 + if (!page)
1616 + break;
1617 +
1618 + q->head = (q->head + 1) % q->ndesc;
1619 + q->queued++;
1620 + nframes++;
1621 +
1622 + e->buf = page_address(page) + offset;
1623 + e->dma_addr = page_pool_get_dma_addr(page) + offset;
1624 + e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
1625 +
1626 + dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
1627 + dir);
1628 +
1629 + val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
1630 + WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
1631 + WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
1632 + val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
1633 + WRITE_ONCE(desc->data, cpu_to_le32(val));
1634 + WRITE_ONCE(desc->msg0, 0);
1635 + WRITE_ONCE(desc->msg1, 0);
1636 + WRITE_ONCE(desc->msg2, 0);
1637 + WRITE_ONCE(desc->msg3, 0);
1638 +
1639 + airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
1640 + RX_RING_CPU_IDX_MASK,
1641 + FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
1642 + }
1643 +
1644 + return nframes;
1645 +}
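
The refill loop advances the ring head before writing the descriptor, so the
NEXT_ID field of the descriptor just filled always points at the new head. A
minimal userspace model of that linkage (an illustrative sketch, not part of
the patch):

	#include <stdio.h>

	int main(void)
	{
		int ndesc = 4, head = 0;

		for (int n = 0; n < 6; n++) {
			int filled = head;

			/* advance head first, as the refill loop does */
			head = (head + 1) % ndesc;
			printf("desc %d: NEXT_ID -> %d\n", filled, head);
		}
		return 0;
	}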
1646 +
1647 +static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
1648 + struct airoha_qdma_desc *desc)
1649 +{
1650 + u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
1651 +
1652 + sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
1653 + switch (sport) {
1654 + case 0x10 ... 0x13:
1655 + port = 0;
1656 + break;
1657 + case 0x2 ... 0x4:
1658 + port = sport - 1;
1659 + break;
1660 + default:
1661 + return -EINVAL;
1662 + }
1663 +
1664 + return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
1665 +}
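
The source-port decoding above collapses sport values 0x10-0x13 onto GDM port
0 and maps 0x2-0x4 onto ports 1-3; any other value is rejected. A standalone
sketch of the same mapping (illustration only, not driver code):

	#include <stdio.h>

	static int sport_to_port(unsigned int sport)
	{
		if (sport >= 0x10 && sport <= 0x13)
			return 0;
		if (sport >= 0x2 && sport <= 0x4)
			return (int)sport - 1;
		return -1;	/* -EINVAL in the driver */
	}

	int main(void)
	{
		unsigned int s[] = { 0x2, 0x3, 0x4, 0x10, 0x13, 0x7 };

		for (unsigned int i = 0; i < sizeof(s) / sizeof(s[0]); i++)
			printf("sport 0x%02x -> port %d\n", s[i],
			       sport_to_port(s[i]));
		return 0;
	}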
1666 +
1667 +static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
1668 +{
1669 + enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
1670 + struct airoha_qdma *qdma = q->qdma;
1671 + struct airoha_eth *eth = qdma->eth;
1672 + int qid = q - &qdma->q_rx[0];
1673 + int done = 0;
1674 +
1675 + while (done < budget) {
1676 + struct airoha_queue_entry *e = &q->entry[q->tail];
1677 + struct airoha_qdma_desc *desc = &q->desc[q->tail];
1678 + dma_addr_t dma_addr = le32_to_cpu(desc->addr);
1679 + u32 desc_ctrl = le32_to_cpu(desc->ctrl);
1680 + struct sk_buff *skb;
1681 + int len, p;
1682 +
1683 + if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
1684 + break;
1685 +
1686 + if (!dma_addr)
1687 + break;
1688 +
1689 + len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
1690 + if (!len)
1691 + break;
1692 +
1693 + q->tail = (q->tail + 1) % q->ndesc;
1694 + q->queued--;
1695 +
1696 + dma_sync_single_for_cpu(eth->dev, dma_addr,
1697 + SKB_WITH_OVERHEAD(q->buf_size), dir);
1698 +
1699 + p = airoha_qdma_get_gdm_port(eth, desc);
1700 + if (p < 0 || !eth->ports[p]) {
1701 + page_pool_put_full_page(q->page_pool,
1702 + virt_to_head_page(e->buf),
1703 + true);
1704 + continue;
1705 + }
1706 +
1707 + skb = napi_build_skb(e->buf, q->buf_size);
1708 + if (!skb) {
1709 + page_pool_put_full_page(q->page_pool,
1710 + virt_to_head_page(e->buf),
1711 + true);
1712 + break;
1713 + }
1714 +
1715 + skb_reserve(skb, 2);
1716 + __skb_put(skb, len);
1717 + skb_mark_for_recycle(skb);
1718 + skb->dev = eth->ports[p]->dev;
1719 + skb->protocol = eth_type_trans(skb, skb->dev);
1720 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1721 + skb_record_rx_queue(skb, qid);
1722 + napi_gro_receive(&q->napi, skb);
1723 +
1724 + done++;
1725 + }
1726 + airoha_qdma_fill_rx_queue(q);
1727 +
1728 + return done;
1729 +}
1730 +
1731 +static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
1732 +{
1733 + struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
1734 + int cur, done = 0;
1735 +
1736 + do {
1737 + cur = airoha_qdma_rx_process(q, budget - done);
1738 + done += cur;
1739 + } while (cur && done < budget);
1740 +
1741 + if (done < budget && napi_complete(napi))
1742 + airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
1743 + RX_DONE_INT_MASK);
1744 +
1745 + return done;
1746 +}
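
The poll function keeps draining the ring in chunks until a chunk comes back
short (no more completed descriptors) or the NAPI budget is exhausted, and
re-arms the interrupt only in the former case. A toy model of that control
flow, with a fake backlog standing in for the hardware ring:

	#include <stdio.h>

	static int pending = 70;	/* pretend frames waiting in the ring */

	static int rx_process(int budget)
	{
		int n = pending < budget ? pending : budget;

		pending -= n;
		return n;
	}

	int main(void)
	{
		int budget = 64, done = 0, cur;

		do {
			cur = rx_process(budget - done);
			done += cur;
		} while (cur && done < budget);

		/* budget exhausted here, so the irq would stay masked */
		printf("done=%d rearm_irq=%d\n", done, done < budget);
		return 0;
	}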
1747 +
1748 +static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
1749 + struct airoha_qdma *qdma, int ndesc)
1750 +{
1751 + const struct page_pool_params pp_params = {
1752 + .order = 0,
1753 + .pool_size = 256,
1754 + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
1755 + PP_FLAG_PAGE_FRAG,
1756 + .dma_dir = DMA_FROM_DEVICE,
1757 + .max_len = PAGE_SIZE,
1758 + .nid = NUMA_NO_NODE,
1759 + .dev = qdma->eth->dev,
1760 + .napi = &q->napi,
1761 + };
1762 + struct airoha_eth *eth = qdma->eth;
1763 + int qid = q - &qdma->q_rx[0], thr;
1764 + dma_addr_t dma_addr;
1765 +
1766 + q->buf_size = PAGE_SIZE / 2;
1767 + q->ndesc = ndesc;
1768 + q->qdma = qdma;
1769 +
1770 + q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
1771 + GFP_KERNEL);
1772 + if (!q->entry)
1773 + return -ENOMEM;
1774 +
1775 + q->page_pool = page_pool_create(&pp_params);
1776 + if (IS_ERR(q->page_pool)) {
1777 + int err = PTR_ERR(q->page_pool);
1778 +
1779 + q->page_pool = NULL;
1780 + return err;
1781 + }
1782 +
1783 + q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
1784 + &dma_addr, GFP_KERNEL);
1785 + if (!q->desc)
1786 + return -ENOMEM;
1787 +
1788 + netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
1789 +
1790 + airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
1791 + airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
1792 + RX_RING_SIZE_MASK,
1793 + FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
1794 +
1795 + thr = clamp(ndesc >> 3, 1, 32);
1796 + airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
1797 + FIELD_PREP(RX_RING_THR_MASK, thr));
1798 + airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
1799 + FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
1800 +
1801 + airoha_qdma_fill_rx_queue(q);
1802 +
1803 + return 0;
1804 +}
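
The refill threshold programmed above is one eighth of the ring size, clamped
to the [1, 32] range. A quick userspace check of that arithmetic:

	#include <stdio.h>

	static int rx_ring_thr(int ndesc)
	{
		int thr = ndesc >> 3;

		return thr < 1 ? 1 : (thr > 32 ? 32 : thr);
	}

	int main(void)
	{
		int sizes[] = { 4, 64, 256, 1024 };

		for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("ndesc %4d -> thr %d\n", sizes[i],
			       rx_ring_thr(sizes[i]));
		return 0;
	}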
1805 +
1806 +static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
1807 +{
1808 + struct airoha_eth *eth = q->qdma->eth;
1809 +
1810 + while (q->queued) {
1811 + struct airoha_queue_entry *e = &q->entry[q->tail];
1812 + struct page *page = virt_to_head_page(e->buf);
1813 +
1814 + dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
1815 + page_pool_get_dma_dir(q->page_pool));
1816 + page_pool_put_full_page(q->page_pool, page, false);
1817 + q->tail = (q->tail + 1) % q->ndesc;
1818 + q->queued--;
1819 + }
1820 +}
1821 +
1822 +static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
1823 +{
1824 + int i;
1825 +
1826 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
1827 + int err;
1828 +
1829 + if (!(RX_DONE_INT_MASK & BIT(i))) {
1830 + /* rx-queue not bound to irq */
1831 + continue;
1832 + }
1833 +
1834 + err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
1835 + RX_DSCP_NUM(i));
1836 + if (err)
1837 + return err;
1838 + }
1839 +
1840 + return 0;
1841 +}
1842 +
1843 +static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
1844 +{
1845 + struct airoha_tx_irq_queue *irq_q;
1846 + int id, done = 0, irq_queued;
1847 + struct airoha_qdma *qdma;
1848 + struct airoha_eth *eth;
1849 + u32 status, head;
1850 +
1851 + irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
1852 + qdma = irq_q->qdma;
1853 + id = irq_q - &qdma->q_tx_irq[0];
1854 + eth = qdma->eth;
1855 +
1856 + status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
1857 + head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
1858 + head = head % irq_q->size;
1859 + irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
1860 +
1861 + while (irq_queued > 0 && done < budget) {
1862 + u32 qid, val = irq_q->q[head];
1863 + struct airoha_qdma_desc *desc;
1864 + struct airoha_queue_entry *e;
1865 + struct airoha_queue *q;
1866 + u32 index, desc_ctrl;
1867 + struct sk_buff *skb;
1868 +
1869 + if (val == 0xff)
1870 + break;
1871 +
1872 + irq_q->q[head] = 0xff; /* mark as done */
1873 + head = (head + 1) % irq_q->size;
1874 + irq_queued--;
1875 + done++;
1876 +
1877 + qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
1878 + if (qid >= ARRAY_SIZE(qdma->q_tx))
1879 + continue;
1880 +
1881 + q = &qdma->q_tx[qid];
1882 + if (!q->ndesc)
1883 + continue;
1884 +
1885 + index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
1886 + if (index >= q->ndesc)
1887 + continue;
1888 +
1889 + spin_lock_bh(&q->lock);
1890 +
1891 + if (!q->queued)
1892 + goto unlock;
1893 +
1894 + desc = &q->desc[index];
1895 + desc_ctrl = le32_to_cpu(desc->ctrl);
1896 +
1897 + if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
1898 + !(desc_ctrl & QDMA_DESC_DROP_MASK))
1899 + goto unlock;
1900 +
1901 + e = &q->entry[index];
1902 + skb = e->skb;
1903 +
1904 + dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
1905 + DMA_TO_DEVICE);
1906 + memset(e, 0, sizeof(*e));
1907 + WRITE_ONCE(desc->msg0, 0);
1908 + WRITE_ONCE(desc->msg1, 0);
1909 + q->queued--;
1910 +
1911 + /* the completion ring can report out-of-order indexes if hw QoS
1912 + * is enabled and packets with different priorities are queued
1913 + * to the same DMA ring. Account for possible out-of-order
1914 + * reports when incrementing the DMA ring tail pointer
1915 + */
1916 + while (q->tail != q->head && !q->entry[q->tail].dma_addr)
1917 + q->tail = (q->tail + 1) % q->ndesc;
1918 +
1919 + if (skb) {
1920 + u16 queue = skb_get_queue_mapping(skb);
1921 + struct netdev_queue *txq;
1922 +
1923 + txq = netdev_get_tx_queue(skb->dev, queue);
1924 + netdev_tx_completed_queue(txq, 1, skb->len);
1925 + if (netif_tx_queue_stopped(txq) &&
1926 + q->ndesc - q->queued >= q->free_thr)
1927 + netif_tx_wake_queue(txq);
1928 +
1929 + dev_kfree_skb_any(skb);
1930 + }
1931 +unlock:
1932 + spin_unlock_bh(&q->lock);
1933 + }
1934 +
1935 + if (done) {
1936 + int i, len = done >> 7;
1937 +
1938 + for (i = 0; i < len; i++)
1939 + airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
1940 + IRQ_CLEAR_LEN_MASK, 0x80);
1941 + airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
1942 + IRQ_CLEAR_LEN_MASK, (done & 0x7f));
1943 + }
1944 +
1945 + if (done < budget && napi_complete(napi))
1946 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
1947 + TX_DONE_INT_MASK(id));
1948 +
1949 + return done;
1950 +}
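
The completion write-back at the end of the poll acknowledges the processed
entries in chunks: done >> 7 full writes of 0x80 followed by one write of the
remainder done & 0x7f. A worked model of that decomposition:

	#include <stdio.h>

	static void ack_completions(int done)
	{
		int chunks = done >> 7;	/* full 128-entry writes */

		for (int i = 0; i < chunks; i++)
			printf("write IRQ_CLEAR_LEN = 0x80\n");
		printf("write IRQ_CLEAR_LEN = 0x%02x\n", done & 0x7f);
	}

	int main(void)
	{
		ack_completions(300);	/* two 0x80 writes plus 0x2c */
		return 0;
	}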
1951 +
1952 +static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
1953 + struct airoha_qdma *qdma, int size)
1954 +{
1955 + struct airoha_eth *eth = qdma->eth;
1956 + int i, qid = q - &qdma->q_tx[0];
1957 + dma_addr_t dma_addr;
1958 +
1959 + spin_lock_init(&q->lock);
1960 + q->ndesc = size;
1961 + q->qdma = qdma;
1962 + q->free_thr = 1 + MAX_SKB_FRAGS;
1963 +
1964 + q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
1965 + GFP_KERNEL);
1966 + if (!q->entry)
1967 + return -ENOMEM;
1968 +
1969 + q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
1970 + &dma_addr, GFP_KERNEL);
1971 + if (!q->desc)
1972 + return -ENOMEM;
1973 +
1974 + for (i = 0; i < q->ndesc; i++) {
1975 + u32 val;
1976 +
1977 + val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
1978 + WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
1979 + }
1980 +
1981 + /* xmit ring drop default setting */
1982 + airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
1983 + TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
1984 +
1985 + airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
1986 + airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
1987 + FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
1988 + airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
1989 + FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
1990 +
1991 + return 0;
1992 +}
1993 +
1994 +static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
1995 + struct airoha_qdma *qdma, int size)
1996 +{
1997 + int id = irq_q - &qdma->q_tx_irq[0];
1998 + struct airoha_eth *eth = qdma->eth;
1999 + dma_addr_t dma_addr;
2000 +
2001 + netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
2002 + airoha_qdma_tx_napi_poll);
2003 + irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
2004 + &dma_addr, GFP_KERNEL);
2005 + if (!irq_q->q)
2006 + return -ENOMEM;
2007 +
2008 + memset(irq_q->q, 0xff, size * sizeof(u32));
2009 + irq_q->size = size;
2010 + irq_q->qdma = qdma;
2011 +
2012 + airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
2013 + airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
2014 + FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
2015 + airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
2016 + FIELD_PREP(TX_IRQ_THR_MASK, 1));
2017 +
2018 + return 0;
2019 +}
2020 +
2021 +static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
2022 +{
2023 + int i, err;
2024 +
2025 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
2026 + err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
2027 + IRQ_QUEUE_LEN(i));
2028 + if (err)
2029 + return err;
2030 + }
2031 +
2032 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2033 + err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
2034 + TX_DSCP_NUM);
2035 + if (err)
2036 + return err;
2037 + }
2038 +
2039 + return 0;
2040 +}
2041 +
2042 +static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
2043 +{
2044 + struct airoha_eth *eth = q->qdma->eth;
2045 +
2046 + spin_lock_bh(&q->lock);
2047 + while (q->queued) {
2048 + struct airoha_queue_entry *e = &q->entry[q->tail];
2049 +
2050 + dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
2051 + DMA_TO_DEVICE);
2052 + dev_kfree_skb_any(e->skb);
2053 + e->skb = NULL;
2054 +
2055 + q->tail = (q->tail + 1) % q->ndesc;
2056 + q->queued--;
2057 + }
2058 + spin_unlock_bh(&q->lock);
2059 +}
2060 +
2061 +static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
2062 +{
2063 + struct airoha_eth *eth = qdma->eth;
2064 + dma_addr_t dma_addr;
2065 + u32 status;
2066 + int size;
2067 +
2068 + size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
2069 + qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
2070 + GFP_KERNEL);
2071 + if (!qdma->hfwd.desc)
2072 + return -ENOMEM;
2073 +
2074 + airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
2075 +
2076 + size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
2077 + qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
2078 + GFP_KERNEL);
2079 + if (!qdma->hfwd.q)
2080 + return -ENOMEM;
2081 +
2082 + airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
2083 +
2084 + airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
2085 + HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
2086 + FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
2087 + airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
2088 + FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
2089 + airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
2090 + LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
2091 + HW_FWD_DESC_NUM_MASK,
2092 + FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
2093 + LMGR_INIT_START);
2094 +
2095 + return read_poll_timeout(airoha_qdma_rr, status,
2096 + !(status & LMGR_INIT_START), USEC_PER_MSEC,
2097 + 30 * USEC_PER_MSEC, true, qdma,
2098 + REG_LMGR_INIT_CFG);
2099 +}
2100 +
2101 +static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
2102 +{
2103 + airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
2104 + airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
2105 +
2106 + airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
2107 + PSE_BUF_ESTIMATE_EN_MASK);
2108 +
2109 + airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
2110 + EGRESS_RATE_METER_EN_MASK |
2111 + EGRESS_RATE_METER_EQ_RATE_EN_MASK);
2112 + /* 2047us x 31 = 63.457ms */
2113 + airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
2114 + EGRESS_RATE_METER_WINDOW_SZ_MASK,
2115 + FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
2116 + airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
2117 + EGRESS_RATE_METER_TIMESLICE_MASK,
2118 + FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
2119 +
2120 + /* ratelimit init */
2121 + airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
2122 + /* fast-tick 25us */
2123 + airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
2124 + FIELD_PREP(GLB_FAST_TICK_MASK, 25));
2125 + airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
2126 + FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
2127 +
2128 + airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
2129 + airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
2130 + FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
2131 + airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
2132 + EGRESS_SLOW_TICK_RATIO_MASK,
2133 + FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
2134 +
2135 + airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
2136 + airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
2137 + INGRESS_TRTCM_MODE_MASK);
2138 + airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
2139 + FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
2140 + airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
2141 + INGRESS_SLOW_TICK_RATIO_MASK,
2142 + FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
2143 +
2144 + airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
2145 + airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
2146 + FIELD_PREP(SLA_FAST_TICK_MASK, 25));
2147 + airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
2148 + FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
2149 +}
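
The "2047us x 31 = 63.457ms" comment above is the product of the programmed
timeslice (0x7ff us) and window size (0x1f slices); spelled out:

	#include <stdio.h>

	int main(void)
	{
		unsigned int timeslice_us = 0x7ff;	/* 2047 us */
		unsigned int window_sz = 0x1f;		/* 31 slices */

		printf("egress meter window: %u us (%.3f ms)\n",
		       timeslice_us * window_sz,
		       timeslice_us * window_sz / 1000.0);
		return 0;
	}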
2150 +
2151 +static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
2152 +{
2153 + int i;
2154 +
2155 + for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
2156 + /* Tx-cpu transferred count */
2157 + airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
2158 + airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
2159 + CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
2160 + CNTR_ALL_DSCP_RING_EN_MASK |
2161 + FIELD_PREP(CNTR_CHAN_MASK, i));
2162 + /* Tx-fwd transferred count */
2163 + airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
2164 + airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
2165 + CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
2166 + CNTR_ALL_DSCP_RING_EN_MASK |
2167 + FIELD_PREP(CNTR_SRC_MASK, 1) |
2168 + FIELD_PREP(CNTR_CHAN_MASK, i));
2169 + }
2170 +}
2171 +
2172 +static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
2173 +{
2174 + int i;
2175 +
2176 + /* clear pending irqs */
2177 + for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
2178 + airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
2179 +
2180 + /* setup irqs */
2181 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
2182 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
2183 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
2184 +
2185 + /* setup irq binding */
2186 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2187 + if (!qdma->q_tx[i].ndesc)
2188 + continue;
2189 +
2190 + if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
2191 + airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
2192 + TX_RING_IRQ_BLOCKING_CFG_MASK);
2193 + else
2194 + airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
2195 + TX_RING_IRQ_BLOCKING_CFG_MASK);
2196 + }
2197 +
2198 + airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
2199 + GLOBAL_CFG_RX_2B_OFFSET_MASK |
2200 + FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
2201 + GLOBAL_CFG_CPU_TXR_RR_MASK |
2202 + GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
2203 + GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
2204 + GLOBAL_CFG_MULTICAST_EN_MASK |
2205 + GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
2206 + GLOBAL_CFG_TX_WB_DONE_MASK |
2207 + FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
2208 +
2209 + airoha_qdma_init_qos(qdma);
2210 +
2211 + /* disable qdma rx delay interrupt */
2212 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2213 + if (!qdma->q_rx[i].ndesc)
2214 + continue;
2215 +
2216 + airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
2217 + RX_DELAY_INT_MASK);
2218 + }
2219 +
2220 + airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
2221 + TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
2222 + airoha_qdma_init_qos_stats(qdma);
2223 +
2224 + return 0;
2225 +}
2226 +
2227 +static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
2228 +{
2229 + struct airoha_qdma *qdma = dev_instance;
2230 + u32 intr[ARRAY_SIZE(qdma->irqmask)];
2231 + int i;
2232 +
2233 + for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
2234 + intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
2235 + intr[i] &= qdma->irqmask[i];
2236 + airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
2237 + }
2238 +
2239 + if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
2240 + return IRQ_NONE;
2241 +
2242 + if (intr[1] & RX_DONE_INT_MASK) {
2243 + airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
2244 + RX_DONE_INT_MASK);
2245 +
2246 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2247 + if (!qdma->q_rx[i].ndesc)
2248 + continue;
2249 +
2250 + if (intr[1] & BIT(i))
2251 + napi_schedule(&qdma->q_rx[i].napi);
2252 + }
2253 + }
2254 +
2255 + if (intr[0] & INT_TX_MASK) {
2256 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
2257 + if (!(intr[0] & TX_DONE_INT_MASK(i)))
2258 + continue;
2259 +
2260 + airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
2261 + TX_DONE_INT_MASK(i));
2262 + napi_schedule(&qdma->q_tx_irq[i].napi);
2263 + }
2264 + }
2265 +
2266 + return IRQ_HANDLED;
2267 +}
2268 +
2269 +static int airoha_qdma_init(struct platform_device *pdev,
2270 + struct airoha_eth *eth,
2271 + struct airoha_qdma *qdma)
2272 +{
2273 + int err, id = qdma - &eth->qdma[0];
2274 + const char *res;
2275 +
2276 + spin_lock_init(&qdma->irq_lock);
2277 + qdma->eth = eth;
2278 +
2279 + res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
2280 + if (!res)
2281 + return -ENOMEM;
2282 +
2283 + qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
2284 + if (IS_ERR(qdma->regs))
2285 + return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
2286 + "failed to iomap qdma%d regs\n", id);
2287 +
2288 + qdma->irq = platform_get_irq(pdev, 4 * id);
2289 + if (qdma->irq < 0)
2290 + return qdma->irq;
2291 +
2292 + err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
2293 + IRQF_SHARED, KBUILD_MODNAME, qdma);
2294 + if (err)
2295 + return err;
2296 +
2297 + err = airoha_qdma_init_rx(qdma);
2298 + if (err)
2299 + return err;
2300 +
2301 + err = airoha_qdma_init_tx(qdma);
2302 + if (err)
2303 + return err;
2304 +
2305 + err = airoha_qdma_init_hfwd_queues(qdma);
2306 + if (err)
2307 + return err;
2308 +
2309 + return airoha_qdma_hw_init(qdma);
2310 +}
2311 +
2312 +static int airoha_hw_init(struct platform_device *pdev,
2313 + struct airoha_eth *eth)
2314 +{
2315 + int err, i;
2316 +
2317 + /* disable xsi */
2318 + err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
2319 + eth->xsi_rsts);
2320 + if (err)
2321 + return err;
2322 +
2323 + err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
2324 + if (err)
2325 + return err;
2326 +
2327 + msleep(20);
2328 + err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
2329 + if (err)
2330 + return err;
2331 +
2332 + msleep(20);
2333 + err = airoha_fe_init(eth);
2334 + if (err)
2335 + return err;
2336 +
2337 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
2338 + err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
2339 + if (err)
2340 + return err;
2341 + }
2342 +
2343 + set_bit(DEV_STATE_INITIALIZED, &eth->state);
2344 +
2345 + return 0;
2346 +}
2347 +
2348 +static void airoha_hw_cleanup(struct airoha_qdma *qdma)
2349 +{
2350 + int i;
2351 +
2352 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2353 + if (!qdma->q_rx[i].ndesc)
2354 + continue;
2355 +
2356 + netif_napi_del(&qdma->q_rx[i].napi);
2357 + airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
2358 + if (qdma->q_rx[i].page_pool)
2359 + page_pool_destroy(qdma->q_rx[i].page_pool);
2360 + }
2361 +
2362 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2363 + netif_napi_del(&qdma->q_tx_irq[i].napi);
2364 +
2365 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2366 + if (!qdma->q_tx[i].ndesc)
2367 + continue;
2368 +
2369 + airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
2370 + }
2371 +}
2372 +
2373 +static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
2374 +{
2375 + int i;
2376 +
2377 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2378 + napi_enable(&qdma->q_tx_irq[i].napi);
2379 +
2380 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2381 + if (!qdma->q_rx[i].ndesc)
2382 + continue;
2383 +
2384 + napi_enable(&qdma->q_rx[i].napi);
2385 + }
2386 +}
2387 +
2388 +static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
2389 +{
2390 + int i;
2391 +
2392 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2393 + napi_disable(&qdma->q_tx_irq[i].napi);
2394 +
2395 + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2396 + if (!qdma->q_rx[i].ndesc)
2397 + continue;
2398 +
2399 + napi_disable(&qdma->q_rx[i].napi);
2400 + }
2401 +}
2402 +
2403 +static void airoha_update_hw_stats(struct airoha_gdm_port *port)
2404 +{
2405 + struct airoha_eth *eth = port->qdma->eth;
2406 + u32 val, i = 0;
2407 +
2408 + spin_lock(&port->stats.lock);
2409 + u64_stats_update_begin(&port->stats.syncp);
2410 +
2411 + /* TX */
2412 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
2413 + port->stats.tx_ok_pkts += ((u64)val << 32);
2414 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
2415 + port->stats.tx_ok_pkts += val;
2416 +
2417 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
2418 + port->stats.tx_ok_bytes += ((u64)val << 32);
2419 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
2420 + port->stats.tx_ok_bytes += val;
2421 +
2422 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
2423 + port->stats.tx_drops += val;
2424 +
2425 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
2426 + port->stats.tx_broadcast += val;
2427 +
2428 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
2429 + port->stats.tx_multicast += val;
2430 +
2431 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
2432 + port->stats.tx_len[i] += val;
2433 +
2434 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
2435 + port->stats.tx_len[i] += ((u64)val << 32);
2436 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
2437 + port->stats.tx_len[i++] += val;
2438 +
2439 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
2440 + port->stats.tx_len[i] += ((u64)val << 32);
2441 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
2442 + port->stats.tx_len[i++] += val;
2443 +
2444 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
2445 + port->stats.tx_len[i] += ((u64)val << 32);
2446 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
2447 + port->stats.tx_len[i++] += val;
2448 +
2449 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
2450 + port->stats.tx_len[i] += ((u64)val << 32);
2451 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
2452 + port->stats.tx_len[i++] += val;
2453 +
2454 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
2455 + port->stats.tx_len[i] += ((u64)val << 32);
2456 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
2457 + port->stats.tx_len[i++] += val;
2458 +
2459 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
2460 + port->stats.tx_len[i] += ((u64)val << 32);
2461 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
2462 + port->stats.tx_len[i++] += val;
2463 +
2464 + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
2465 + port->stats.tx_len[i++] += val;
2466 +
2467 + /* RX */
2468 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
2469 + port->stats.rx_ok_pkts += ((u64)val << 32);
2470 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
2471 + port->stats.rx_ok_pkts += val;
2472 +
2473 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
2474 + port->stats.rx_ok_bytes += ((u64)val << 32);
2475 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
2476 + port->stats.rx_ok_bytes += val;
2477 +
2478 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
2479 + port->stats.rx_drops += val;
2480 +
2481 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
2482 + port->stats.rx_broadcast += val;
2483 +
2484 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
2485 + port->stats.rx_multicast += val;
2486 +
2487 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
2488 + port->stats.rx_errors += val;
2489 +
2490 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
2491 + port->stats.rx_crc_error += val;
2492 +
2493 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
2494 + port->stats.rx_over_errors += val;
2495 +
2496 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
2497 + port->stats.rx_fragment += val;
2498 +
2499 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
2500 + port->stats.rx_jabber += val;
2501 +
2502 + i = 0;
2503 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
2504 + port->stats.rx_len[i] += val;
2505 +
2506 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
2507 + port->stats.rx_len[i] += ((u64)val << 32);
2508 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
2509 + port->stats.rx_len[i++] += val;
2510 +
2511 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
2512 + port->stats.rx_len[i] += ((u64)val << 32);
2513 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
2514 + port->stats.rx_len[i++] += val;
2515 +
2516 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
2517 + port->stats.rx_len[i] += ((u64)val << 32);
2518 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
2519 + port->stats.rx_len[i++] += val;
2520 +
2521 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
2522 + port->stats.rx_len[i] += ((u64)val << 32);
2523 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
2524 + port->stats.rx_len[i++] += val;
2525 +
2526 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
2527 + port->stats.rx_len[i] += ((u64)val << 32);
2528 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
2529 + port->stats.rx_len[i++] += val;
2530 +
2531 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
2532 + port->stats.rx_len[i] += ((u64)val << 32);
2533 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
2534 + port->stats.rx_len[i++] += val;
2535 +
2536 + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
2537 + port->stats.rx_len[i++] += val;
2538 +
2539 + /* reset mib counters */
2540 + airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
2541 + FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
2542 +
2543 + u64_stats_update_end(&port->stats.syncp);
2544 + spin_unlock(&port->stats.lock);
2545 +}
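
Each MIB counter above is exposed as a 32-bit high/low register pair and is
accumulated as (hi << 32) + lo. A minimal model with made-up register values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t hi = 0x1, lo = 0x80000000;	/* hypothetical reads */
		uint64_t pkts = ((uint64_t)hi << 32) + lo;

		printf("counter value: %llu\n", (unsigned long long)pkts);
		return 0;
	}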
2546 +
2547 +static int airoha_dev_open(struct net_device *dev)
2548 +{
2549 + struct airoha_gdm_port *port = netdev_priv(dev);
2550 + struct airoha_qdma *qdma = port->qdma;
2551 + int err;
2552 +
2553 + netif_tx_start_all_queues(dev);
2554 + err = airoha_set_gdm_ports(qdma->eth, true);
2555 + if (err)
2556 + return err;
2557 +
2558 + if (netdev_uses_dsa(dev))
2559 + airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
2560 + GDM_STAG_EN_MASK);
2561 + else
2562 + airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
2563 + GDM_STAG_EN_MASK);
2564 +
2565 + airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
2566 + GLOBAL_CFG_TX_DMA_EN_MASK |
2567 + GLOBAL_CFG_RX_DMA_EN_MASK);
2568 +
2569 + return 0;
2570 +}
2571 +
2572 +static int airoha_dev_stop(struct net_device *dev)
2573 +{
2574 + struct airoha_gdm_port *port = netdev_priv(dev);
2575 + struct airoha_qdma *qdma = port->qdma;
2576 + int i, err;
2577 +
2578 + netif_tx_disable(dev);
2579 + err = airoha_set_gdm_ports(qdma->eth, false);
2580 + if (err)
2581 + return err;
2582 +
2583 + airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
2584 + GLOBAL_CFG_TX_DMA_EN_MASK |
2585 + GLOBAL_CFG_RX_DMA_EN_MASK);
2586 +
2587 + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2588 + if (!qdma->q_tx[i].ndesc)
2589 + continue;
2590 +
2591 + airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
2592 + netdev_tx_reset_subqueue(dev, i);
2593 + }
2594 +
2595 + return 0;
2596 +}
2597 +
2598 +static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
2599 +{
2600 + struct airoha_gdm_port *port = netdev_priv(dev);
2601 + int err;
2602 +
2603 + err = eth_mac_addr(dev, p);
2604 + if (err)
2605 + return err;
2606 +
2607 + airoha_set_macaddr(port, dev->dev_addr);
2608 +
2609 + return 0;
2610 +}
2611 +
2612 +static int airoha_dev_init(struct net_device *dev)
2613 +{
2614 + struct airoha_gdm_port *port = netdev_priv(dev);
2615 +
2616 + airoha_set_macaddr(port, dev->dev_addr);
2617 +
2618 + return 0;
2619 +}
2620 +
2621 +static void airoha_dev_get_stats64(struct net_device *dev,
2622 + struct rtnl_link_stats64 *storage)
2623 +{
2624 + struct airoha_gdm_port *port = netdev_priv(dev);
2625 + unsigned int start;
2626 +
2627 + airoha_update_hw_stats(port);
2628 + do {
2629 + start = u64_stats_fetch_begin(&port->stats.syncp);
2630 + storage->rx_packets = port->stats.rx_ok_pkts;
2631 + storage->tx_packets = port->stats.tx_ok_pkts;
2632 + storage->rx_bytes = port->stats.rx_ok_bytes;
2633 + storage->tx_bytes = port->stats.tx_ok_bytes;
2634 + storage->multicast = port->stats.rx_multicast;
2635 + storage->rx_errors = port->stats.rx_errors;
2636 + storage->rx_dropped = port->stats.rx_drops;
2637 + storage->tx_dropped = port->stats.tx_drops;
2638 + storage->rx_crc_errors = port->stats.rx_crc_error;
2639 + storage->rx_over_errors = port->stats.rx_over_errors;
2640 + } while (u64_stats_fetch_retry(&port->stats.syncp, start));
2641 +}
2642 +
2643 +static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
2644 + struct net_device *sb_dev)
2645 +{
2646 + struct airoha_gdm_port *port = netdev_priv(dev);
2647 + int queue, channel;
2648 +
2649 + /* For DSA devices, select the QoS channel according to the DSA user
2650 + * port index; rely on the port id otherwise. Select the QoS queue
2651 + * based on the skb priority.
2652 + */
2653 + channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
2654 + channel = channel % AIROHA_NUM_QOS_CHANNELS;
2655 + queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
2656 + queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
2657 +
2658 + return queue < dev->num_tx_queues ? queue : 0;
2659 +}
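
The selected tx queue is the QoS channel times the number of per-channel QoS
queues, plus the in-channel queue derived from the skb priority. A sketch of
that composition, with the channel/queue counts assumed for illustration (the
real values come from the driver header, which is not part of this hunk):

	#include <stdio.h>

	#define NUM_QOS_CHANNELS 32	/* assumed */
	#define NUM_QOS_QUEUES   8	/* assumed */

	static unsigned int select_queue(unsigned int channel,
					 unsigned int priority)
	{
		unsigned int queue = (priority - 1) % NUM_QOS_QUEUES;

		channel %= NUM_QOS_CHANNELS;
		return channel * NUM_QOS_QUEUES + queue;
	}

	int main(void)
	{
		printf("channel 1, prio 3 -> txq %u\n", select_queue(1, 3));
		printf("channel 0, prio 1 -> txq %u\n", select_queue(0, 1));
		return 0;
	}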
2660 +
2661 +static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
2662 + struct net_device *dev)
2663 +{
2664 + struct airoha_gdm_port *port = netdev_priv(dev);
2665 + u32 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
2666 + u32 msg0, msg1, len = skb_headlen(skb);
2667 + struct airoha_qdma *qdma = port->qdma;
2668 + struct netdev_queue *txq;
2669 + struct airoha_queue *q;
2670 + void *data = skb->data;
2671 + int i, qid;
2672 + u16 index;
2673 + u8 fport;
2674 +
2675 + qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
2676 + msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
2677 + qid / AIROHA_NUM_QOS_QUEUES) |
2678 + FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
2679 + qid % AIROHA_NUM_QOS_QUEUES);
2680 + if (skb->ip_summed == CHECKSUM_PARTIAL)
2681 + msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
2682 + FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
2683 + FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
2684 +
2685 + /* TSO: fill MSS info in tcp checksum field */
2686 + if (skb_is_gso(skb)) {
2687 + if (skb_cow_head(skb, 0))
2688 + goto error;
2689 +
2690 + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
2691 + SKB_GSO_TCPV6)) {
2692 + __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
2693 +
2694 + tcp_hdr(skb)->check = (__force __sum16)csum;
2695 + msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
2696 + }
2697 + }
2698 +
2699 + fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
2700 + msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
2701 + FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
2702 +
2703 + q = &qdma->q_tx[qid];
2704 + if (WARN_ON_ONCE(!q->ndesc))
2705 + goto error;
2706 +
2707 + spin_lock_bh(&q->lock);
2708 +
2709 + txq = netdev_get_tx_queue(dev, qid);
2710 + if (q->queued + nr_frags > q->ndesc) {
2711 + /* not enough space in the queue */
2712 + netif_tx_stop_queue(txq);
2713 + spin_unlock_bh(&q->lock);
2714 + return NETDEV_TX_BUSY;
2715 + }
2716 +
2717 + index = q->head;
2718 + for (i = 0; i < nr_frags; i++) {
2719 + struct airoha_qdma_desc *desc = &q->desc[index];
2720 + struct airoha_queue_entry *e = &q->entry[index];
2721 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2722 + dma_addr_t addr;
2723 + u32 val;
2724 +
2725 + addr = dma_map_single(dev->dev.parent, data, len,
2726 + DMA_TO_DEVICE);
2727 + if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
2728 + goto error_unmap;
2729 +
2730 + index = (index + 1) % q->ndesc;
2731 +
2732 + val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
2733 + if (i < nr_frags - 1)
2734 + val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
2735 + WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
2736 + WRITE_ONCE(desc->addr, cpu_to_le32(addr));
2737 + val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
2738 + WRITE_ONCE(desc->data, cpu_to_le32(val));
2739 + WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
2740 + WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
2741 + WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
2742 +
2743 + e->skb = i ? NULL : skb;
2744 + e->dma_addr = addr;
2745 + e->dma_len = len;
2746 +
2747 + data = skb_frag_address(frag);
2748 + len = skb_frag_size(frag);
2749 + }
2750 +
2751 + q->head = index;
2752 + q->queued += i;
2753 +
2754 + skb_tx_timestamp(skb);
2755 + netdev_tx_sent_queue(txq, skb->len);
2756 +
2757 + if (netif_xmit_stopped(txq) || !netdev_xmit_more())
2758 + airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
2759 + TX_RING_CPU_IDX_MASK,
2760 + FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
2761 +
2762 + if (q->ndesc - q->queued < q->free_thr)
2763 + netif_tx_stop_queue(txq);
2764 +
2765 + spin_unlock_bh(&q->lock);
2766 +
2767 + return NETDEV_TX_OK;
2768 +
2769 +error_unmap:
2770 + for (i--; i >= 0; i--) {
2771 + index = (q->head + i) % q->ndesc;
2772 + dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
2773 + q->entry[index].dma_len, DMA_TO_DEVICE);
2774 + }
2775 +
2776 + spin_unlock_bh(&q->lock);
2777 +error:
2778 + dev_kfree_skb_any(skb);
2779 + dev->stats.tx_dropped++;
2780 +
2781 + return NETDEV_TX_OK;
2782 +}
2783 +
2784 +static void airoha_ethtool_get_drvinfo(struct net_device *dev,
2785 + struct ethtool_drvinfo *info)
2786 +{
2787 + struct airoha_gdm_port *port = netdev_priv(dev);
2788 + struct airoha_eth *eth = port->qdma->eth;
2789 +
2790 + strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
2791 + strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
2792 +}
2793 +
2794 +static void airoha_ethtool_get_mac_stats(struct net_device *dev,
2795 + struct ethtool_eth_mac_stats *stats)
2796 +{
2797 + struct airoha_gdm_port *port = netdev_priv(dev);
2798 + unsigned int start;
2799 +
2800 + airoha_update_hw_stats(port);
2801 + do {
2802 + start = u64_stats_fetch_begin(&port->stats.syncp);
2803 + stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
2804 + stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
2805 + stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
2806 + } while (u64_stats_fetch_retry(&port->stats.syncp, start));
2807 +}
2808 +
2809 +static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
2810 + { 0, 64 },
2811 + { 65, 127 },
2812 + { 128, 255 },
2813 + { 256, 511 },
2814 + { 512, 1023 },
2815 + { 1024, 1518 },
2816 + { 1519, 10239 },
2817 + {},
2818 +};
2819 +
2820 +static void
2821 +airoha_ethtool_get_rmon_stats(struct net_device *dev,
2822 + struct ethtool_rmon_stats *stats,
2823 + const struct ethtool_rmon_hist_range **ranges)
2824 +{
2825 + struct airoha_gdm_port *port = netdev_priv(dev);
2826 + struct airoha_hw_stats *hw_stats = &port->stats;
2827 + unsigned int start;
2828 +
2829 + BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2830 + ARRAY_SIZE(hw_stats->tx_len) + 1);
2831 + BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
2832 + ARRAY_SIZE(hw_stats->rx_len) + 1);
2833 +
2834 + *ranges = airoha_ethtool_rmon_ranges;
2835 + airoha_update_hw_stats(port);
2836 + do {
2837 + int i;
2838 +
2839 + start = u64_stats_fetch_begin(&port->stats.syncp);
2840 + stats->fragments = hw_stats->rx_fragment;
2841 + stats->jabbers = hw_stats->rx_jabber;
2842 + for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
2843 + i++) {
2844 + stats->hist[i] = hw_stats->rx_len[i];
2845 + stats->hist_tx[i] = hw_stats->tx_len[i];
2846 + }
2847 + } while (u64_stats_fetch_retry(&port->stats.syncp, start));
2848 +}
2849 +
2850 +static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
2851 + int channel, enum tx_sched_mode mode,
2852 + const u16 *weights, u8 n_weights)
2853 +{
2854 + int i;
2855 +
2856 + for (i = 0; i < AIROHA_NUM_TX_RING; i++)
2857 + airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
2858 + TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
2859 +
2860 + for (i = 0; i < n_weights; i++) {
2861 + u32 status;
2862 + int err;
2863 +
2864 + airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
2865 + TWRR_RW_CMD_MASK |
2866 + FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
2867 + FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
2868 + FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
2869 + err = read_poll_timeout(airoha_qdma_rr, status,
2870 + status & TWRR_RW_CMD_DONE,
2871 + USEC_PER_MSEC, 10 * USEC_PER_MSEC,
2872 + true, port->qdma,
2873 + REG_TXWRR_WEIGHT_CFG);
2874 + if (err)
2875 + return err;
2876 + }
2877 +
2878 + airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
2879 + CHAN_QOS_MODE_MASK(channel),
2880 + mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
2881 +
2882 + return 0;
2883 +}
2884 +
2885 +static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
2886 + int channel)
2887 +{
2888 + static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2889 +
2890 + return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
2891 + ARRAY_SIZE(w));
2892 +}
2893 +
2894 +static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
2895 + int channel,
2896 + struct tc_ets_qopt_offload *opt)
2897 +{
2898 + struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
2899 + enum tx_sched_mode mode = TC_SCH_SP;
2900 + u16 w[AIROHA_NUM_QOS_QUEUES] = {};
2901 + int i, nstrict = 0, nwrr, qidx;
2902 +
2903 + if (p->bands > AIROHA_NUM_QOS_QUEUES)
2904 + return -EINVAL;
2905 +
2906 + for (i = 0; i < p->bands; i++) {
2907 + if (!p->quanta[i])
2908 + nstrict++;
2909 + }
2910 +
2911 + /* this configuration is not supported by the hw */
2912 + if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
2913 + return -EINVAL;
2914 +
2915 + /* The EN7581 SoC supports fixed QoS band priority where WRR queues
2916 + * have lower priority than SP ones,
2917 + * e.g.: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
2918 + */
2919 + nwrr = p->bands - nstrict;
2920 + qidx = nstrict && nwrr ? nstrict : 0;
2921 + for (i = 1; i <= p->bands; i++) {
2922 + if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
2923 + return -EINVAL;
2924 +
2925 + qidx = i == nwrr ? 0 : qidx + 1;
2926 + }
2927 +
2928 + for (i = 0; i < nwrr; i++)
2929 + w[i] = p->weights[nstrict + i];
2930 +
2931 + if (!nstrict)
2932 + mode = TC_SCH_WRR8;
2933 + else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
2934 + mode = nstrict + 1;
2935 +
2936 + return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
2937 + ARRAY_SIZE(w));
2938 +}
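
For a given split of strict and WRR bands, the validation loop above accepts
exactly one priomap layout: WRR bands first, then SP bands. A small generator
that mirrors the loop and prints the accepted layout (the band count of 8 is
an assumption for illustration):

	#include <stdio.h>

	#define NUM_QOS_QUEUES 8	/* assumed */

	static void expected_priomap(int bands, int nstrict)
	{
		int nwrr = bands - nstrict;
		int qidx = nstrict && nwrr ? nstrict : 0;

		for (int i = 1; i <= bands; i++) {
			printf("priomap[%d] = %d\n", i % NUM_QOS_QUEUES, qidx);
			qidx = i == nwrr ? 0 : qidx + 1;
		}
	}

	int main(void)
	{
		expected_priomap(4, 2);	/* 2 WRR bands, then 2 SP bands */
		return 0;
	}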
2939 +
2940 +static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
2941 + int channel,
2942 + struct tc_ets_qopt_offload *opt)
2943 +{
2944 + u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
2945 + REG_CNTR_VAL(channel << 1));
2946 + u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
2947 + REG_CNTR_VAL((channel << 1) + 1));
2948 + u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
2949 + (fwd_tx_packets - port->fwd_tx_packets);
2950 + _bstats_update(opt->stats.bstats, 0, tx_packets);
2951 +
2952 + port->cpu_tx_packets = cpu_tx_packets;
2953 + port->fwd_tx_packets = fwd_tx_packets;
2954 +
2955 + return 0;
2956 +}
2957 +
2958 +static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
2959 + struct tc_ets_qopt_offload *opt)
2960 +{
2961 + int channel = TC_H_MAJ(opt->handle) >> 16;
2962 +
2963 + if (opt->parent == TC_H_ROOT)
2964 + return -EINVAL;
2965 +
2966 + switch (opt->command) {
2967 + case TC_ETS_REPLACE:
2968 + return airoha_qdma_set_tx_ets_sched(port, channel, opt);
2969 + case TC_ETS_DESTROY:
2970 + /* PRIO is the default qdisc scheduler */
2971 + return airoha_qdma_set_tx_prio_sched(port, channel);
2972 + case TC_ETS_STATS:
2973 + return airoha_qdma_get_tx_ets_stats(port, channel, opt);
2974 + default:
2975 + return -EOPNOTSUPP;
2976 + }
2977 +}
2978 +
2979 +static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
2980 + u32 addr, enum trtcm_param_type param,
2981 + enum trtcm_mode_type mode,
2982 + u32 *val_low, u32 *val_high)
2983 +{
2984 + u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
2985 + u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
2986 + FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
2987 + FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
2988 + FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
2989 +
2990 + airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
2991 + if (read_poll_timeout(airoha_qdma_rr, val,
2992 + val & TRTCM_PARAM_RW_DONE_MASK,
2993 + USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
2994 + qdma, REG_TRTCM_CFG_PARAM(addr)))
2995 + return -ETIMEDOUT;
2996 +
2997 + *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
2998 + if (val_high)
2999 + *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
3000 +
3001 + return 0;
3002 +}
3003 +
3004 +static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
3005 + u32 addr, enum trtcm_param_type param,
3006 + enum trtcm_mode_type mode, u32 val)
3007 +{
3008 + u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
3009 + u32 config = TRTCM_PARAM_RW_MASK |
3010 + FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
3011 + FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
3012 + FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
3013 + FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
3014 +
3015 + airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
3016 + airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
3017 +
3018 + return read_poll_timeout(airoha_qdma_rr, val,
3019 + val & TRTCM_PARAM_RW_DONE_MASK,
3020 + USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
3021 + qdma, REG_TRTCM_CFG_PARAM(addr));
3022 +}
3023 +
3024 +static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
3025 + u32 addr, enum trtcm_mode_type mode,
3026 + bool enable, u32 enable_mask)
3027 +{
3028 + u32 val;
3029 +
3030 + if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
3031 + mode, &val, NULL))
3032 + return -EINVAL;
3033 +
3034 + val = enable ? val | enable_mask : val & ~enable_mask;
3035 +
3036 + return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
3037 + mode, val);
3038 +}
3039 +
3040 +static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
3041 + int channel, u32 addr,
3042 + enum trtcm_mode_type mode,
3043 + u32 rate_val, u32 bucket_size)
3044 +{
3045 + u32 val, config, tick, unit, rate, rate_frac;
3046 + int err;
3047 +
3048 + if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
3049 + mode, &config, NULL))
3050 + return -EINVAL;
3051 +
3052 + val = airoha_qdma_rr(qdma, addr);
3053 + tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
3054 + if (config & TRTCM_TICK_SEL)
3055 + tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
3056 + if (!tick)
3057 + return -EINVAL;
3058 +
3059 + unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
3060 + if (!unit)
3061 + return -EINVAL;
3062 +
3063 + rate = rate_val / unit;
3064 + rate_frac = rate_val % unit;
3065 + rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
3066 + rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
3067 + FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
3068 +
3069 + err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
3070 + TRTCM_TOKEN_RATE_MODE, mode, rate);
3071 + if (err)
3072 + return err;
3073 +
3074 + val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
3075 + val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
3076 +
3077 + return airoha_qdma_set_trtcm_param(qdma, channel, addr,
3078 + TRTCM_BUCKETSIZE_SHIFT_MODE,
3079 + mode, val);
3080 +}
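
The token rate above is split into an integer part (rate_val / unit) and a
scaled fractional part, where the unit depends on the meter tick. A worked
example in byte mode, assuming the ingress defaults programmed earlier (fast
tick 125 us, slow tick ratio 8, i.e. tick = 1000 us and unit = 8000 / 1000):

	#include <stdio.h>

	int main(void)
	{
		unsigned int tick = 125 * 8;		/* us, assumed config */
		unsigned int unit = 8000 / tick;	/* = 8 in byte mode */
		unsigned int rate_val = 100;

		printf("unit=%u integer=%u remainder=%u\n",
		       unit, rate_val / unit, rate_val % unit);
		return 0;
	}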
3081 +
3082 +static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
3083 + int channel, u32 rate,
3084 + u32 bucket_size)
3085 +{
3086 + int i, err;
3087 +
3088 + for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
3089 + err = airoha_qdma_set_trtcm_config(port->qdma, channel,
3090 + REG_EGRESS_TRTCM_CFG, i,
3091 + !!rate, TRTCM_METER_MODE);
3092 + if (err)
3093 + return err;
3094 +
3095 + err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
3096 + REG_EGRESS_TRTCM_CFG,
3097 + i, rate, bucket_size);
3098 + if (err)
3099 + return err;
3100 + }
3101 +
3102 + return 0;
3103 +}
3104 +
3105 +static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
3106 + struct tc_htb_qopt_offload *opt)
3107 +{
3108 + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
3109 + u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
3110 + struct net_device *dev = port->dev;
3111 + int num_tx_queues = dev->real_num_tx_queues;
3112 + int err;
3113 +
3114 + if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
3115 + NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
3116 + return -EINVAL;
3117 + }
3118 +
3119 + err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
3120 + if (err) {
3121 + NL_SET_ERR_MSG_MOD(opt->extack,
3122 + "failed configuring htb offload");
3123 + return err;
3124 + }
3125 +
3126 + if (opt->command == TC_HTB_NODE_MODIFY)
3127 + return 0;
3128 +
3129 + err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
3130 + if (err) {
3131 + airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
3132 + NL_SET_ERR_MSG_MOD(opt->extack,
3133 + "failed setting real_num_tx_queues");
3134 + return err;
3135 + }
3136 +
3137 + set_bit(channel, port->qos_sq_bmap);
3138 + opt->qid = AIROHA_NUM_TX_RING + channel;
3139 +
3140 + return 0;
3141 +}
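
The HTB offload rate arrives in bytes per second and is converted to kbit/s
above: divide by 1000, then multiply by 8 via the left shift. For example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t rate_bps = 125000000;	/* 125 MB/s = 1 Gbit/s */
		uint32_t rate_kbps = (uint32_t)(rate_bps / 1000) << 3;

		printf("%llu B/s -> %u kbit/s\n",
		       (unsigned long long)rate_bps, rate_kbps);
		return 0;
	}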
3142 +
3143 +static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
3144 +{
3145 + struct net_device *dev = port->dev;
3146 +
3147 + netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
3148 + airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
3149 + clear_bit(queue, port->qos_sq_bmap);
3150 +}
3151 +
3152 +static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
3153 + struct tc_htb_qopt_offload *opt)
3154 +{
3155 + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
3156 +
3157 + if (!test_bit(channel, port->qos_sq_bmap)) {
3158 + NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
3159 + return -EINVAL;
3160 + }
3161 +
3162 + airoha_tc_remove_htb_queue(port, channel);
3163 +
3164 + return 0;
3165 +}
3166 +
3167 +static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
3168 +{
3169 + int q;
3170 +
3171 + for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
3172 + airoha_tc_remove_htb_queue(port, q);
3173 +
3174 + return 0;
3175 +}
3176 +
3177 +static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
3178 + struct tc_htb_qopt_offload *opt)
3179 +{
3180 + u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
3181 +
3182 + if (!test_bit(channel, port->qos_sq_bmap)) {
3183 + NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
3184 + return -EINVAL;
3185 + }
3186 +
3187 + opt->qid = channel;
3188 +
3189 + return 0;
3190 +}
3191 +
3192 +static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
3193 + struct tc_htb_qopt_offload *opt)
3194 +{
3195 + switch (opt->command) {
3196 + case TC_HTB_CREATE:
3197 + break;
3198 + case TC_HTB_DESTROY:
3199 + return airoha_tc_htb_destroy(port);
3200 + case TC_HTB_NODE_MODIFY:
3201 + case TC_HTB_LEAF_ALLOC_QUEUE:
3202 + return airoha_tc_htb_alloc_leaf_queue(port, opt);
3203 + case TC_HTB_LEAF_DEL:
3204 + case TC_HTB_LEAF_DEL_LAST:
3205 + case TC_HTB_LEAF_DEL_LAST_FORCE:
3206 + return airoha_tc_htb_delete_leaf_queue(port, opt);
3207 + case TC_HTB_LEAF_QUERY_QUEUE:
3208 + return airoha_tc_get_htb_get_leaf_queue(port, opt);
3209 + default:
3210 + return -EOPNOTSUPP;
3211 + }
3212 +
3213 + return 0;
3214 +}
3215 +
3216 +static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
3217 + void *type_data)
3218 +{
3219 + struct airoha_gdm_port *port = netdev_priv(dev);
3220 +
3221 + switch (type) {
3222 + case TC_SETUP_QDISC_ETS:
3223 + return airoha_tc_setup_qdisc_ets(port, type_data);
3224 + case TC_SETUP_QDISC_HTB:
3225 + return airoha_tc_setup_qdisc_htb(port, type_data);
3226 + default:
3227 + return -EOPNOTSUPP;
3228 + }
3229 +}
3230 +
3231 +static const struct net_device_ops airoha_netdev_ops = {
3232 + .ndo_init = airoha_dev_init,
3233 + .ndo_open = airoha_dev_open,
3234 + .ndo_stop = airoha_dev_stop,
3235 + .ndo_select_queue = airoha_dev_select_queue,
3236 + .ndo_start_xmit = airoha_dev_xmit,
3237 + .ndo_get_stats64 = airoha_dev_get_stats64,
3238 + .ndo_set_mac_address = airoha_dev_set_macaddr,
3239 + .ndo_setup_tc = airoha_dev_tc_setup,
3240 +};
3241 +
3242 +static const struct ethtool_ops airoha_ethtool_ops = {
3243 + .get_drvinfo = airoha_ethtool_get_drvinfo,
3244 + .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
3245 + .get_rmon_stats = airoha_ethtool_get_rmon_stats,
3246 +};
3247 +
3248 +static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
3249 +{
3250 + const __be32 *id_ptr = of_get_property(np, "reg", NULL);
3251 + struct airoha_gdm_port *port;
3252 + struct airoha_qdma *qdma;
3253 + struct net_device *dev;
3254 + int err, index;
3255 + u32 id;
3256 +
3257 + if (!id_ptr) {
3258 + dev_err(eth->dev, "missing gdm port id\n");
3259 + return -EINVAL;
3260 + }
3261 +
3262 + id = be32_to_cpup(id_ptr);
3263 + index = id - 1;
3264 +
3265 + if (!id || id > ARRAY_SIZE(eth->ports)) {
3266 + dev_err(eth->dev, "invalid gdm port id: %d\n", id);
3267 + return -EINVAL;
3268 + }
3269 +
3270 + if (eth->ports[index]) {
3271 + dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
3272 + return -EINVAL;
3273 + }
3274 +
3275 + dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
3276 + AIROHA_NUM_NETDEV_TX_RINGS,
3277 + AIROHA_NUM_RX_RING);
3278 + if (!dev) {
3279 + dev_err(eth->dev, "alloc_etherdev failed\n");
3280 + return -ENOMEM;
3281 + }
3282 +
3283 + qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
3284 + dev->netdev_ops = &airoha_netdev_ops;
3285 + dev->ethtool_ops = &airoha_ethtool_ops;
3286 + dev->max_mtu = AIROHA_MAX_MTU;
3287 + dev->watchdog_timeo = 5 * HZ;
3288 + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
3289 + NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
3290 + NETIF_F_SG | NETIF_F_TSO |
3291 + NETIF_F_HW_TC;
3292 + dev->features |= dev->hw_features;
3293 + dev->dev.of_node = np;
3294 + dev->irq = qdma->irq;
3295 + SET_NETDEV_DEV(dev, eth->dev);
3296 +
3297 + /* reserve hw queues for HTB offloading */
3298 + err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
3299 + if (err)
3300 + return err;
3301 +
3302 + err = of_get_ethdev_address(np, dev);
3303 + if (err) {
3304 + if (err == -EPROBE_DEFER)
3305 + return err;
3306 +
3307 + eth_hw_addr_random(dev);
3308 + dev_info(eth->dev, "generated random MAC address %pM\n",
3309 + dev->dev_addr);
3310 + }
3311 +
3312 + port = netdev_priv(dev);
3313 + u64_stats_init(&port->stats.syncp);
3314 + spin_lock_init(&port->stats.lock);
3315 + port->qdma = qdma;
3316 + port->dev = dev;
3317 + port->id = id;
3318 + eth->ports[index] = port;
3319 +
3320 + return register_netdev(dev);
3321 +}
3322 +
3323 +static int airoha_probe(struct platform_device *pdev)
3324 +{
3325 + struct device_node *np;
3326 + struct airoha_eth *eth;
3327 + int i, err;
3328 +
3329 + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3330 + if (!eth)
3331 + return -ENOMEM;
3332 +
3333 + eth->dev = &pdev->dev;
3334 +
3335 + err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
3336 + if (err) {
3337 + dev_err(eth->dev, "failed configuring DMA mask\n");
3338 + return err;
3339 + }
3340 +
3341 + eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
3342 + if (IS_ERR(eth->fe_regs))
3343 + return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
3344 + "failed to iomap fe regs\n");
3345 +
3346 + eth->rsts[0].id = "fe";
3347 + eth->rsts[1].id = "pdma";
3348 + eth->rsts[2].id = "qdma";
3349 + err = devm_reset_control_bulk_get_exclusive(eth->dev,
3350 + ARRAY_SIZE(eth->rsts),
3351 + eth->rsts);
3352 + if (err) {
3353 + dev_err(eth->dev, "failed to get bulk reset lines\n");
3354 + return err;
3355 + }
3356 +
3357 + eth->xsi_rsts[0].id = "xsi-mac";
3358 + eth->xsi_rsts[1].id = "hsi0-mac";
3359 + eth->xsi_rsts[2].id = "hsi1-mac";
3360 + eth->xsi_rsts[3].id = "hsi-mac";
3361 + eth->xsi_rsts[4].id = "xfp-mac";
3362 + err = devm_reset_control_bulk_get_exclusive(eth->dev,
3363 + ARRAY_SIZE(eth->xsi_rsts),
3364 + eth->xsi_rsts);
3365 + if (err) {
3366 + dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
3367 + return err;
3368 + }
3369 +
3370 + eth->napi_dev = alloc_netdev_dummy(0);
3371 + if (!eth->napi_dev)
3372 + return -ENOMEM;
3373 +
3374 + /* Enable threaded NAPI by default */
3375 + eth->napi_dev->threaded = true;
3376 + strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
3377 + platform_set_drvdata(pdev, eth);
3378 +
3379 + err = airoha_hw_init(pdev, eth);
3380 + if (err)
3381 + goto error_hw_cleanup;
3382 +
3383 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3384 + airoha_qdma_start_napi(&eth->qdma[i]);
3385 +
3386 + for_each_child_of_node(pdev->dev.of_node, np) {
3387 + if (!of_device_is_compatible(np, "airoha,eth-mac"))
3388 + continue;
3389 +
3390 + if (!of_device_is_available(np))
3391 + continue;
3392 +
3393 + err = airoha_alloc_gdm_port(eth, np);
3394 + if (err) {
3395 + of_node_put(np);
3396 + goto error_napi_stop;
3397 + }
3398 + }
3399 +
3400 + return 0;
3401 +
3402 +error_napi_stop:
3403 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3404 + airoha_qdma_stop_napi(&eth->qdma[i]);
3405 +error_hw_cleanup:
3406 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
3407 + airoha_hw_cleanup(&eth->qdma[i]);
3408 +
3409 + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3410 + struct airoha_gdm_port *port = eth->ports[i];
3411 +
3412 + if (port && port->dev->reg_state == NETREG_REGISTERED)
3413 + unregister_netdev(port->dev);
3414 + }
3415 + free_netdev(eth->napi_dev);
3416 + platform_set_drvdata(pdev, NULL);
3417 +
3418 + return err;
3419 +}
3420 +
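One subtlety in the probe loop above: for_each_child_of_node() holds a reference on the current child and only drops it when the iterator advances, so leaving the loop early leaks that reference unless the driver drops it itself, which is exactly what the of_node_put() before the goto error_napi_stop does. A minimal sketch of the rule, with placeholder names ("vendor,example" is a hypothetical compatible string):

#include <linux/of.h>

static int example_setup_child(struct device_node *np)
{
	return 0;	/* placeholder for per-child setup */
}

static int example_walk_children(struct device_node *parent)
{
	struct device_node *np;
	int err;

	for_each_child_of_node(parent, np) {
		if (!of_device_is_compatible(np, "vendor,example"))
			continue;

		err = example_setup_child(np);
		if (err) {
			of_node_put(np);	/* the iterator won't drop it */
			return err;
		}
	}

	return 0;
}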
3421 +static void airoha_remove(struct platform_device *pdev)
3422 +{
3423 + struct airoha_eth *eth = platform_get_drvdata(pdev);
3424 + int i;
3425 +
3426 + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
3427 + airoha_qdma_stop_napi(&eth->qdma[i]);
3428 + airoha_hw_cleanup(&eth->qdma[i]);
3429 + }
3430 +
3431 + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
3432 + struct airoha_gdm_port *port = eth->ports[i];
3433 +
3434 + if (!port)
3435 + continue;
3436 +
3437 + airoha_dev_stop(port->dev);
3438 + unregister_netdev(port->dev);
3439 + }
3440 + free_netdev(eth->napi_dev);
3441 +
3442 + platform_set_drvdata(pdev, NULL);
3443 +}
3444 +
3445 +static const struct of_device_id of_airoha_match[] = {
3446 + { .compatible = "airoha,en7581-eth" },
3447 + { /* sentinel */ }
3448 +};
3449 +MODULE_DEVICE_TABLE(of, of_airoha_match);
3450 +
3451 +static struct platform_driver airoha_driver = {
3452 + .probe = airoha_probe,
3453 + .remove_new = airoha_remove,
3454 + .driver = {
3455 + .name = KBUILD_MODNAME,
3456 + .of_match_table = of_airoha_match,
3457 + },
3458 +};
3459 +module_platform_driver(airoha_driver);
3460 +
3461 +MODULE_LICENSE("GPL");
3462 +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
3463 +MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
3464 --- a/drivers/net/ethernet/mediatek/airoha_eth.c
3465 +++ /dev/null
3466 @@ -1,3359 +0,0 @@
3467 -// SPDX-License-Identifier: GPL-2.0-only
3468 -/*
3469 - * Copyright (c) 2024 AIROHA Inc
3470 - * Author: Lorenzo Bianconi <lorenzo@kernel.org>
3471 - */
3472 -#include <linux/etherdevice.h>
3473 -#include <linux/iopoll.h>
3474 -#include <linux/kernel.h>
3475 -#include <linux/netdevice.h>
3476 -#include <linux/of.h>
3477 -#include <linux/of_net.h>
3478 -#include <linux/platform_device.h>
3479 -#include <linux/reset.h>
3480 -#include <linux/tcp.h>
3481 -#include <linux/u64_stats_sync.h>
3482 -#include <net/dsa.h>
3483 -#include <net/page_pool/helpers.h>
3484 -#include <net/pkt_cls.h>
3485 -#include <uapi/linux/ppp_defs.h>
3486 -
3487 -#define AIROHA_MAX_NUM_GDM_PORTS 1
3488 -#define AIROHA_MAX_NUM_QDMA 2
3489 -#define AIROHA_MAX_NUM_RSTS 3
3490 -#define AIROHA_MAX_NUM_XSI_RSTS 5
3491 -#define AIROHA_MAX_MTU 2000
3492 -#define AIROHA_MAX_PACKET_SIZE 2048
3493 -#define AIROHA_NUM_QOS_CHANNELS 4
3494 -#define AIROHA_NUM_QOS_QUEUES 8
3495 -#define AIROHA_NUM_TX_RING 32
3496 -#define AIROHA_NUM_RX_RING 32
3497 -#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
3498 - AIROHA_NUM_QOS_CHANNELS)
3499 -#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
3500 -#define AIROHA_FE_MC_MAX_VLAN_PORT 16
3501 -#define AIROHA_NUM_TX_IRQ 2
3502 -#define HW_DSCP_NUM 2048
3503 -#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
3504 -#define TX_DSCP_NUM 1024
3505 -#define RX_DSCP_NUM(_n) \
3506 - ((_n) == 2 ? 128 : \
3507 - (_n) == 11 ? 128 : \
3508 - (_n) == 15 ? 128 : \
3509 - (_n) == 0 ? 1024 : 16)
3510 -
3511 -#define PSE_RSV_PAGES 128
3512 -#define PSE_QUEUE_RSV_PAGES 64
3513 -
3514 -#define QDMA_METER_IDX(_n) ((_n) & 0xff)
3515 -#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
3516 -
3517 -/* FE */
3518 -#define PSE_BASE 0x0100
3519 -#define CSR_IFC_BASE 0x0200
3520 -#define CDM1_BASE 0x0400
3521 -#define GDM1_BASE 0x0500
3522 -#define PPE1_BASE 0x0c00
3523 -
3524 -#define CDM2_BASE 0x1400
3525 -#define GDM2_BASE 0x1500
3526 -
3527 -#define GDM3_BASE 0x1100
3528 -#define GDM4_BASE 0x2500
3529 -
3530 -#define GDM_BASE(_n) \
3531 - ((_n) == 4 ? GDM4_BASE : \
3532 - (_n) == 3 ? GDM3_BASE : \
3533 - (_n) == 2 ? GDM2_BASE : GDM1_BASE)
3534 -
3535 -#define REG_FE_DMA_GLO_CFG 0x0000
3536 -#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
3537 -#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
3538 -
3539 -#define REG_FE_RST_GLO_CFG 0x0004
3540 -#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
3541 -#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
3542 -#define FE_RST_CORE_MASK BIT(0)
3543 -
3544 -#define REG_FE_WAN_MAC_H 0x0030
3545 -#define REG_FE_LAN_MAC_H 0x0040
3546 -
3547 -#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
3548 -#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
3549 -
3550 -#define REG_FE_CDM1_OQ_MAP0 0x0050
3551 -#define REG_FE_CDM1_OQ_MAP1 0x0054
3552 -#define REG_FE_CDM1_OQ_MAP2 0x0058
3553 -#define REG_FE_CDM1_OQ_MAP3 0x005c
3554 -
3555 -#define REG_FE_PCE_CFG 0x0070
3556 -#define PCE_DPI_EN_MASK BIT(2)
3557 -#define PCE_KA_EN_MASK BIT(1)
3558 -#define PCE_MC_EN_MASK BIT(0)
3559 -
3560 -#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
3561 -#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
3562 -#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
3563 -#define PSE_CFG_WR_EN_MASK BIT(8)
3564 -#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
3565 -
3566 -#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
3567 -#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
3568 -
3569 -#define PSE_FQ_CFG 0x008c
3570 -#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
3571 -
3572 -#define REG_FE_PSE_BUF_SET 0x0090
3573 -#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
3574 -#define PSE_ALLRSV_MASK GENMASK(14, 0)
3575 -
3576 -#define REG_PSE_SHARE_USED_THD 0x0094
3577 -#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
3578 -#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
3579 -
3580 -#define REG_GDM_MISC_CFG 0x0148
3581 -#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
3582 -#define GDM2_CHN_VLD_MODE_MASK BIT(5)
3583 -
3584 -#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
3585 -#define FE_IFC_EN_MASK BIT(0)
3586 -
3587 -#define REG_FE_VIP_PORT_EN 0x01f0
3588 -#define REG_FE_IFC_PORT_EN 0x01f4
3589 -
3590 -#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
3591 -#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
3592 -
3593 -#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
3594 -#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
3595 -#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
3596 -
3597 -#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
3598 -#define PATN_FCPU_EN_MASK BIT(7)
3599 -#define PATN_SWP_EN_MASK BIT(6)
3600 -#define PATN_DP_EN_MASK BIT(5)
3601 -#define PATN_SP_EN_MASK BIT(4)
3602 -#define PATN_TYPE_MASK GENMASK(3, 1)
3603 -#define PATN_EN_MASK BIT(0)
3604 -
3605 -#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
3606 -#define PATN_DP_MASK GENMASK(31, 16)
3607 -#define PATN_SP_MASK GENMASK(15, 0)
3608 -
3609 -#define REG_CDM1_VLAN_CTRL CDM1_BASE
3610 -#define CDM1_VLAN_MASK GENMASK(31, 16)
3611 -
3612 -#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
3613 -#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
3614 -
3615 -#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
3616 -#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
3617 - GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
3618 -
3619 -#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
3620 -#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
3621 -#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
3622 -
3623 -#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
3624 -#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
3625 - GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
3626 -
3627 -#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
3628 -#define GDM_DROP_CRC_ERR BIT(23)
3629 -#define GDM_IP4_CKSUM BIT(22)
3630 -#define GDM_TCP_CKSUM BIT(21)
3631 -#define GDM_UDP_CKSUM BIT(20)
3632 -#define GDM_UCFQ_MASK GENMASK(15, 12)
3633 -#define GDM_BCFQ_MASK GENMASK(11, 8)
3634 -#define GDM_MCFQ_MASK GENMASK(7, 4)
3635 -#define GDM_OCFQ_MASK GENMASK(3, 0)
3636 -
3637 -#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
3638 -#define GDM_INGRESS_FC_EN_MASK BIT(1)
3639 -#define GDM_STAG_EN_MASK BIT(0)
3640 -
3641 -#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
3642 -#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
3643 -#define GDM_LONG_LEN_MASK GENMASK(29, 16)
3644 -
3645 -#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
3646 -#define FE_CPORT_PAD BIT(26)
3647 -#define FE_CPORT_PORT_XFC_MASK BIT(25)
3648 -#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
3649 -
3650 -#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
3651 -#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
3652 -#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
3653 -
3654 -#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
3655 -#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
3656 -#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
3657 -#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
3658 -#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
3659 -#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
3660 -
3661 -#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
3662 -#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
3663 -#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
3664 -#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
3665 -#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
3666 -#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
3667 -#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
3668 -#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
3669 -#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
3670 -#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
3671 -#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
3672 -#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
3673 -#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
3674 -#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
3675 -#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
3676 -
3677 -#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
3678 -#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
3679 -#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
3680 -#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
3681 -#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
3682 -#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
3683 -#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
3684 -#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
3685 -#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
3686 -#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
3687 -#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
3688 -#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
3689 -#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
3690 -#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
3691 -#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
3692 -#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
3693 -#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
3694 -#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
3695 -#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
3696 -#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
3697 -#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
3698 -#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
3699 -
3700 -#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
3701 -#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
3702 -#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
3703 -#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
3704 -#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
3705 -
3706 -#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
3707 -#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
3708 -#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
3709 -#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
3710 -
3711 -#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
3712 -#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
3713 -#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
3714 -#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
3715 -#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
3716 -#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
3717 -#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
3718 -#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
3719 -#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
3720 -#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
3721 -#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
3722 -#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
3723 -#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
3724 -#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
3725 -#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
3726 -#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
3727 -
3728 -#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
3729 -#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
3730 -#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
3731 -
3732 -#define REG_GDM3_FWD_CFG GDM3_BASE
3733 -#define GDM3_PAD_EN_MASK BIT(28)
3734 -
3735 -#define REG_GDM4_FWD_CFG GDM4_BASE
3736 -#define GDM4_PAD_EN_MASK BIT(28)
3737 -#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
3738 -
3739 -#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
3740 -#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
3741 -#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
3742 -#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
3743 -
3744 -#define REG_IP_FRAG_FP 0x2010
3745 -#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
3746 -#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
3747 -#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
3748 -#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
3749 -
3750 -#define REG_MC_VLAN_EN 0x2100
3751 -#define MC_VLAN_EN_MASK BIT(0)
3752 -
3753 -#define REG_MC_VLAN_CFG 0x2104
3754 -#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
3755 -#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
3756 -#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
3757 -#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
3758 -#define MC_VLAN_CFG_RW_MASK BIT(0)
3759 -
3760 -#define REG_MC_VLAN_DATA 0x2108
3761 -
3762 -#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
3763 -
3764 -/* QDMA */
3765 -#define REG_QDMA_GLOBAL_CFG 0x0004
3766 -#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
3767 -#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
3768 -#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
3769 -#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
3770 -#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
3771 -#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
3772 -#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
3773 -#define GLOBAL_CFG_RESET_MASK BIT(23)
3774 -#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
3775 -#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
3776 -#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
3777 -#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
3778 -#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
3779 -#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
3780 -#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
3781 -#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
3782 -#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
3783 -#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
3784 -#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
3785 -#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
3786 -#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
3787 -#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
3788 -#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
3789 -
3790 -#define REG_FWD_DSCP_BASE 0x0010
3791 -#define REG_FWD_BUF_BASE 0x0014
3792 -
3793 -#define REG_HW_FWD_DSCP_CFG 0x0018
3794 -#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
3795 -#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
3796 -#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
3797 -
3798 -#define REG_INT_STATUS(_n) \
3799 - (((_n) == 4) ? 0x0730 : \
3800 - ((_n) == 3) ? 0x0724 : \
3801 - ((_n) == 2) ? 0x0720 : \
3802 - ((_n) == 1) ? 0x0024 : 0x0020)
3803 -
3804 -#define REG_INT_ENABLE(_n) \
3805 - (((_n) == 4) ? 0x0750 : \
3806 - ((_n) == 3) ? 0x0744 : \
3807 - ((_n) == 2) ? 0x0740 : \
3808 - ((_n) == 1) ? 0x002c : 0x0028)
3809 -
3810 -/* QDMA_CSR_INT_ENABLE1 */
3811 -#define RX15_COHERENT_INT_MASK BIT(31)
3812 -#define RX14_COHERENT_INT_MASK BIT(30)
3813 -#define RX13_COHERENT_INT_MASK BIT(29)
3814 -#define RX12_COHERENT_INT_MASK BIT(28)
3815 -#define RX11_COHERENT_INT_MASK BIT(27)
3816 -#define RX10_COHERENT_INT_MASK BIT(26)
3817 -#define RX9_COHERENT_INT_MASK BIT(25)
3818 -#define RX8_COHERENT_INT_MASK BIT(24)
3819 -#define RX7_COHERENT_INT_MASK BIT(23)
3820 -#define RX6_COHERENT_INT_MASK BIT(22)
3821 -#define RX5_COHERENT_INT_MASK BIT(21)
3822 -#define RX4_COHERENT_INT_MASK BIT(20)
3823 -#define RX3_COHERENT_INT_MASK BIT(19)
3824 -#define RX2_COHERENT_INT_MASK BIT(18)
3825 -#define RX1_COHERENT_INT_MASK BIT(17)
3826 -#define RX0_COHERENT_INT_MASK BIT(16)
3827 -#define TX7_COHERENT_INT_MASK BIT(15)
3828 -#define TX6_COHERENT_INT_MASK BIT(14)
3829 -#define TX5_COHERENT_INT_MASK BIT(13)
3830 -#define TX4_COHERENT_INT_MASK BIT(12)
3831 -#define TX3_COHERENT_INT_MASK BIT(11)
3832 -#define TX2_COHERENT_INT_MASK BIT(10)
3833 -#define TX1_COHERENT_INT_MASK BIT(9)
3834 -#define TX0_COHERENT_INT_MASK BIT(8)
3835 -#define CNT_OVER_FLOW_INT_MASK BIT(7)
3836 -#define IRQ1_FULL_INT_MASK BIT(5)
3837 -#define IRQ1_INT_MASK BIT(4)
3838 -#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
3839 -#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
3840 -#define IRQ0_FULL_INT_MASK BIT(1)
3841 -#define IRQ0_INT_MASK BIT(0)
3842 -
3843 -#define TX_DONE_INT_MASK(_n) \
3844 - ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
3845 - : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
3846 -
3847 -#define INT_TX_MASK \
3848 - (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
3849 - IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
3850 -
3851 -#define INT_IDX0_MASK \
3852 - (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
3853 - TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
3854 - TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
3855 - TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
3856 - RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
3857 - RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
3858 - RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
3859 - RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
3860 - RX15_COHERENT_INT_MASK | INT_TX_MASK)
3861 -
3862 -/* QDMA_CSR_INT_ENABLE2 */
3863 -#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
3864 -#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
3865 -#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
3866 -#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
3867 -#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
3868 -#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
3869 -#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
3870 -#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
3871 -#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
3872 -#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
3873 -#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
3874 -#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
3875 -#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
3876 -#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
3877 -#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
3878 -#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
3879 -#define RX15_DONE_INT_MASK BIT(15)
3880 -#define RX14_DONE_INT_MASK BIT(14)
3881 -#define RX13_DONE_INT_MASK BIT(13)
3882 -#define RX12_DONE_INT_MASK BIT(12)
3883 -#define RX11_DONE_INT_MASK BIT(11)
3884 -#define RX10_DONE_INT_MASK BIT(10)
3885 -#define RX9_DONE_INT_MASK BIT(9)
3886 -#define RX8_DONE_INT_MASK BIT(8)
3887 -#define RX7_DONE_INT_MASK BIT(7)
3888 -#define RX6_DONE_INT_MASK BIT(6)
3889 -#define RX5_DONE_INT_MASK BIT(5)
3890 -#define RX4_DONE_INT_MASK BIT(4)
3891 -#define RX3_DONE_INT_MASK BIT(3)
3892 -#define RX2_DONE_INT_MASK BIT(2)
3893 -#define RX1_DONE_INT_MASK BIT(1)
3894 -#define RX0_DONE_INT_MASK BIT(0)
3895 -
3896 -#define RX_DONE_INT_MASK \
3897 - (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
3898 - RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
3899 - RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
3900 - RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
3901 - RX15_DONE_INT_MASK)
3902 -#define INT_IDX1_MASK \
3903 - (RX_DONE_INT_MASK | \
3904 - RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
3905 - RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
3906 - RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
3907 - RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
3908 - RX15_NO_CPU_DSCP_INT_MASK)
3909 -
3910 -/* QDMA_CSR_INT_ENABLE5 */
3911 -#define TX31_COHERENT_INT_MASK BIT(31)
3912 -#define TX30_COHERENT_INT_MASK BIT(30)
3913 -#define TX29_COHERENT_INT_MASK BIT(29)
3914 -#define TX28_COHERENT_INT_MASK BIT(28)
3915 -#define TX27_COHERENT_INT_MASK BIT(27)
3916 -#define TX26_COHERENT_INT_MASK BIT(26)
3917 -#define TX25_COHERENT_INT_MASK BIT(25)
3918 -#define TX24_COHERENT_INT_MASK BIT(24)
3919 -#define TX23_COHERENT_INT_MASK BIT(23)
3920 -#define TX22_COHERENT_INT_MASK BIT(22)
3921 -#define TX21_COHERENT_INT_MASK BIT(21)
3922 -#define TX20_COHERENT_INT_MASK BIT(20)
3923 -#define TX19_COHERENT_INT_MASK BIT(19)
3924 -#define TX18_COHERENT_INT_MASK BIT(18)
3925 -#define TX17_COHERENT_INT_MASK BIT(17)
3926 -#define TX16_COHERENT_INT_MASK BIT(16)
3927 -#define TX15_COHERENT_INT_MASK BIT(15)
3928 -#define TX14_COHERENT_INT_MASK BIT(14)
3929 -#define TX13_COHERENT_INT_MASK BIT(13)
3930 -#define TX12_COHERENT_INT_MASK BIT(12)
3931 -#define TX11_COHERENT_INT_MASK BIT(11)
3932 -#define TX10_COHERENT_INT_MASK BIT(10)
3933 -#define TX9_COHERENT_INT_MASK BIT(9)
3934 -#define TX8_COHERENT_INT_MASK BIT(8)
3935 -
3936 -#define INT_IDX4_MASK \
3937 - (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
3938 - TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
3939 - TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
3940 - TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
3941 - TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
3942 - TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
3943 - TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
3944 - TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
3945 - TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
3946 - TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
3947 - TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
3948 - TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
3949 -
3950 -#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
3951 -
3952 -#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
3953 -#define TX_IRQ_THR_MASK GENMASK(27, 16)
3954 -#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
3955 -
3956 -#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
3957 -#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
3958 -
3959 -#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
3960 -#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
3961 -#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
3962 -
3963 -#define REG_TX_RING_BASE(_n) \
3964 - (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
3965 -
3966 -#define REG_TX_RING_BLOCKING(_n) \
3967 - (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
3968 -
3969 -#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
3970 -#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
3971 -#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
3972 -#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
3973 -#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
3974 -
3975 -#define REG_TX_CPU_IDX(_n) \
3976 - (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
3977 -
3978 -#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
3979 -
3980 -#define REG_TX_DMA_IDX(_n) \
3981 - (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
3982 -
3983 -#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
3984 -
3985 -#define IRQ_RING_IDX_MASK GENMASK(20, 16)
3986 -#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
3987 -
3988 -#define REG_RX_RING_BASE(_n) \
3989 - (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
3990 -
3991 -#define REG_RX_RING_SIZE(_n) \
3992 - (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
3993 -
3994 -#define RX_RING_THR_MASK GENMASK(31, 16)
3995 -#define RX_RING_SIZE_MASK GENMASK(15, 0)
3996 -
3997 -#define REG_RX_CPU_IDX(_n) \
3998 - (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
3999 -
4000 -#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
4001 -
4002 -#define REG_RX_DMA_IDX(_n) \
4003 - (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
4004 -
4005 -#define REG_RX_DELAY_INT_IDX(_n) \
4006 - (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
4007 -
4008 -#define RX_DELAY_INT_MASK GENMASK(15, 0)
4009 -
4010 -#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
4011 -
4012 -#define REG_INGRESS_TRTCM_CFG 0x0070
4013 -#define INGRESS_TRTCM_EN_MASK BIT(31)
4014 -#define INGRESS_TRTCM_MODE_MASK BIT(30)
4015 -#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
4016 -#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
4017 -
4018 -#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
4019 -#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
4020 -
4021 -#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
4022 -#define REG_TXQ_DIS_CFG(_n, _m)		(REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
4023 -
4024 -#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
4025 -#define CNTR_EN_MASK BIT(31)
4026 -#define CNTR_ALL_CHAN_EN_MASK BIT(30)
4027 -#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
4028 -#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
4029 -#define CNTR_SRC_MASK GENMASK(27, 24)
4030 -#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
4031 -#define CNTR_CHAN_MASK GENMASK(7, 3)
4032 -#define CNTR_QUEUE_MASK GENMASK(2, 0)
4033 -
4034 -#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
4035 -
4036 -#define REG_LMGR_INIT_CFG 0x1000
4037 -#define LMGR_INIT_START BIT(31)
4038 -#define LMGR_SRAM_MODE_MASK BIT(30)
4039 -#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
4040 -#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
4041 -
4042 -#define REG_FWD_DSCP_LOW_THR 0x1004
4043 -#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
4044 -
4045 -#define REG_EGRESS_RATE_METER_CFG 0x100c
4046 -#define EGRESS_RATE_METER_EN_MASK BIT(31)
4047 -#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
4048 -#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
4049 -#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
4050 -
4051 -#define REG_EGRESS_TRTCM_CFG 0x1010
4052 -#define EGRESS_TRTCM_EN_MASK BIT(31)
4053 -#define EGRESS_TRTCM_MODE_MASK BIT(30)
4054 -#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
4055 -#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
4056 -
4057 -#define TRTCM_PARAM_RW_MASK BIT(31)
4058 -#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
4059 -#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
4060 -#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
4061 -#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
4062 -#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
4063 -
4064 -#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
4065 -#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
4066 -#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
4067 -
4068 -#define REG_TXWRR_MODE_CFG 0x1020
4069 -#define TWRR_WEIGHT_SCALE_MASK BIT(31)
4070 -#define TWRR_WEIGHT_BASE_MASK BIT(3)
4071 -
4072 -#define REG_TXWRR_WEIGHT_CFG 0x1024
4073 -#define TWRR_RW_CMD_MASK BIT(31)
4074 -#define TWRR_RW_CMD_DONE BIT(30)
4075 -#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
4076 -#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
4077 -#define TWRR_VALUE_MASK GENMASK(15, 0)
4078 -
4079 -#define REG_PSE_BUF_USAGE_CFG 0x1028
4080 -#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
4081 -
4082 -#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
4083 -#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
4084 -
4085 -#define REG_GLB_TRTCM_CFG 0x1080
4086 -#define GLB_TRTCM_EN_MASK BIT(31)
4087 -#define GLB_TRTCM_MODE_MASK BIT(30)
4088 -#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
4089 -#define GLB_FAST_TICK_MASK GENMASK(15, 0)
4090 -
4091 -#define REG_TXQ_CNGST_CFG 0x10a0
4092 -#define TXQ_CNGST_DROP_EN BIT(31)
4093 -#define TXQ_CNGST_DEI_DROP_EN BIT(30)
4094 -
4095 -#define REG_SLA_TRTCM_CFG 0x1150
4096 -#define SLA_TRTCM_EN_MASK BIT(31)
4097 -#define SLA_TRTCM_MODE_MASK BIT(30)
4098 -#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
4099 -#define SLA_FAST_TICK_MASK GENMASK(15, 0)
4100 -
4101 -/* CTRL */
4102 -#define QDMA_DESC_DONE_MASK BIT(31)
4103 -#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
4104 -#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
4105 -#define QDMA_DESC_DEI_MASK BIT(25)
4106 -#define QDMA_DESC_NO_DROP_MASK BIT(24)
4107 -#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
4108 -/* DATA */
4109 -#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
4110 -/* TX MSG0 */
4111 -#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
4112 -#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
4113 -#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
4114 -#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
4115 -#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
4116 -#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
4117 -#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
4118 -#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
4119 -#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
4120 -#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
4121 -/* TX MSG1 */
4122 -#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
4123 -#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
4124 -#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
4125 -#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
4126 -#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
4127 -#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
4128 -#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
4129 -#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
4130 -#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
4131 -
4132 -/* RX MSG1 */
4133 -#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
4134 -#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
4135 -#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
4136 -#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
4137 -#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
4138 -#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
4139 -#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
4140 -#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
4141 -#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
4142 -
4143 -struct airoha_qdma_desc {
4144 - __le32 rsv;
4145 - __le32 ctrl;
4146 - __le32 addr;
4147 - __le32 data;
4148 - __le32 msg0;
4149 - __le32 msg1;
4150 - __le32 msg2;
4151 - __le32 msg3;
4152 -};
4153 -
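The CTRL/DATA/MSG words of struct airoha_qdma_desc are plain little-endian bitfields composed from the GENMASK() definitions above via FIELD_PREP(). As an illustration only (values and function name are not from the driver), a TX msg0 word selecting a channel/queue and enabling TCP checksum offload could be built like so:

#include <linux/bitfield.h>

static __le32 example_tx_msg0(u32 chan, u32 queue)
{
	u32 msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, chan) |
		   FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, queue) |
		   QDMA_ETH_TXMSG_TCO_MASK;	/* TCP checksum offload */

	return cpu_to_le32(msg0);
}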
4154 -/* CTRL0 */
4155 -#define QDMA_FWD_DESC_CTX_MASK BIT(31)
4156 -#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
4157 -#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
4158 -#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
4159 -/* CTRL1 */
4160 -#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
4161 -/* CTRL2 */
4162 -#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
4163 -
4164 -struct airoha_qdma_fwd_desc {
4165 - __le32 addr;
4166 - __le32 ctrl0;
4167 - __le32 ctrl1;
4168 - __le32 ctrl2;
4169 - __le32 msg0;
4170 - __le32 msg1;
4171 - __le32 rsv0;
4172 - __le32 rsv1;
4173 -};
4174 -
4175 -enum {
4176 - QDMA_INT_REG_IDX0,
4177 - QDMA_INT_REG_IDX1,
4178 - QDMA_INT_REG_IDX2,
4179 - QDMA_INT_REG_IDX3,
4180 - QDMA_INT_REG_IDX4,
4181 - QDMA_INT_REG_MAX
4182 -};
4183 -
4184 -enum {
4185 - XSI_PCIE0_PORT,
4186 - XSI_PCIE1_PORT,
4187 - XSI_USB_PORT,
4188 - XSI_AE_PORT,
4189 - XSI_ETH_PORT,
4190 -};
4191 -
4192 -enum {
4193 - XSI_PCIE0_VIP_PORT_MASK = BIT(22),
4194 - XSI_PCIE1_VIP_PORT_MASK = BIT(23),
4195 - XSI_USB_VIP_PORT_MASK = BIT(25),
4196 - XSI_ETH_VIP_PORT_MASK = BIT(24),
4197 -};
4198 -
4199 -enum {
4200 - DEV_STATE_INITIALIZED,
4201 -};
4202 -
4203 -enum {
4204 - CDM_CRSN_QSEL_Q1 = 1,
4205 - CDM_CRSN_QSEL_Q5 = 5,
4206 - CDM_CRSN_QSEL_Q6 = 6,
4207 - CDM_CRSN_QSEL_Q15 = 15,
4208 -};
4209 -
4210 -enum {
4211 - CRSN_08 = 0x8,
4212 -	CRSN_21 = 0x15, /* keep-alive (KA) */
4213 - CRSN_22 = 0x16, /* hit bind and force route to CPU */
4214 - CRSN_24 = 0x18,
4215 - CRSN_25 = 0x19,
4216 -};
4217 -
4218 -enum {
4219 - FE_PSE_PORT_CDM1,
4220 - FE_PSE_PORT_GDM1,
4221 - FE_PSE_PORT_GDM2,
4222 - FE_PSE_PORT_GDM3,
4223 - FE_PSE_PORT_PPE1,
4224 - FE_PSE_PORT_CDM2,
4225 - FE_PSE_PORT_CDM3,
4226 - FE_PSE_PORT_CDM4,
4227 - FE_PSE_PORT_PPE2,
4228 - FE_PSE_PORT_GDM4,
4229 - FE_PSE_PORT_CDM5,
4230 - FE_PSE_PORT_DROP = 0xf,
4231 -};
4232 -
4233 -enum tx_sched_mode {
4234 - TC_SCH_WRR8,
4235 - TC_SCH_SP,
4236 - TC_SCH_WRR7,
4237 - TC_SCH_WRR6,
4238 - TC_SCH_WRR5,
4239 - TC_SCH_WRR4,
4240 - TC_SCH_WRR3,
4241 - TC_SCH_WRR2,
4242 -};
4243 -
4244 -enum trtcm_param_type {
4245 - TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
4246 - TRTCM_TOKEN_RATE_MODE,
4247 - TRTCM_BUCKETSIZE_SHIFT_MODE,
4248 - TRTCM_BUCKET_COUNTER_MODE,
4249 -};
4250 -
4251 -enum trtcm_mode_type {
4252 - TRTCM_COMMIT_MODE,
4253 - TRTCM_PEAK_MODE,
4254 -};
4255 -
4256 -enum trtcm_param {
4257 - TRTCM_TICK_SEL = BIT(0),
4258 - TRTCM_PKT_MODE = BIT(1),
4259 - TRTCM_METER_MODE = BIT(2),
4260 -};
4261 -
4262 -#define MIN_TOKEN_SIZE 4096
4263 -#define MAX_TOKEN_SIZE_OFFSET 17
4264 -#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
4265 -#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
4266 -
4267 -struct airoha_queue_entry {
4268 - union {
4269 - void *buf;
4270 - struct sk_buff *skb;
4271 - };
4272 - dma_addr_t dma_addr;
4273 - u16 dma_len;
4274 -};
4275 -
4276 -struct airoha_queue {
4277 - struct airoha_qdma *qdma;
4278 -
4279 - /* protect concurrent queue accesses */
4280 - spinlock_t lock;
4281 - struct airoha_queue_entry *entry;
4282 - struct airoha_qdma_desc *desc;
4283 - u16 head;
4284 - u16 tail;
4285 -
4286 - int queued;
4287 - int ndesc;
4288 - int free_thr;
4289 - int buf_size;
4290 -
4291 - struct napi_struct napi;
4292 - struct page_pool *page_pool;
4293 -};
4294 -
4295 -struct airoha_tx_irq_queue {
4296 - struct airoha_qdma *qdma;
4297 -
4298 - struct napi_struct napi;
4299 -
4300 - int size;
4301 - u32 *q;
4302 -};
4303 -
4304 -struct airoha_hw_stats {
4305 - /* protect concurrent hw_stats accesses */
4306 - spinlock_t lock;
4307 - struct u64_stats_sync syncp;
4308 -
4309 - /* get_stats64 */
4310 - u64 rx_ok_pkts;
4311 - u64 tx_ok_pkts;
4312 - u64 rx_ok_bytes;
4313 - u64 tx_ok_bytes;
4314 - u64 rx_multicast;
4315 - u64 rx_errors;
4316 - u64 rx_drops;
4317 - u64 tx_drops;
4318 - u64 rx_crc_error;
4319 - u64 rx_over_errors;
4320 - /* ethtool stats */
4321 - u64 tx_broadcast;
4322 - u64 tx_multicast;
4323 - u64 tx_len[7];
4324 - u64 rx_broadcast;
4325 - u64 rx_fragment;
4326 - u64 rx_jabber;
4327 - u64 rx_len[7];
4328 -};
4329 -
4330 -struct airoha_qdma {
4331 - struct airoha_eth *eth;
4332 - void __iomem *regs;
4333 -
4334 - /* protect concurrent irqmask accesses */
4335 - spinlock_t irq_lock;
4336 - u32 irqmask[QDMA_INT_REG_MAX];
4337 - int irq;
4338 -
4339 - struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
4340 -
4341 - struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
4342 - struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
4343 -
4344 - /* descriptor and packet buffers for qdma hw forward */
4345 - struct {
4346 - void *desc;
4347 - void *q;
4348 - } hfwd;
4349 -};
4350 -
4351 -struct airoha_gdm_port {
4352 - struct airoha_qdma *qdma;
4353 - struct net_device *dev;
4354 - int id;
4355 -
4356 - struct airoha_hw_stats stats;
4357 -
4358 - DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
4359 -
4360 - /* qos stats counters */
4361 - u64 cpu_tx_packets;
4362 - u64 fwd_tx_packets;
4363 -};
4364 -
4365 -struct airoha_eth {
4366 - struct device *dev;
4367 -
4368 - unsigned long state;
4369 - void __iomem *fe_regs;
4370 -
4371 - struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
4372 - struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
4373 -
4374 - struct net_device *napi_dev;
4375 -
4376 - struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
4377 - struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
4378 -};
4379 -
4380 -static u32 airoha_rr(void __iomem *base, u32 offset)
4381 -{
4382 - return readl(base + offset);
4383 -}
4384 -
4385 -static void airoha_wr(void __iomem *base, u32 offset, u32 val)
4386 -{
4387 - writel(val, base + offset);
4388 -}
4389 -
4390 -static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
4391 -{
4392 - val |= (airoha_rr(base, offset) & ~mask);
4393 - airoha_wr(base, offset, val);
4394 -
4395 - return val;
4396 -}
4397 -
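airoha_rmw() is the primitive behind every masked update in the driver: read the word, clear the field with ~mask, OR in the new bits, write back. Combined with GENMASK()/FIELD_PREP() it gives single-field updates that leave the rest of the register intact; a small sketch with made-up register names:

#include <linux/bitfield.h>

#define EXAMPLE_REG		0x0040		/* illustrative offset */
#define EXAMPLE_FIELD_MASK	GENMASK(15, 8)	/* illustrative field */

static void example_update_field(void __iomem *base, u32 val)
{
	/* only bits 15..8 change; everything else is preserved */
	airoha_rmw(base, EXAMPLE_REG, EXAMPLE_FIELD_MASK,
		   FIELD_PREP(EXAMPLE_FIELD_MASK, val));
}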
4398 -#define airoha_fe_rr(eth, offset) \
4399 - airoha_rr((eth)->fe_regs, (offset))
4400 -#define airoha_fe_wr(eth, offset, val) \
4401 - airoha_wr((eth)->fe_regs, (offset), (val))
4402 -#define airoha_fe_rmw(eth, offset, mask, val) \
4403 - airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
4404 -#define airoha_fe_set(eth, offset, val) \
4405 - airoha_rmw((eth)->fe_regs, (offset), 0, (val))
4406 -#define airoha_fe_clear(eth, offset, val) \
4407 - airoha_rmw((eth)->fe_regs, (offset), (val), 0)
4408 -
4409 -#define airoha_qdma_rr(qdma, offset) \
4410 - airoha_rr((qdma)->regs, (offset))
4411 -#define airoha_qdma_wr(qdma, offset, val) \
4412 - airoha_wr((qdma)->regs, (offset), (val))
4413 -#define airoha_qdma_rmw(qdma, offset, mask, val) \
4414 - airoha_rmw((qdma)->regs, (offset), (mask), (val))
4415 -#define airoha_qdma_set(qdma, offset, val) \
4416 - airoha_rmw((qdma)->regs, (offset), 0, (val))
4417 -#define airoha_qdma_clear(qdma, offset, val) \
4418 - airoha_rmw((qdma)->regs, (offset), (val), 0)
4419 -
4420 -static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
4421 - u32 clear, u32 set)
4422 -{
4423 - unsigned long flags;
4424 -
4425 - if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
4426 - return;
4427 -
4428 - spin_lock_irqsave(&qdma->irq_lock, flags);
4429 -
4430 - qdma->irqmask[index] &= ~clear;
4431 - qdma->irqmask[index] |= set;
4432 - airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
4433 -	/* Read the irq_enable register back to guarantee that the update
4434 -	 * above completes within the spinlock critical section.
4435 - */
4436 - airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
4437 -
4438 - spin_unlock_irqrestore(&qdma->irq_lock, flags);
4439 -}
4440 -
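The read-back of REG_INT_ENABLE in airoha_qdma_set_irqmask() is the usual posted-write flush: MMIO writes may be buffered along the interconnect, but a read from the same device cannot complete until earlier writes have landed, so the new mask is guaranteed visible before the spinlock is released. The bare idiom:

#include <linux/io.h>

static void example_write_and_flush(void __iomem *reg, u32 val)
{
	writel(val, reg);
	readl(reg);	/* flush the posted write before proceeding */
}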
4441 -static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
4442 - u32 mask)
4443 -{
4444 - airoha_qdma_set_irqmask(qdma, index, 0, mask);
4445 -}
4446 -
4447 -static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
4448 - u32 mask)
4449 -{
4450 - airoha_qdma_set_irqmask(qdma, index, mask, 0);
4451 -}
4452 -
4453 -static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
4454 -{
4455 -	/* GDM1 port on EN7581 SoC is connected to the LAN DSA switch.
4456 -	 * GDM{2,3,4} can be used as WAN ports connected to an external
4457 -	 * PHY module.
4458 - */
4459 - return port->id == 1;
4460 -}
4461 -
4462 -static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
4463 -{
4464 - struct airoha_eth *eth = port->qdma->eth;
4465 - u32 val, reg;
4466 -
4467 - reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
4468 - : REG_FE_WAN_MAC_H;
4469 - val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
4470 - airoha_fe_wr(eth, reg, val);
4471 -
4472 - val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
4473 - airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
4474 - airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
4475 -}
4476 -
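airoha_set_macaddr() splits the 6-byte address across two 24-bit register values: octets 0-2 form the high word, octets 3-5 the low word, and the low word is mirrored into both the LMIN and LMAX registers. A worked example for 00:11:22:33:44:55:

/* high word: (0x00 << 16) | (0x11 << 8) | 0x22 = 0x001122
 * low word:  (0x33 << 16) | (0x44 << 8) | 0x55 = 0x334455
 */
static u32 example_mac_hi(const u8 *addr)
{
	return (addr[0] << 16) | (addr[1] << 8) | addr[2];
}

static u32 example_mac_lo(const u8 *addr)
{
	return (addr[3] << 16) | (addr[4] << 8) | addr[5];
}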
4477 -static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
4478 - u32 val)
4479 -{
4480 - airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
4481 - FIELD_PREP(GDM_OCFQ_MASK, val));
4482 - airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
4483 - FIELD_PREP(GDM_MCFQ_MASK, val));
4484 - airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
4485 - FIELD_PREP(GDM_BCFQ_MASK, val));
4486 - airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
4487 - FIELD_PREP(GDM_UCFQ_MASK, val));
4488 -}
4489 -
4490 -static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
4491 -{
4492 - u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
4493 - u32 vip_port, cfg_addr;
4494 -
4495 - switch (port) {
4496 - case XSI_PCIE0_PORT:
4497 - vip_port = XSI_PCIE0_VIP_PORT_MASK;
4498 - cfg_addr = REG_GDM_FWD_CFG(3);
4499 - break;
4500 - case XSI_PCIE1_PORT:
4501 - vip_port = XSI_PCIE1_VIP_PORT_MASK;
4502 - cfg_addr = REG_GDM_FWD_CFG(3);
4503 - break;
4504 - case XSI_USB_PORT:
4505 - vip_port = XSI_USB_VIP_PORT_MASK;
4506 - cfg_addr = REG_GDM_FWD_CFG(4);
4507 - break;
4508 - case XSI_ETH_PORT:
4509 - vip_port = XSI_ETH_VIP_PORT_MASK;
4510 - cfg_addr = REG_GDM_FWD_CFG(4);
4511 - break;
4512 - default:
4513 - return -EINVAL;
4514 - }
4515 -
4516 - if (enable) {
4517 - airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
4518 - airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
4519 - } else {
4520 - airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
4521 - airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
4522 - }
4523 -
4524 - airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
4525 -
4526 - return 0;
4527 -}
4528 -
4529 -static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
4530 -{
4531 - const int port_list[] = {
4532 - XSI_PCIE0_PORT,
4533 - XSI_PCIE1_PORT,
4534 - XSI_USB_PORT,
4535 - XSI_ETH_PORT
4536 - };
4537 - int i, err;
4538 -
4539 - for (i = 0; i < ARRAY_SIZE(port_list); i++) {
4540 - err = airoha_set_gdm_port(eth, port_list[i], enable);
4541 - if (err)
4542 - goto error;
4543 - }
4544 -
4545 - return 0;
4546 -
4547 -error:
4548 - for (i--; i >= 0; i--)
4549 - airoha_set_gdm_port(eth, port_list[i], false);
4550 -
4551 - return err;
4552 -}
4553 -
4554 -static void airoha_fe_maccr_init(struct airoha_eth *eth)
4555 -{
4556 - int p;
4557 -
4558 - for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
4559 - airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
4560 - GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
4561 - GDM_DROP_CRC_ERR);
4562 - airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
4563 - FE_PSE_PORT_CDM1);
4564 - airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
4565 - GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
4566 - FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
4567 - FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
4568 - }
4569 -
4570 - airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
4571 - FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
4572 -
4573 - airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
4574 -}
4575 -
4576 -static void airoha_fe_vip_setup(struct airoha_eth *eth)
4577 -{
4578 - airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
4579 - airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
4580 -
4581 - airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
4582 - airoha_fe_wr(eth, REG_FE_VIP_EN(4),
4583 - PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
4584 - PATN_EN_MASK);
4585 -
4586 - airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
4587 - airoha_fe_wr(eth, REG_FE_VIP_EN(6),
4588 - PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
4589 - PATN_EN_MASK);
4590 -
4591 - airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
4592 - airoha_fe_wr(eth, REG_FE_VIP_EN(7),
4593 - PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
4594 - PATN_EN_MASK);
4595 -
4596 - /* BOOTP (0x43) */
4597 - airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
4598 - airoha_fe_wr(eth, REG_FE_VIP_EN(8),
4599 - PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
4600 - FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
4601 -
4602 - /* BOOTP (0x44) */
4603 - airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
4604 - airoha_fe_wr(eth, REG_FE_VIP_EN(9),
4605 - PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
4606 - FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
4607 -
4608 - /* ISAKMP */
4609 - airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
4610 - airoha_fe_wr(eth, REG_FE_VIP_EN(10),
4611 - PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
4612 - FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
4613 -
4614 - airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
4615 - airoha_fe_wr(eth, REG_FE_VIP_EN(11),
4616 - PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
4617 - PATN_EN_MASK);
4618 -
4619 - /* DHCPv6 */
4620 - airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
4621 - airoha_fe_wr(eth, REG_FE_VIP_EN(12),
4622 - PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
4623 - FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
4624 -
4625 - airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
4626 - airoha_fe_wr(eth, REG_FE_VIP_EN(19),
4627 - PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
4628 - PATN_EN_MASK);
4629 -
4630 - /* ETH->ETH_P_1905 (0x893a) */
4631 - airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
4632 - airoha_fe_wr(eth, REG_FE_VIP_EN(20),
4633 - PATN_FCPU_EN_MASK | PATN_EN_MASK);
4634 -
4635 - airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
4636 - airoha_fe_wr(eth, REG_FE_VIP_EN(21),
4637 - PATN_FCPU_EN_MASK | PATN_EN_MASK);
4638 -}
4639 -
4640 -static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
4641 - u32 port, u32 queue)
4642 -{
4643 - u32 val;
4644 -
4645 - airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
4646 - PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
4647 - FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
4648 - FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
4649 - val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
4650 -
4651 - return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
4652 -}
4653 -
4654 -static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
4655 - u32 port, u32 queue, u32 val)
4656 -{
4657 - airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
4658 - FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
4659 - airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
4660 - PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
4661 - PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
4662 - FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
4663 - FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
4664 - PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
4665 -}
4666 -
4667 -static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
4668 -{
4669 - u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
4670 -
4671 - return FIELD_GET(PSE_ALLRSV_MASK, val);
4672 -}
4673 -
4674 -static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
4675 - u32 port, u32 queue, u32 val)
4676 -{
4677 - u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
4678 - u32 tmp, all_rsv, fq_limit;
4679 -
4680 - airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
4681 -
4682 - /* modify all rsv */
4683 - all_rsv = airoha_fe_get_pse_all_rsv(eth);
4684 - all_rsv += (val - orig_val);
4685 - airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
4686 - FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
4687 -
4688 - /* modify hthd */
4689 - tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
4690 - fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
4691 - tmp = fq_limit - all_rsv - 0x20;
4692 - airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
4693 - PSE_SHARE_USED_HTHD_MASK,
4694 - FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
4695 -
4696 - tmp = fq_limit - all_rsv - 0x100;
4697 - airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
4698 - PSE_SHARE_USED_MTHD_MASK,
4699 - FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
4700 - tmp = (3 * tmp) >> 2;
4701 - airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
4702 - PSE_SHARE_USED_LTHD_MASK,
4703 - FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
4704 -
4705 - return 0;
4706 -}
4707 -
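The threshold update in airoha_fe_set_pse_oq_rsv() derives three share-used watermarks from the free-queue limit minus the total reservation. A worked example with illustrative numbers:

/* e.g. fq_limit = 0x1000 and all_rsv = 0x200:
 *   hthd = 0x1000 - 0x200 - 0x20  = 0xde0   (high watermark)
 *   mthd = 0x1000 - 0x200 - 0x100 = 0xd00   (middle watermark)
 *   lthd = (3 * 0xd00) >> 2       = 0x9c0   (low = 3/4 of middle)
 */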
4708 -static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
4709 -{
4710 - const u32 pse_port_num_queues[] = {
4711 - [FE_PSE_PORT_CDM1] = 6,
4712 - [FE_PSE_PORT_GDM1] = 6,
4713 - [FE_PSE_PORT_GDM2] = 32,
4714 - [FE_PSE_PORT_GDM3] = 6,
4715 - [FE_PSE_PORT_PPE1] = 4,
4716 - [FE_PSE_PORT_CDM2] = 6,
4717 - [FE_PSE_PORT_CDM3] = 8,
4718 - [FE_PSE_PORT_CDM4] = 10,
4719 - [FE_PSE_PORT_PPE2] = 4,
4720 - [FE_PSE_PORT_GDM4] = 2,
4721 - [FE_PSE_PORT_CDM5] = 2,
4722 - };
4723 - u32 all_rsv;
4724 - int q;
4725 -
4726 - all_rsv = airoha_fe_get_pse_all_rsv(eth);
4727 -	/* the hw skips the PPE2 oq reservation, so account for it here */
4728 - all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
4729 - airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
4730 -
4731 -	/* CDM1 */
4732 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
4733 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
4734 - PSE_QUEUE_RSV_PAGES);
4735 -	/* GDM1 */
4736 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
4737 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
4738 - PSE_QUEUE_RSV_PAGES);
4739 -	/* GDM2 */
4740 - for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
4741 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
4742 -	/* GDM3 */
4743 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
4744 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
4745 - PSE_QUEUE_RSV_PAGES);
4746 - /* PPE1 */
4747 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
4748 - if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
4749 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
4750 - PSE_QUEUE_RSV_PAGES);
4751 - else
4752 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
4753 - }
4754 - /* CDM2 */
4755 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
4756 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
4757 - PSE_QUEUE_RSV_PAGES);
4758 - /* CDM3 */
4759 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
4760 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
4761 - /* CDM4 */
4762 - for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
4763 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
4764 - PSE_QUEUE_RSV_PAGES);
4765 - /* PPE2 */
4766 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
4767 - if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
4768 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
4769 - PSE_QUEUE_RSV_PAGES);
4770 - else
4771 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
4772 - }
4773 -	/* GDM4 */
4774 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
4775 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
4776 - PSE_QUEUE_RSV_PAGES);
4777 - /* CDM5 */
4778 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
4779 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
4780 - PSE_QUEUE_RSV_PAGES);
4781 -}
4782 -
4783 -static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
4784 -{
4785 - int i;
4786 -
4787 - for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
4788 - int err, j;
4789 - u32 val;
4790 -
4791 - airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
4792 -
4793 - val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
4794 - MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
4795 - airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
4796 - err = read_poll_timeout(airoha_fe_rr, val,
4797 - val & MC_VLAN_CFG_CMD_DONE_MASK,
4798 - USEC_PER_MSEC, 5 * USEC_PER_MSEC,
4799 - false, eth, REG_MC_VLAN_CFG);
4800 - if (err)
4801 - return err;
4802 -
4803 - for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
4804 - airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
4805 -
4806 - val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
4807 - FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
4808 - MC_VLAN_CFG_RW_MASK;
4809 - airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
4810 - err = read_poll_timeout(airoha_fe_rr, val,
4811 - val & MC_VLAN_CFG_CMD_DONE_MASK,
4812 - USEC_PER_MSEC,
4813 - 5 * USEC_PER_MSEC, false, eth,
4814 - REG_MC_VLAN_CFG);
4815 - if (err)
4816 - return err;
4817 - }
4818 - }
4819 -
4820 - return 0;
4821 -}
4822 -
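read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read, args...) from <linux/iopoll.h> re-evaluates op(args...) into val until cond holds or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise; the VLAN-clear loop above polls REG_MC_VLAN_CFG every millisecond for up to 5 ms waiting for the CMD_DONE bit. A standalone sketch with placeholder types:

#include <linux/bits.h>
#include <linux/iopoll.h>

struct example_hw;				/* placeholder */
u32 example_read_status(struct example_hw *hw);	/* placeholder */
#define EXAMPLE_DONE_MASK	BIT(31)

static int example_wait_cmd_done(struct example_hw *hw)
{
	u32 val;

	/* poll every 1 ms, give up after 5 ms, no sleep before first read */
	return read_poll_timeout(example_read_status, val,
				 val & EXAMPLE_DONE_MASK,
				 USEC_PER_MSEC, 5 * USEC_PER_MSEC,
				 false, hw);
}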
4823 -static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
4824 -{
4825 - /* CDM1_CRSN_QSEL */
4826 - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
4827 - CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
4828 - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
4829 - CDM_CRSN_QSEL_Q1));
4830 - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
4831 - CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
4832 - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
4833 - CDM_CRSN_QSEL_Q1));
4834 - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
4835 - CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
4836 - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
4837 - CDM_CRSN_QSEL_Q1));
4838 - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
4839 - CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
4840 - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
4841 - CDM_CRSN_QSEL_Q6));
4842 - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
4843 - CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
4844 - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
4845 - CDM_CRSN_QSEL_Q1));
4846 - /* CDM2_CRSN_QSEL */
4847 - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
4848 - CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
4849 - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
4850 - CDM_CRSN_QSEL_Q1));
4851 - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
4852 - CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
4853 - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
4854 - CDM_CRSN_QSEL_Q1));
4855 - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
4856 - CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
4857 - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
4858 - CDM_CRSN_QSEL_Q1));
4859 - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
4860 - CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
4861 - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
4862 - CDM_CRSN_QSEL_Q6));
4863 - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
4864 - CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
4865 - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
4866 - CDM_CRSN_QSEL_Q1));
4867 -}
4868 -
4869 -static int airoha_fe_init(struct airoha_eth *eth)
4870 -{
4871 - airoha_fe_maccr_init(eth);
4872 -
4873 - /* PSE IQ reserve */
4874 - airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
4875 - FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
4876 - airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
4877 - PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
4878 - FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
4879 - FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
4880 -
4881 - /* enable FE copy engine for MC/KA/DPI */
4882 - airoha_fe_wr(eth, REG_FE_PCE_CFG,
4883 - PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
4884 - /* set vip queue selection to ring 1 */
4885 - airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
4886 - FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
4887 - airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
4888 - FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
4889 - /* set GDM4 source interface offset to 8 */
4890 - airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
4891 - GDM4_SPORT_OFF2_MASK |
4892 - GDM4_SPORT_OFF1_MASK |
4893 - GDM4_SPORT_OFF0_MASK,
4894 - FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
4895 - FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
4896 - FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
4897 -
4898 - /* set PSE Page as 128B */
4899 - airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
4900 - FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
4901 - FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
4902 - FE_DMA_GLO_PG_SZ_MASK);
4903 - airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
4904 - FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
4905 - FE_RST_GDM4_MBI_ARB_MASK);
4906 - usleep_range(1000, 2000);
4907 -
4908 - /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
4909 - * connect other rings to PSE Port0 OQ-0
4910 - */
4911 - airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
4912 - airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
4913 - airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
4914 - airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
4915 -
4916 - airoha_fe_vip_setup(eth);
4917 - airoha_fe_pse_ports_init(eth);
4918 -
4919 - airoha_fe_set(eth, REG_GDM_MISC_CFG,
4920 - GDM2_RDM_ACK_WAIT_PREF_MASK |
4921 - GDM2_CHN_VLD_MODE_MASK);
4922 - airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
4923 - FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
4924 -
4925 - /* init fragment and assemble Force Port */
4926 - /* NPU Core-3, NPU Bridge Channel-3 */
4927 - airoha_fe_rmw(eth, REG_IP_FRAG_FP,
4928 - IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
4929 - FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
4930 - FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
4931 - /* QDMA LAN, RX Ring-22 */
4932 - airoha_fe_rmw(eth, REG_IP_FRAG_FP,
4933 - IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
4934 - FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
4935 - FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
4936 -
4937 - airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
4938 - airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
4939 -
4940 - airoha_fe_crsn_qsel_init(eth);
4941 -
4942 - airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
4943 - airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
4944 -
4945 - /* default aging mode for mbi unlock issue */
4946 - airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
4947 - MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
4948 - FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
4949 - FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
4950 -
4951 - /* disable IFC by default */
4952 - airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
4953 -
4954 - /* enable 1:N vlan action, init vlan table */
4955 - airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
4956 -
4957 - return airoha_fe_mc_vlan_clear(eth);
4958 -}
4959 -
4960 -static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
4961 -{
4962 - enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
4963 - struct airoha_qdma *qdma = q->qdma;
4964 - struct airoha_eth *eth = qdma->eth;
4965 - int qid = q - &qdma->q_rx[0];
4966 - int nframes = 0;
4967 -
4968 - while (q->queued < q->ndesc - 1) {
4969 - struct airoha_queue_entry *e = &q->entry[q->head];
4970 - struct airoha_qdma_desc *desc = &q->desc[q->head];
4971 - struct page *page;
4972 - int offset;
4973 - u32 val;
4974 -
4975 - page = page_pool_dev_alloc_frag(q->page_pool, &offset,
4976 - q->buf_size);
4977 - if (!page)
4978 - break;
4979 -
4980 - q->head = (q->head + 1) % q->ndesc;
4981 - q->queued++;
4982 - nframes++;
4983 -
4984 - e->buf = page_address(page) + offset;
4985 - e->dma_addr = page_pool_get_dma_addr(page) + offset;
4986 - e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
4987 -
4988 - dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
4989 - dir);
4990 -
4991 - val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
4992 - WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
4993 - WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
4994 - val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
4995 - WRITE_ONCE(desc->data, cpu_to_le32(val));
4996 - WRITE_ONCE(desc->msg0, 0);
4997 - WRITE_ONCE(desc->msg1, 0);
4998 - WRITE_ONCE(desc->msg2, 0);
4999 - WRITE_ONCE(desc->msg3, 0);
5000 -
5001 - airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
5002 - RX_RING_CPU_IDX_MASK,
5003 - FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
5004 - }
5005 -
5006 - return nframes;
5007 -}
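/* A minimal standalone sketch (not the driver API) of the refill
 * arithmetic above: head advances modulo ndesc and one descriptor is
 * always left unused, so a full ring is never mistaken for an empty one.
 */
#include <stdio.h>

#define NDESC 8

int main(void)
{
	unsigned int head = 0, queued = 0, nframes = 0;

	while (queued < NDESC - 1) {	/* keep one slot as a sentinel */
		head = (head + 1) % NDESC;
		queued++;
		nframes++;
	}
	printf("refilled %u buffers, head=%u\n", nframes, head);
	return 0;
}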
5008 -
5009 -static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
5010 - struct airoha_qdma_desc *desc)
5011 -{
5012 - u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
5013 -
5014 - sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
5015 - switch (sport) {
5016 - case 0x10 ... 0x13:
5017 - port = 0;
5018 - break;
5019 - case 0x2 ... 0x4:
5020 - port = sport - 1;
5021 - break;
5022 - default:
5023 - return -EINVAL;
5024 - }
5025 -
5026 - return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
5027 -}
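/* A standalone sketch of the sport -> GDM port mapping above, using
 * the same case ranges (a GCC/clang extension the kernel relies on):
 * sport 0x10..0x13 map to port 0, sport 0x2..0x4 to ports 1..3.
 */
#include <stdio.h>

static int gdm_port(unsigned int sport)
{
	switch (sport) {
	case 0x10 ... 0x13:
		return 0;
	case 0x2 ... 0x4:
		return sport - 1;
	default:
		return -1;	/* -EINVAL in the driver */
	}
}

int main(void)
{
	const unsigned int sports[] = { 0x2, 0x4, 0x11, 0x7 };

	for (unsigned int i = 0; i < 4; i++)
		printf("sport=0x%x -> port=%d\n", sports[i],
		       gdm_port(sports[i]));
	return 0;
}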
5028 -
5029 -static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
5030 -{
5031 - enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
5032 - struct airoha_qdma *qdma = q->qdma;
5033 - struct airoha_eth *eth = qdma->eth;
5034 - int qid = q - &qdma->q_rx[0];
5035 - int done = 0;
5036 -
5037 - while (done < budget) {
5038 - struct airoha_queue_entry *e = &q->entry[q->tail];
5039 - struct airoha_qdma_desc *desc = &q->desc[q->tail];
5040 - dma_addr_t dma_addr = le32_to_cpu(desc->addr);
5041 - u32 desc_ctrl = le32_to_cpu(desc->ctrl);
5042 - struct sk_buff *skb;
5043 - int len, p;
5044 -
5045 - if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
5046 - break;
5047 -
5048 - if (!dma_addr)
5049 - break;
5050 -
5051 - len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
5052 - if (!len)
5053 - break;
5054 -
5055 - q->tail = (q->tail + 1) % q->ndesc;
5056 - q->queued--;
5057 -
5058 - dma_sync_single_for_cpu(eth->dev, dma_addr,
5059 - SKB_WITH_OVERHEAD(q->buf_size), dir);
5060 -
5061 - p = airoha_qdma_get_gdm_port(eth, desc);
5062 - if (p < 0 || !eth->ports[p]) {
5063 - page_pool_put_full_page(q->page_pool,
5064 - virt_to_head_page(e->buf),
5065 - true);
5066 - continue;
5067 - }
5068 -
5069 - skb = napi_build_skb(e->buf, q->buf_size);
5070 - if (!skb) {
5071 - page_pool_put_full_page(q->page_pool,
5072 - virt_to_head_page(e->buf),
5073 - true);
5074 - break;
5075 - }
5076 -
5077 - skb_reserve(skb, 2);
5078 - __skb_put(skb, len);
5079 - skb_mark_for_recycle(skb);
5080 - skb->dev = eth->ports[p]->dev;
5081 - skb->protocol = eth_type_trans(skb, skb->dev);
5082 - skb->ip_summed = CHECKSUM_UNNECESSARY;
5083 - skb_record_rx_queue(skb, qid);
5084 - napi_gro_receive(&q->napi, skb);
5085 -
5086 - done++;
5087 - }
5088 - airoha_qdma_fill_rx_queue(q);
5089 -
5090 - return done;
5091 -}
5092 -
5093 -static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
5094 -{
5095 - struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
5096 - int cur, done = 0;
5097 -
5098 - do {
5099 - cur = airoha_qdma_rx_process(q, budget - done);
5100 - done += cur;
5101 - } while (cur && done < budget);
5102 -
5103 - if (done < budget && napi_complete(napi))
5104 - airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
5105 - RX_DONE_INT_MASK);
5106 -
5107 - return done;
5108 -}
5109 -
5110 -static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
5111 - struct airoha_qdma *qdma, int ndesc)
5112 -{
5113 - const struct page_pool_params pp_params = {
5114 - .order = 0,
5115 - .pool_size = 256,
5116 - .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
5117 - PP_FLAG_PAGE_FRAG,
5118 - .dma_dir = DMA_FROM_DEVICE,
5119 - .max_len = PAGE_SIZE,
5120 - .nid = NUMA_NO_NODE,
5121 - .dev = qdma->eth->dev,
5122 - .napi = &q->napi,
5123 - };
5124 - struct airoha_eth *eth = qdma->eth;
5125 - int qid = q - &qdma->q_rx[0], thr;
5126 - dma_addr_t dma_addr;
5127 -
5128 - q->buf_size = PAGE_SIZE / 2;
5129 - q->ndesc = ndesc;
5130 - q->qdma = qdma;
5131 -
5132 - q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
5133 - GFP_KERNEL);
5134 - if (!q->entry)
5135 - return -ENOMEM;
5136 -
5137 - q->page_pool = page_pool_create(&pp_params);
5138 - if (IS_ERR(q->page_pool)) {
5139 - int err = PTR_ERR(q->page_pool);
5140 -
5141 - q->page_pool = NULL;
5142 - return err;
5143 - }
5144 -
5145 - q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
5146 - &dma_addr, GFP_KERNEL);
5147 - if (!q->desc)
5148 - return -ENOMEM;
5149 -
5150 - netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
5151 -
5152 - airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
5153 - airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
5154 - RX_RING_SIZE_MASK,
5155 - FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
5156 -
5157 - thr = clamp(ndesc >> 3, 1, 32);
5158 - airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
5159 - FIELD_PREP(RX_RING_THR_MASK, thr));
5160 - airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
5161 - FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
5162 -
5163 - airoha_qdma_fill_rx_queue(q);
5164 -
5165 - return 0;
5166 -}
5167 -
5168 -static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
5169 -{
5170 - struct airoha_eth *eth = q->qdma->eth;
5171 -
5172 - while (q->queued) {
5173 - struct airoha_queue_entry *e = &q->entry[q->tail];
5174 - struct page *page = virt_to_head_page(e->buf);
5175 -
5176 - dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
5177 - page_pool_get_dma_dir(q->page_pool));
5178 - page_pool_put_full_page(q->page_pool, page, false);
5179 - q->tail = (q->tail + 1) % q->ndesc;
5180 - q->queued--;
5181 - }
5182 -}
5183 -
5184 -static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
5185 -{
5186 - int i;
5187 -
5188 - for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
5189 - int err;
5190 -
5191 - if (!(RX_DONE_INT_MASK & BIT(i))) {
5192 - /* rx-queue not bound to an irq */
5193 - continue;
5194 - }
5195 -
5196 - err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
5197 - RX_DSCP_NUM(i));
5198 - if (err)
5199 - return err;
5200 - }
5201 -
5202 - return 0;
5203 -}
5204 -
5205 -static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
5206 -{
5207 - struct airoha_tx_irq_queue *irq_q;
5208 - int id, done = 0, irq_queued;
5209 - struct airoha_qdma *qdma;
5210 - struct airoha_eth *eth;
5211 - u32 status, head;
5212 -
5213 - irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
5214 - qdma = irq_q->qdma;
5215 - id = irq_q - &qdma->q_tx_irq[0];
5216 - eth = qdma->eth;
5217 -
5218 - status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
5219 - head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
5220 - head = head % irq_q->size;
5221 - irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
5222 -
5223 - while (irq_queued > 0 && done < budget) {
5224 - u32 qid, val = irq_q->q[head];
5225 - struct airoha_qdma_desc *desc;
5226 - struct airoha_queue_entry *e;
5227 - struct airoha_queue *q;
5228 - u32 index, desc_ctrl;
5229 - struct sk_buff *skb;
5230 -
5231 - if (val == 0xff)
5232 - break;
5233 -
5234 - irq_q->q[head] = 0xff; /* mark as done */
5235 - head = (head + 1) % irq_q->size;
5236 - irq_queued--;
5237 - done++;
5238 -
5239 - qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
5240 - if (qid >= ARRAY_SIZE(qdma->q_tx))
5241 - continue;
5242 -
5243 - q = &qdma->q_tx[qid];
5244 - if (!q->ndesc)
5245 - continue;
5246 -
5247 - index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
5248 - if (index >= q->ndesc)
5249 - continue;
5250 -
5251 - spin_lock_bh(&q->lock);
5252 -
5253 - if (!q->queued)
5254 - goto unlock;
5255 -
5256 - desc = &q->desc[index];
5257 - desc_ctrl = le32_to_cpu(desc->ctrl);
5258 -
5259 - if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
5260 - !(desc_ctrl & QDMA_DESC_DROP_MASK))
5261 - goto unlock;
5262 -
5263 - e = &q->entry[index];
5264 - skb = e->skb;
5265 -
5266 - dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
5267 - DMA_TO_DEVICE);
5268 - memset(e, 0, sizeof(*e));
5269 - WRITE_ONCE(desc->msg0, 0);
5270 - WRITE_ONCE(desc->msg1, 0);
5271 - q->queued--;
5272 -
5273 - /* the completion ring can report out-of-order indexes if hw QoS
5274 - * is enabled and packets with different priorities are queued
5275 - * to the same DMA ring. Account for possible out-of-order
5276 - * reports when incrementing the DMA ring tail pointer
5277 - */
5278 - while (q->tail != q->head && !q->entry[q->tail].dma_addr)
5279 - q->tail = (q->tail + 1) % q->ndesc;
5280 -
5281 - if (skb) {
5282 - u16 queue = skb_get_queue_mapping(skb);
5283 - struct netdev_queue *txq;
5284 -
5285 - txq = netdev_get_tx_queue(skb->dev, queue);
5286 - netdev_tx_completed_queue(txq, 1, skb->len);
5287 - if (netif_tx_queue_stopped(txq) &&
5288 - q->ndesc - q->queued >= q->free_thr)
5289 - netif_tx_wake_queue(txq);
5290 -
5291 - dev_kfree_skb_any(skb);
5292 - }
5293 -unlock:
5294 - spin_unlock_bh(&q->lock);
5295 - }
5296 -
5297 - if (done) {
5298 - int i, len = done >> 7;
5299 -
5300 - for (i = 0; i < len; i++)
5301 - airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
5302 - IRQ_CLEAR_LEN_MASK, 0x80);
5303 - airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
5304 - IRQ_CLEAR_LEN_MASK, (done & 0x7f));
5305 - }
5306 -
5307 - if (done < budget && napi_complete(napi))
5308 - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
5309 - TX_DONE_INT_MASK(id));
5310 -
5311 - return done;
5312 -}
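/* A standalone sketch (assumed layout, not the driver structs) of the
 * out-of-order completion handling above: the tail only advances past
 * entries already unmapped (dma_addr == 0), never past a pending one.
 */
#include <stdio.h>

#define NDESC 8

int main(void)
{
	unsigned long long dma_addr[NDESC] = { 1, 1, 1, 1 };
	unsigned int tail = 0, head = 4;

	dma_addr[1] = 0;	/* index 1 completes before index 0 */
	while (tail != head && !dma_addr[tail])
		tail = (tail + 1) % NDESC;
	printf("tail=%u (still blocked by pending index 0)\n", tail);

	dma_addr[0] = 0;	/* index 0 completes as well */
	while (tail != head && !dma_addr[tail])
		tail = (tail + 1) % NDESC;
	printf("tail=%u (advanced past both)\n", tail);
	return 0;
}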
5313 -
5314 -static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
5315 - struct airoha_qdma *qdma, int size)
5316 -{
5317 - struct airoha_eth *eth = qdma->eth;
5318 - int i, qid = q - &qdma->q_tx[0];
5319 - dma_addr_t dma_addr;
5320 -
5321 - spin_lock_init(&q->lock);
5322 - q->ndesc = size;
5323 - q->qdma = qdma;
5324 - q->free_thr = 1 + MAX_SKB_FRAGS;
5325 -
5326 - q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
5327 - GFP_KERNEL);
5328 - if (!q->entry)
5329 - return -ENOMEM;
5330 -
5331 - q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
5332 - &dma_addr, GFP_KERNEL);
5333 - if (!q->desc)
5334 - return -ENOMEM;
5335 -
5336 - for (i = 0; i < q->ndesc; i++) {
5337 - u32 val;
5338 -
5339 - val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
5340 - WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
5341 - }
5342 -
5343 - /* xmit ring drop default setting */
5344 - airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
5345 - TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
5346 -
5347 - airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
5348 - airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
5349 - FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
5350 - airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
5351 - FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
5352 -
5353 - return 0;
5354 -}
5355 -
5356 -static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
5357 - struct airoha_qdma *qdma, int size)
5358 -{
5359 - int id = irq_q - &qdma->q_tx_irq[0];
5360 - struct airoha_eth *eth = qdma->eth;
5361 - dma_addr_t dma_addr;
5362 -
5363 - netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
5364 - airoha_qdma_tx_napi_poll);
5365 - irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
5366 - &dma_addr, GFP_KERNEL);
5367 - if (!irq_q->q)
5368 - return -ENOMEM;
5369 -
5370 - memset(irq_q->q, 0xff, size * sizeof(u32));
5371 - irq_q->size = size;
5372 - irq_q->qdma = qdma;
5373 -
5374 - airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
5375 - airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
5376 - FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
5377 - airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
5378 - FIELD_PREP(TX_IRQ_THR_MASK, 1));
5379 -
5380 - return 0;
5381 -}
5382 -
5383 -static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
5384 -{
5385 - int i, err;
5386 -
5387 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
5388 - err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
5389 - IRQ_QUEUE_LEN(i));
5390 - if (err)
5391 - return err;
5392 - }
5393 -
5394 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
5395 - err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
5396 - TX_DSCP_NUM);
5397 - if (err)
5398 - return err;
5399 - }
5400 -
5401 - return 0;
5402 -}
5403 -
5404 -static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
5405 -{
5406 - struct airoha_eth *eth = q->qdma->eth;
5407 -
5408 - spin_lock_bh(&q->lock);
5409 - while (q->queued) {
5410 - struct airoha_queue_entry *e = &q->entry[q->tail];
5411 -
5412 - dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
5413 - DMA_TO_DEVICE);
5414 - dev_kfree_skb_any(e->skb);
5415 - e->skb = NULL;
5416 -
5417 - q->tail = (q->tail + 1) % q->ndesc;
5418 - q->queued--;
5419 - }
5420 - spin_unlock_bh(&q->lock);
5421 -}
5422 -
5423 -static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
5424 -{
5425 - struct airoha_eth *eth = qdma->eth;
5426 - dma_addr_t dma_addr;
5427 - u32 status;
5428 - int size;
5429 -
5430 - size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
5431 - qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
5432 - GFP_KERNEL);
5433 - if (!qdma->hfwd.desc)
5434 - return -ENOMEM;
5435 -
5436 - airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
5437 -
5438 - size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
5439 - qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
5440 - GFP_KERNEL);
5441 - if (!qdma->hfwd.q)
5442 - return -ENOMEM;
5443 -
5444 - airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
5445 -
5446 - airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
5447 - HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
5448 - FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
5449 - airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
5450 - FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
5451 - airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
5452 - LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
5453 - HW_FWD_DESC_NUM_MASK,
5454 - FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
5455 - LMGR_INIT_START);
5456 -
5457 - return read_poll_timeout(airoha_qdma_rr, status,
5458 - !(status & LMGR_INIT_START), USEC_PER_MSEC,
5459 - 30 * USEC_PER_MSEC, true, qdma,
5460 - REG_LMGR_INIT_CFG);
5461 -}
5462 -
5463 -static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
5464 -{
5465 - airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
5466 - airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
5467 -
5468 - airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
5469 - PSE_BUF_ESTIMATE_EN_MASK);
5470 -
5471 - airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
5472 - EGRESS_RATE_METER_EN_MASK |
5473 - EGRESS_RATE_METER_EQ_RATE_EN_MASK);
5474 - /* 2047us x 31 = 63.457ms */
5475 - airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
5476 - EGRESS_RATE_METER_WINDOW_SZ_MASK,
5477 - FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
5478 - airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
5479 - EGRESS_RATE_METER_TIMESLICE_MASK,
5480 - FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
5481 -
5482 - /* ratelimit init */
5483 - airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
5484 - /* fast-tick 25us */
5485 - airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
5486 - FIELD_PREP(GLB_FAST_TICK_MASK, 25));
5487 - airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
5488 - FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
5489 -
5490 - airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
5491 - airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
5492 - FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
5493 - airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
5494 - EGRESS_SLOW_TICK_RATIO_MASK,
5495 - FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
5496 -
5497 - airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
5498 - airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
5499 - INGRESS_TRTCM_MODE_MASK);
5500 - airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
5501 - FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
5502 - airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
5503 - INGRESS_SLOW_TICK_RATIO_MASK,
5504 - FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
5505 -
5506 - airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
5507 - airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
5508 - FIELD_PREP(SLA_FAST_TICK_MASK, 25));
5509 - airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
5510 - FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
5511 -}
5512 -
5513 -static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
5514 -{
5515 - int i;
5516 -
5517 - for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
5518 - /* Tx-cpu transferred count */
5519 - airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
5520 - airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
5521 - CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
5522 - CNTR_ALL_DSCP_RING_EN_MASK |
5523 - FIELD_PREP(CNTR_CHAN_MASK, i));
5524 - /* Tx-fwd transferred count */
5525 - airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
5526 - airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
5527 - CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
5528 - CNTR_ALL_DSCP_RING_EN_MASK |
5529 - FIELD_PREP(CNTR_SRC_MASK, 1) |
5530 - FIELD_PREP(CNTR_CHAN_MASK, i));
5531 - }
5532 -}
5533 -
5534 -static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
5535 -{
5536 - int i;
5537 -
5538 - /* clear pending irqs */
5539 - for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
5540 - airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
5541 -
5542 - /* setup irqs */
5543 - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
5544 - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
5545 - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
5546 -
5547 - /* setup irq binding */
5548 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
5549 - if (!qdma->q_tx[i].ndesc)
5550 - continue;
5551 -
5552 - if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
5553 - airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
5554 - TX_RING_IRQ_BLOCKING_CFG_MASK);
5555 - else
5556 - airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
5557 - TX_RING_IRQ_BLOCKING_CFG_MASK);
5558 - }
5559 -
5560 - airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
5561 - GLOBAL_CFG_RX_2B_OFFSET_MASK |
5562 - FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
5563 - GLOBAL_CFG_CPU_TXR_RR_MASK |
5564 - GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
5565 - GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
5566 - GLOBAL_CFG_MULTICAST_EN_MASK |
5567 - GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
5568 - GLOBAL_CFG_TX_WB_DONE_MASK |
5569 - FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
5570 -
5571 - airoha_qdma_init_qos(qdma);
5572 -
5573 - /* disable qdma rx delay interrupt */
5574 - for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
5575 - if (!qdma->q_rx[i].ndesc)
5576 - continue;
5577 -
5578 - airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
5579 - RX_DELAY_INT_MASK);
5580 - }
5581 -
5582 - airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
5583 - TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
5584 - airoha_qdma_init_qos_stats(qdma);
5585 -
5586 - return 0;
5587 -}
5588 -
5589 -static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
5590 -{
5591 - struct airoha_qdma *qdma = dev_instance;
5592 - u32 intr[ARRAY_SIZE(qdma->irqmask)];
5593 - int i;
5594 -
5595 - for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
5596 - intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
5597 - intr[i] &= qdma->irqmask[i];
5598 - airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
5599 - }
5600 -
5601 - if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
5602 - return IRQ_NONE;
5603 -
5604 - if (intr[1] & RX_DONE_INT_MASK) {
5605 - airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
5606 - RX_DONE_INT_MASK);
5607 -
5608 - for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
5609 - if (!qdma->q_rx[i].ndesc)
5610 - continue;
5611 -
5612 - if (intr[1] & BIT(i))
5613 - napi_schedule(&qdma->q_rx[i].napi);
5614 - }
5615 - }
5616 -
5617 - if (intr[0] & INT_TX_MASK) {
5618 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
5619 - if (!(intr[0] & TX_DONE_INT_MASK(i)))
5620 - continue;
5621 -
5622 - airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
5623 - TX_DONE_INT_MASK(i));
5624 - napi_schedule(&qdma->q_tx_irq[i].napi);
5625 - }
5626 - }
5627 -
5628 - return IRQ_HANDLED;
5629 -}
5630 -
5631 -static int airoha_qdma_init(struct platform_device *pdev,
5632 - struct airoha_eth *eth,
5633 - struct airoha_qdma *qdma)
5634 -{
5635 - int err, id = qdma - &eth->qdma[0];
5636 - const char *res;
5637 -
5638 - spin_lock_init(&qdma->irq_lock);
5639 - qdma->eth = eth;
5640 -
5641 - res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
5642 - if (!res)
5643 - return -ENOMEM;
5644 -
5645 - qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
5646 - if (IS_ERR(qdma->regs))
5647 - return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
5648 - "failed to iomap qdma%d regs\n", id);
5649 -
5650 - qdma->irq = platform_get_irq(pdev, 4 * id);
5651 - if (qdma->irq < 0)
5652 - return qdma->irq;
5653 -
5654 - err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
5655 - IRQF_SHARED, KBUILD_MODNAME, qdma);
5656 - if (err)
5657 - return err;
5658 -
5659 - err = airoha_qdma_init_rx(qdma);
5660 - if (err)
5661 - return err;
5662 -
5663 - err = airoha_qdma_init_tx(qdma);
5664 - if (err)
5665 - return err;
5666 -
5667 - err = airoha_qdma_init_hfwd_queues(qdma);
5668 - if (err)
5669 - return err;
5670 -
5671 - return airoha_qdma_hw_init(qdma);
5672 -}
5673 -
5674 -static int airoha_hw_init(struct platform_device *pdev,
5675 - struct airoha_eth *eth)
5676 -{
5677 - int err, i;
5678 -
5679 - /* disable xsi */
5680 - err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
5681 - eth->xsi_rsts);
5682 - if (err)
5683 - return err;
5684 -
5685 - err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
5686 - if (err)
5687 - return err;
5688 -
5689 - msleep(20);
5690 - err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
5691 - if (err)
5692 - return err;
5693 -
5694 - msleep(20);
5695 - err = airoha_fe_init(eth);
5696 - if (err)
5697 - return err;
5698 -
5699 - for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
5700 - err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
5701 - if (err)
5702 - return err;
5703 - }
5704 -
5705 - set_bit(DEV_STATE_INITIALIZED, &eth->state);
5706 -
5707 - return 0;
5708 -}
5709 -
5710 -static void airoha_hw_cleanup(struct airoha_qdma *qdma)
5711 -{
5712 - int i;
5713 -
5714 - for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
5715 - if (!qdma->q_rx[i].ndesc)
5716 - continue;
5717 -
5718 - netif_napi_del(&qdma->q_rx[i].napi);
5719 - airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
5720 - if (qdma->q_rx[i].page_pool)
5721 - page_pool_destroy(qdma->q_rx[i].page_pool);
5722 - }
5723 -
5724 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
5725 - netif_napi_del(&qdma->q_tx_irq[i].napi);
5726 -
5727 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
5728 - if (!qdma->q_tx[i].ndesc)
5729 - continue;
5730 -
5731 - airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
5732 - }
5733 -}
5734 -
5735 -static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
5736 -{
5737 - int i;
5738 -
5739 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
5740 - napi_enable(&qdma->q_tx_irq[i].napi);
5741 -
5742 - for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
5743 - if (!qdma->q_rx[i].ndesc)
5744 - continue;
5745 -
5746 - napi_enable(&qdma->q_rx[i].napi);
5747 - }
5748 -}
5749 -
5750 -static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
5751 -{
5752 - int i;
5753 -
5754 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
5755 - napi_disable(&qdma->q_tx_irq[i].napi);
5756 -
5757 - for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
5758 - if (!qdma->q_rx[i].ndesc)
5759 - continue;
5760 -
5761 - napi_disable(&qdma->q_rx[i].napi);
5762 - }
5763 -}
5764 -
5765 -static void airoha_update_hw_stats(struct airoha_gdm_port *port)
5766 -{
5767 - struct airoha_eth *eth = port->qdma->eth;
5768 - u32 val, i = 0;
5769 -
5770 - spin_lock(&port->stats.lock);
5771 - u64_stats_update_begin(&port->stats.syncp);
5772 -
5773 - /* TX */
5774 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
5775 - port->stats.tx_ok_pkts += ((u64)val << 32);
5776 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
5777 - port->stats.tx_ok_pkts += val;
5778 -
5779 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
5780 - port->stats.tx_ok_bytes += ((u64)val << 32);
5781 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
5782 - port->stats.tx_ok_bytes += val;
5783 -
5784 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
5785 - port->stats.tx_drops += val;
5786 -
5787 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
5788 - port->stats.tx_broadcast += val;
5789 -
5790 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
5791 - port->stats.tx_multicast += val;
5792 -
5793 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
5794 - port->stats.tx_len[i] += val;
5795 -
5796 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
5797 - port->stats.tx_len[i] += ((u64)val << 32);
5798 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
5799 - port->stats.tx_len[i++] += val;
5800 -
5801 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
5802 - port->stats.tx_len[i] += ((u64)val << 32);
5803 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
5804 - port->stats.tx_len[i++] += val;
5805 -
5806 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
5807 - port->stats.tx_len[i] += ((u64)val << 32);
5808 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
5809 - port->stats.tx_len[i++] += val;
5810 -
5811 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
5812 - port->stats.tx_len[i] += ((u64)val << 32);
5813 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
5814 - port->stats.tx_len[i++] += val;
5815 -
5816 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
5817 - port->stats.tx_len[i] += ((u64)val << 32);
5818 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
5819 - port->stats.tx_len[i++] += val;
5820 -
5821 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
5822 - port->stats.tx_len[i] += ((u64)val << 32);
5823 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
5824 - port->stats.tx_len[i++] += val;
5825 -
5826 - val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
5827 - port->stats.tx_len[i++] += val;
5828 -
5829 - /* RX */
5830 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
5831 - port->stats.rx_ok_pkts += ((u64)val << 32);
5832 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
5833 - port->stats.rx_ok_pkts += val;
5834 -
5835 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
5836 - port->stats.rx_ok_bytes += ((u64)val << 32);
5837 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
5838 - port->stats.rx_ok_bytes += val;
5839 -
5840 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
5841 - port->stats.rx_drops += val;
5842 -
5843 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
5844 - port->stats.rx_broadcast += val;
5845 -
5846 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
5847 - port->stats.rx_multicast += val;
5848 -
5849 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
5850 - port->stats.rx_errors += val;
5851 -
5852 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
5853 - port->stats.rx_crc_error += val;
5854 -
5855 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
5856 - port->stats.rx_over_errors += val;
5857 -
5858 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
5859 - port->stats.rx_fragment += val;
5860 -
5861 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
5862 - port->stats.rx_jabber += val;
5863 -
5864 - i = 0;
5865 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
5866 - port->stats.rx_len[i] += val;
5867 -
5868 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
5869 - port->stats.rx_len[i] += ((u64)val << 32);
5870 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
5871 - port->stats.rx_len[i++] += val;
5872 -
5873 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
5874 - port->stats.rx_len[i] += ((u64)val << 32);
5875 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
5876 - port->stats.rx_len[i++] += val;
5877 -
5878 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
5879 - port->stats.rx_len[i] += ((u64)val << 32);
5880 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
5881 - port->stats.rx_len[i++] += val;
5882 -
5883 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
5884 - port->stats.rx_len[i] += ((u64)val << 32);
5885 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
5886 - port->stats.rx_len[i++] += val;
5887 -
5888 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
5889 - port->stats.rx_len[i] += ((u64)val << 32);
5890 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
5891 - port->stats.rx_len[i++] += val;
5892 -
5893 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
5894 - port->stats.rx_len[i] += ((u64)val << 32);
5895 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
5896 - port->stats.rx_len[i++] += val;
5897 -
5898 - val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
5899 - port->stats.rx_len[i++] += val;
5900 -
5901 - /* reset mib counters */
5902 - airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
5903 - FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
5904 -
5905 - u64_stats_update_end(&port->stats.syncp);
5906 - spin_unlock(&port->stats.lock);
5907 -}
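/* A sketch (hypothetical register values) of the H/L counter pairs
 * read above: the high 32-bit word is shifted into the upper half and
 * the low word is added, accumulating into a 64-bit statistic.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hi = 0x2, lo = 0x80000000;	/* assumed reads */
	uint64_t pkts = 0;

	pkts += (uint64_t)hi << 32;
	pkts += lo;
	printf("counter = %llu\n", (unsigned long long)pkts);
	return 0;
}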
5908 -
5909 -static int airoha_dev_open(struct net_device *dev)
5910 -{
5911 - struct airoha_gdm_port *port = netdev_priv(dev);
5912 - struct airoha_qdma *qdma = port->qdma;
5913 - int err;
5914 -
5915 - netif_tx_start_all_queues(dev);
5916 - err = airoha_set_gdm_ports(qdma->eth, true);
5917 - if (err)
5918 - return err;
5919 -
5920 - if (netdev_uses_dsa(dev))
5921 - airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
5922 - GDM_STAG_EN_MASK);
5923 - else
5924 - airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
5925 - GDM_STAG_EN_MASK);
5926 -
5927 - airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
5928 - GLOBAL_CFG_TX_DMA_EN_MASK |
5929 - GLOBAL_CFG_RX_DMA_EN_MASK);
5930 -
5931 - return 0;
5932 -}
5933 -
5934 -static int airoha_dev_stop(struct net_device *dev)
5935 -{
5936 - struct airoha_gdm_port *port = netdev_priv(dev);
5937 - struct airoha_qdma *qdma = port->qdma;
5938 - int i, err;
5939 -
5940 - netif_tx_disable(dev);
5941 - err = airoha_set_gdm_ports(qdma->eth, false);
5942 - if (err)
5943 - return err;
5944 -
5945 - airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
5946 - GLOBAL_CFG_TX_DMA_EN_MASK |
5947 - GLOBAL_CFG_RX_DMA_EN_MASK);
5948 -
5949 - for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
5950 - if (!qdma->q_tx[i].ndesc)
5951 - continue;
5952 -
5953 - airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
5954 - netdev_tx_reset_subqueue(dev, i);
5955 - }
5956 -
5957 - return 0;
5958 -}
5959 -
5960 -static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
5961 -{
5962 - struct airoha_gdm_port *port = netdev_priv(dev);
5963 - int err;
5964 -
5965 - err = eth_mac_addr(dev, p);
5966 - if (err)
5967 - return err;
5968 -
5969 - airoha_set_macaddr(port, dev->dev_addr);
5970 -
5971 - return 0;
5972 -}
5973 -
5974 -static int airoha_dev_init(struct net_device *dev)
5975 -{
5976 - struct airoha_gdm_port *port = netdev_priv(dev);
5977 -
5978 - airoha_set_macaddr(port, dev->dev_addr);
5979 -
5980 - return 0;
5981 -}
5982 -
5983 -static void airoha_dev_get_stats64(struct net_device *dev,
5984 - struct rtnl_link_stats64 *storage)
5985 -{
5986 - struct airoha_gdm_port *port = netdev_priv(dev);
5987 - unsigned int start;
5988 -
5989 - airoha_update_hw_stats(port);
5990 - do {
5991 - start = u64_stats_fetch_begin(&port->stats.syncp);
5992 - storage->rx_packets = port->stats.rx_ok_pkts;
5993 - storage->tx_packets = port->stats.tx_ok_pkts;
5994 - storage->rx_bytes = port->stats.rx_ok_bytes;
5995 - storage->tx_bytes = port->stats.tx_ok_bytes;
5996 - storage->multicast = port->stats.rx_multicast;
5997 - storage->rx_errors = port->stats.rx_errors;
5998 - storage->rx_dropped = port->stats.rx_drops;
5999 - storage->tx_dropped = port->stats.tx_drops;
6000 - storage->rx_crc_errors = port->stats.rx_crc_error;
6001 - storage->rx_over_errors = port->stats.rx_over_errors;
6002 - } while (u64_stats_fetch_retry(&port->stats.syncp, start));
6003 -}
6004 -
6005 -static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
6006 - struct net_device *sb_dev)
6007 -{
6008 - struct airoha_gdm_port *port = netdev_priv(dev);
6009 - int queue, channel;
6010 -
6011 - /* For DSA devices, select the QoS channel according to the DSA user
6012 - * port index; rely on the port id otherwise. Select the QoS queue
6013 - * based on the skb priority.
6014 - */
6015 - channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
6016 - channel = channel % AIROHA_NUM_QOS_CHANNELS;
6017 - queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
6018 - queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
6019 -
6020 - return queue < dev->num_tx_queues ? queue : 0;
6021 -}
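/* A worked example of the queue selection above; the channel and queue
 * counts are assumptions for illustration (8 QoS queues per channel).
 */
#include <stdio.h>

#define NUM_QOS_CHANNELS 32	/* assumed */
#define NUM_QOS_QUEUES 8	/* assumed */

int main(void)
{
	unsigned int port_id = 3, priority = 5;	/* non-DSA case */
	unsigned int channel = port_id % NUM_QOS_CHANNELS;
	unsigned int queue = (priority - 1) % NUM_QOS_QUEUES;

	/* channel selects the queue group, priority the queue inside it */
	printf("tx queue = %u\n", channel * NUM_QOS_QUEUES + queue);
	return 0;
}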
6022 -
6023 -static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
6024 - struct net_device *dev)
6025 -{
6026 - struct airoha_gdm_port *port = netdev_priv(dev);
6027 - u32 nr_frags = 1 + skb_shinfo(skb)->nr_frags;
6028 - u32 msg0, msg1, len = skb_headlen(skb);
6029 - struct airoha_qdma *qdma = port->qdma;
6030 - struct netdev_queue *txq;
6031 - struct airoha_queue *q;
6032 - void *data = skb->data;
6033 - int i, qid;
6034 - u16 index;
6035 - u8 fport;
6036 -
6037 - qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
6038 - msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
6039 - qid / AIROHA_NUM_QOS_QUEUES) |
6040 - FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
6041 - qid % AIROHA_NUM_QOS_QUEUES);
6042 - if (skb->ip_summed == CHECKSUM_PARTIAL)
6043 - msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
6044 - FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
6045 - FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
6046 -
6047 - /* TSO: fill MSS info in tcp checksum field */
6048 - if (skb_is_gso(skb)) {
6049 - if (skb_cow_head(skb, 0))
6050 - goto error;
6051 -
6052 - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
6053 - SKB_GSO_TCPV6)) {
6054 - __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
6055 -
6056 - tcp_hdr(skb)->check = (__force __sum16)csum;
6057 - msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
6058 - }
6059 - }
6060 -
6061 - fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
6062 - msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
6063 - FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
6064 -
6065 - q = &qdma->q_tx[qid];
6066 - if (WARN_ON_ONCE(!q->ndesc))
6067 - goto error;
6068 -
6069 - spin_lock_bh(&q->lock);
6070 -
6071 - txq = netdev_get_tx_queue(dev, qid);
6072 - if (q->queued + nr_frags > q->ndesc) {
6073 - /* not enough space in the queue */
6074 - netif_tx_stop_queue(txq);
6075 - spin_unlock_bh(&q->lock);
6076 - return NETDEV_TX_BUSY;
6077 - }
6078 -
6079 - index = q->head;
6080 - for (i = 0; i < nr_frags; i++) {
6081 - struct airoha_qdma_desc *desc = &q->desc[index];
6082 - struct airoha_queue_entry *e = &q->entry[index];
6083 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6084 - dma_addr_t addr;
6085 - u32 val;
6086 -
6087 - addr = dma_map_single(dev->dev.parent, data, len,
6088 - DMA_TO_DEVICE);
6089 - if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
6090 - goto error_unmap;
6091 -
6092 - index = (index + 1) % q->ndesc;
6093 -
6094 - val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
6095 - if (i < nr_frags - 1)
6096 - val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
6097 - WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
6098 - WRITE_ONCE(desc->addr, cpu_to_le32(addr));
6099 - val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
6100 - WRITE_ONCE(desc->data, cpu_to_le32(val));
6101 - WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
6102 - WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
6103 - WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
6104 -
6105 - e->skb = i ? NULL : skb;
6106 - e->dma_addr = addr;
6107 - e->dma_len = len;
6108 -
6109 - data = skb_frag_address(frag);
6110 - len = skb_frag_size(frag);
6111 - }
6112 -
6113 - q->head = index;
6114 - q->queued += i;
6115 -
6116 - skb_tx_timestamp(skb);
6117 - netdev_tx_sent_queue(txq, skb->len);
6118 -
6119 - if (netif_xmit_stopped(txq) || !netdev_xmit_more())
6120 - airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
6121 - TX_RING_CPU_IDX_MASK,
6122 - FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
6123 -
6124 - if (q->ndesc - q->queued < q->free_thr)
6125 - netif_tx_stop_queue(txq);
6126 -
6127 - spin_unlock_bh(&q->lock);
6128 -
6129 - return NETDEV_TX_OK;
6130 -
6131 -error_unmap:
6132 - for (i--; i >= 0; i--) {
6133 - index = (q->head + i) % q->ndesc;
6134 - dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
6135 - q->entry[index].dma_len, DMA_TO_DEVICE);
6136 - }
6137 -
6138 - spin_unlock_bh(&q->lock);
6139 -error:
6140 - dev_kfree_skb_any(skb);
6141 - dev->stats.tx_dropped++;
6142 -
6143 - return NETDEV_TX_OK;
6144 -}
6145 -
6146 -static void airoha_ethtool_get_drvinfo(struct net_device *dev,
6147 - struct ethtool_drvinfo *info)
6148 -{
6149 - struct airoha_gdm_port *port = netdev_priv(dev);
6150 - struct airoha_eth *eth = port->qdma->eth;
6151 -
6152 - strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
6153 - strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
6154 -}
6155 -
6156 -static void airoha_ethtool_get_mac_stats(struct net_device *dev,
6157 - struct ethtool_eth_mac_stats *stats)
6158 -{
6159 - struct airoha_gdm_port *port = netdev_priv(dev);
6160 - unsigned int start;
6161 -
6162 - airoha_update_hw_stats(port);
6163 - do {
6164 - start = u64_stats_fetch_begin(&port->stats.syncp);
6165 - stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
6166 - stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
6167 - stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
6168 - } while (u64_stats_fetch_retry(&port->stats.syncp, start));
6169 -}
6170 -
6171 -static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
6172 - { 0, 64 },
6173 - { 65, 127 },
6174 - { 128, 255 },
6175 - { 256, 511 },
6176 - { 512, 1023 },
6177 - { 1024, 1518 },
6178 - { 1519, 10239 },
6179 - {},
6180 -};
6181 -
6182 -static void
6183 -airoha_ethtool_get_rmon_stats(struct net_device *dev,
6184 - struct ethtool_rmon_stats *stats,
6185 - const struct ethtool_rmon_hist_range **ranges)
6186 -{
6187 - struct airoha_gdm_port *port = netdev_priv(dev);
6188 - struct airoha_hw_stats *hw_stats = &port->stats;
6189 - unsigned int start;
6190 -
6191 - BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
6192 - ARRAY_SIZE(hw_stats->tx_len) + 1);
6193 - BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
6194 - ARRAY_SIZE(hw_stats->rx_len) + 1);
6195 -
6196 - *ranges = airoha_ethtool_rmon_ranges;
6197 - airoha_update_hw_stats(port);
6198 - do {
6199 - int i;
6200 -
6201 - start = u64_stats_fetch_begin(&port->stats.syncp);
6202 - stats->fragments = hw_stats->rx_fragment;
6203 - stats->jabbers = hw_stats->rx_jabber;
6204 - for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
6205 - i++) {
6206 - stats->hist[i] = hw_stats->rx_len[i];
6207 - stats->hist_tx[i] = hw_stats->tx_len[i];
6208 - }
6209 - } while (u64_stats_fetch_retry(&port->stats.syncp, start));
6210 -}
6211 -
6212 -static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
6213 - int channel, enum tx_sched_mode mode,
6214 - const u16 *weights, u8 n_weights)
6215 -{
6216 - int i;
6217 -
6218 - for (i = 0; i < AIROHA_NUM_TX_RING; i++)
6219 - airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
6220 - TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
6221 -
6222 - for (i = 0; i < n_weights; i++) {
6223 - u32 status;
6224 - int err;
6225 -
6226 - airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
6227 - TWRR_RW_CMD_MASK |
6228 - FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
6229 - FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
6230 - FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
6231 - err = read_poll_timeout(airoha_qdma_rr, status,
6232 - status & TWRR_RW_CMD_DONE,
6233 - USEC_PER_MSEC, 10 * USEC_PER_MSEC,
6234 - true, port->qdma,
6235 - REG_TXWRR_WEIGHT_CFG);
6236 - if (err)
6237 - return err;
6238 - }
6239 -
6240 - airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
6241 - CHAN_QOS_MODE_MASK(channel),
6242 - mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
6243 -
6244 - return 0;
6245 -}
6246 -
6247 -static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
6248 - int channel)
6249 -{
6250 - static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
6251 -
6252 - return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
6253 - ARRAY_SIZE(w));
6254 -}
6255 -
6256 -static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
6257 - int channel,
6258 - struct tc_ets_qopt_offload *opt)
6259 -{
6260 - struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
6261 - enum tx_sched_mode mode = TC_SCH_SP;
6262 - u16 w[AIROHA_NUM_QOS_QUEUES] = {};
6263 - int i, nstrict = 0, nwrr, qidx;
6264 -
6265 - if (p->bands > AIROHA_NUM_QOS_QUEUES)
6266 - return -EINVAL;
6267 -
6268 - for (i = 0; i < p->bands; i++) {
6269 - if (!p->quanta[i])
6270 - nstrict++;
6271 - }
6272 -
6273 - /* this configuration is not supported by the hw */
6274 - if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
6275 - return -EINVAL;
6276 -
6277 - /* The EN7581 SoC supports a fixed QoS band priority scheme where WRR
6278 - * queues have the lowest priority with respect to SP ones,
6279 - * e.g.: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
6280 - */
6281 - nwrr = p->bands - nstrict;
6282 - qidx = nstrict && nwrr ? nstrict : 0;
6283 - for (i = 1; i <= p->bands; i++) {
6284 - if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
6285 - return -EINVAL;
6286 -
6287 - qidx = i == nwrr ? 0 : qidx + 1;
6288 - }
6289 -
6290 - for (i = 0; i < nwrr; i++)
6291 - w[i] = p->weights[nstrict + i];
6292 -
6293 - if (!nstrict)
6294 - mode = TC_SCH_WRR8;
6295 - else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
6296 - mode = nstrict + 1;
6297 -
6298 - return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
6299 - ARRAY_SIZE(w));
6300 -}
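/* A worked example (assumed band counts) of the fixed EN7581 layout
 * the validation above enforces: with 8 bands and 2 strict-priority
 * bands, the hardware order is WRR0..WRR5 followed by SP0, SP1.
 */
#include <stdio.h>

int main(void)
{
	int bands = 8, nstrict = 2;
	int nwrr = bands - nstrict;

	for (int band = 0; band < bands; band++) {
		if (band < nwrr)
			printf("band %d: WRR, weight slot %d\n", band, band);
		else
			printf("band %d: SP level %d\n", band, band - nwrr);
	}
	return 0;
}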
6301 -
6302 -static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
6303 - int channel,
6304 - struct tc_ets_qopt_offload *opt)
6305 -{
6306 - u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
6307 - REG_CNTR_VAL(channel << 1));
6308 - u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
6309 - REG_CNTR_VAL((channel << 1) + 1));
6310 - u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
6311 - (fwd_tx_packets - port->fwd_tx_packets);
6312 - _bstats_update(opt->stats.bstats, 0, tx_packets);
6313 -
6314 - port->cpu_tx_packets = cpu_tx_packets;
6315 - port->fwd_tx_packets = fwd_tx_packets;
6316 -
6317 - return 0;
6318 -}
6319 -
6320 -static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
6321 - struct tc_ets_qopt_offload *opt)
6322 -{
6323 - int channel = TC_H_MAJ(opt->handle) >> 16;
6324 -
6325 - if (opt->parent == TC_H_ROOT)
6326 - return -EINVAL;
6327 -
6328 - switch (opt->command) {
6329 - case TC_ETS_REPLACE:
6330 - return airoha_qdma_set_tx_ets_sched(port, channel, opt);
6331 - case TC_ETS_DESTROY:
6332 - /* PRIO is the default qdisc scheduler */
6333 - return airoha_qdma_set_tx_prio_sched(port, channel);
6334 - case TC_ETS_STATS:
6335 - return airoha_qdma_get_tx_ets_stats(port, channel, opt);
6336 - default:
6337 - return -EOPNOTSUPP;
6338 - }
6339 -}
6340 -
6341 -static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
6342 - u32 addr, enum trtcm_param_type param,
6343 - enum trtcm_mode_type mode,
6344 - u32 *val_low, u32 *val_high)
6345 -{
6346 - u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
6347 - u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
6348 - FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
6349 - FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
6350 - FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
6351 -
6352 - airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
6353 - if (read_poll_timeout(airoha_qdma_rr, val,
6354 - val & TRTCM_PARAM_RW_DONE_MASK,
6355 - USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
6356 - qdma, REG_TRTCM_CFG_PARAM(addr)))
6357 - return -ETIMEDOUT;
6358 -
6359 - *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
6360 - if (val_high)
6361 - *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
6362 -
6363 - return 0;
6364 -}
6365 -
6366 -static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
6367 - u32 addr, enum trtcm_param_type param,
6368 - enum trtcm_mode_type mode, u32 val)
6369 -{
6370 - u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
6371 - u32 config = TRTCM_PARAM_RW_MASK |
6372 - FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
6373 - FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
6374 - FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
6375 - FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
6376 -
6377 - airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
6378 - airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
6379 -
6380 - return read_poll_timeout(airoha_qdma_rr, val,
6381 - val & TRTCM_PARAM_RW_DONE_MASK,
6382 - USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
6383 - qdma, REG_TRTCM_CFG_PARAM(addr));
6384 -}
6385 -
6386 -static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
6387 - u32 addr, enum trtcm_mode_type mode,
6388 - bool enable, u32 enable_mask)
6389 -{
6390 - u32 val;
6391 -
6392 - if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
6393 - mode, &val, NULL))
6394 - return -EINVAL;
6395 -
6396 - val = enable ? val | enable_mask : val & ~enable_mask;
6397 -
6398 - return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
6399 - mode, val);
6400 -}
6401 -
6402 -static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
6403 - int channel, u32 addr,
6404 - enum trtcm_mode_type mode,
6405 - u32 rate_val, u32 bucket_size)
6406 -{
6407 - u32 val, config, tick, unit, rate, rate_frac;
6408 - int err;
6409 -
6410 - if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
6411 - mode, &config, NULL))
6412 - return -EINVAL;
6413 -
6414 - val = airoha_qdma_rr(qdma, addr);
6415 - tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
6416 - if (config & TRTCM_TICK_SEL)
6417 - tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
6418 - if (!tick)
6419 - return -EINVAL;
6420 -
6421 - unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
6422 - if (!unit)
6423 - return -EINVAL;
6424 -
6425 - rate = rate_val / unit;
6426 - rate_frac = rate_val % unit;
6427 - rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
6428 - rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
6429 - FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
6430 -
6431 - err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
6432 - TRTCM_TOKEN_RATE_MODE, mode, rate);
6433 - if (err)
6434 - return err;
6435 -
6436 - val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
6437 - val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
6438 -
6439 - return airoha_qdma_set_trtcm_param(qdma, channel, addr,
6440 - TRTCM_BUCKETSIZE_SHIFT_MODE,
6441 - mode, val);
6442 -}
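/* A sketch (assumed tick, unit and field width) of the integer plus
 * fractional token-rate split above: the requested rate is divided by
 * the meter unit and the remainder is scaled into a fixed-point field.
 */
#include <stdio.h>
#include <stdint.h>

#define TOKEN_RATE_FIELD 0x10000	/* assumed fraction scale */

int main(void)
{
	uint32_t tick = 25;			/* fast tick, us */
	uint32_t unit = 8000 / tick;		/* byte mode: 320 */
	uint32_t rate_val = 100000;		/* requested rate */
	uint32_t rate = rate_val / unit;	/* 312 whole units */
	uint32_t frac = (rate_val % unit) * TOKEN_RATE_FIELD / unit;

	printf("rate=%u frac=0x%x\n", rate, frac);
	return 0;
}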
6443 -
6444 -static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
6445 - int channel, u32 rate,
6446 - u32 bucket_size)
6447 -{
6448 - int i, err;
6449 -
6450 - for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
6451 - err = airoha_qdma_set_trtcm_config(port->qdma, channel,
6452 - REG_EGRESS_TRTCM_CFG, i,
6453 - !!rate, TRTCM_METER_MODE);
6454 - if (err)
6455 - return err;
6456 -
6457 - err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
6458 - REG_EGRESS_TRTCM_CFG,
6459 - i, rate, bucket_size);
6460 - if (err)
6461 - return err;
6462 - }
6463 -
6464 - return 0;
6465 -}
6466 -
6467 -static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
6468 - struct tc_htb_qopt_offload *opt)
6469 -{
6470 - u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
6471 - u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
6472 - struct net_device *dev = port->dev;
6473 - int num_tx_queues = dev->real_num_tx_queues;
6474 - int err;
6475 -
6476 - if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
6477 - NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
6478 - return -EINVAL;
6479 - }
6480 -
6481 - err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
6482 - if (err) {
6483 - NL_SET_ERR_MSG_MOD(opt->extack,
6484 - "failed configuring htb offload");
6485 - return err;
6486 - }
6487 -
6488 - if (opt->command == TC_HTB_NODE_MODIFY)
6489 - return 0;
6490 -
6491 - err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
6492 - if (err) {
6493 - airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
6494 - NL_SET_ERR_MSG_MOD(opt->extack,
6495 - "failed setting real_num_tx_queues");
6496 - return err;
6497 - }
6498 -
6499 - set_bit(channel, port->qos_sq_bmap);
6500 - opt->qid = AIROHA_NUM_TX_RING + channel;
6501 -
6502 - return 0;
6503 -}
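/* A one-line check (assumed input) of the rate conversion above:
 * opt->rate is in bytes per second, so dividing by 1000 and shifting
 * left by 3 (i.e. multiplying by 8) yields kilobits per second.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate_bps = 12500000;	/* 100 Mbit/s in bytes/s */
	uint32_t rate_kbps = (uint32_t)(rate_bps / 1000) << 3;

	printf("%u kbps\n", rate_kbps);	/* prints 100000 */
	return 0;
}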
6504 -
6505 -static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
6506 -{
6507 - struct net_device *dev = port->dev;
6508 -
6509 - netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
6510 - airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
6511 - clear_bit(queue, port->qos_sq_bmap);
6512 -}
6513 -
6514 -static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
6515 - struct tc_htb_qopt_offload *opt)
6516 -{
6517 - u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
6518 -
6519 - if (!test_bit(channel, port->qos_sq_bmap)) {
6520 - NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
6521 - return -EINVAL;
6522 - }
6523 -
6524 - airoha_tc_remove_htb_queue(port, channel);
6525 -
6526 - return 0;
6527 -}
6528 -
6529 -static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
6530 -{
6531 - int q;
6532 -
6533 - for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
6534 - airoha_tc_remove_htb_queue(port, q);
6535 -
6536 - return 0;
6537 -}
6538 -
6539 -static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
6540 - struct tc_htb_qopt_offload *opt)
6541 -{
6542 - u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
6543 -
6544 - if (!test_bit(channel, port->qos_sq_bmap)) {
6545 - NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
6546 - return -EINVAL;
6547 - }
6548 -
6549 - opt->qid = channel;
6550 -
6551 - return 0;
6552 -}
6553 -
6554 -static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
6555 - struct tc_htb_qopt_offload *opt)
6556 -{
6557 - switch (opt->command) {
6558 - case TC_HTB_CREATE:
6559 - break;
6560 - case TC_HTB_DESTROY:
6561 - return airoha_tc_htb_destroy(port);
6562 - case TC_HTB_NODE_MODIFY:
6563 - case TC_HTB_LEAF_ALLOC_QUEUE:
6564 - return airoha_tc_htb_alloc_leaf_queue(port, opt);
6565 - case TC_HTB_LEAF_DEL:
6566 - case TC_HTB_LEAF_DEL_LAST:
6567 - case TC_HTB_LEAF_DEL_LAST_FORCE:
6568 - return airoha_tc_htb_delete_leaf_queue(port, opt);
6569 - case TC_HTB_LEAF_QUERY_QUEUE:
6570 - return airoha_tc_get_htb_get_leaf_queue(port, opt);
6571 - default:
6572 - return -EOPNOTSUPP;
6573 - }
6574 -
6575 - return 0;
6576 -}
6577 -
6578 -static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
6579 - void *type_data)
6580 -{
6581 - struct airoha_gdm_port *port = netdev_priv(dev);
6582 -
6583 - switch (type) {
6584 - case TC_SETUP_QDISC_ETS:
6585 - return airoha_tc_setup_qdisc_ets(port, type_data);
6586 - case TC_SETUP_QDISC_HTB:
6587 - return airoha_tc_setup_qdisc_htb(port, type_data);
6588 - default:
6589 - return -EOPNOTSUPP;
6590 - }
6591 -}
6592 -
6593 -static const struct net_device_ops airoha_netdev_ops = {
6594 - .ndo_init = airoha_dev_init,
6595 - .ndo_open = airoha_dev_open,
6596 - .ndo_stop = airoha_dev_stop,
6597 - .ndo_select_queue = airoha_dev_select_queue,
6598 - .ndo_start_xmit = airoha_dev_xmit,
6599 - .ndo_get_stats64 = airoha_dev_get_stats64,
6600 - .ndo_set_mac_address = airoha_dev_set_macaddr,
6601 - .ndo_setup_tc = airoha_dev_tc_setup,
6602 -};
6603 -
6604 -static const struct ethtool_ops airoha_ethtool_ops = {
6605 - .get_drvinfo = airoha_ethtool_get_drvinfo,
6606 - .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
6607 - .get_rmon_stats = airoha_ethtool_get_rmon_stats,
6608 -};
6609 -
6610 -static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
6611 -{
6612 - const __be32 *id_ptr = of_get_property(np, "reg", NULL);
6613 - struct airoha_gdm_port *port;
6614 - struct airoha_qdma *qdma;
6615 - struct net_device *dev;
6616 - int err, index;
6617 - u32 id;
6618 -
6619 - if (!id_ptr) {
6620 - dev_err(eth->dev, "missing gdm port id\n");
6621 - return -EINVAL;
6622 - }
6623 -
6624 - id = be32_to_cpup(id_ptr);
6625 - index = id - 1;
6626 -
6627 - if (!id || id > ARRAY_SIZE(eth->ports)) {
6628 - dev_err(eth->dev, "invalid gdm port id: %d\n", id);
6629 - return -EINVAL;
6630 - }
6631 -
6632 - if (eth->ports[index]) {
6633 - dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
6634 - return -EINVAL;
6635 - }
6636 -
6637 - dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
6638 - AIROHA_NUM_NETDEV_TX_RINGS,
6639 - AIROHA_NUM_RX_RING);
6640 - if (!dev) {
6641 - dev_err(eth->dev, "alloc_etherdev failed\n");
6642 - return -ENOMEM;
6643 - }
-
-        qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
-        dev->netdev_ops = &airoha_netdev_ops;
-        dev->ethtool_ops = &airoha_ethtool_ops;
-        dev->max_mtu = AIROHA_MAX_MTU;
-        dev->watchdog_timeo = 5 * HZ;
-        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                           NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
-                           NETIF_F_SG | NETIF_F_TSO |
-                           NETIF_F_HW_TC;
-        dev->features |= dev->hw_features;
-        dev->dev.of_node = np;
-        dev->irq = qdma->irq;
-        SET_NETDEV_DEV(dev, eth->dev);
-
-        /* reserve hw queues for HTB offloading */
-        err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
-        if (err)
-                return err;
-
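-        /* Prefer a DT-provided MAC address; propagate -EPROBE_DEFER (e.g. an
-         * NVMEM provider that is not ready yet) and fall back to a random
-         * address on any other error.
-         */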
-        err = of_get_ethdev_address(np, dev);
-        if (err) {
-                if (err == -EPROBE_DEFER)
-                        return err;
-
-                eth_hw_addr_random(dev);
-                dev_info(eth->dev, "generated random MAC address %pM\n",
-                         dev->dev_addr);
-        }
-
-        port = netdev_priv(dev);
-        u64_stats_init(&port->stats.syncp);
-        spin_lock_init(&port->stats.lock);
-        port->qdma = qdma;
-        port->dev = dev;
-        port->id = id;
-        eth->ports[index] = port;
-
-        return register_netdev(dev);
-}
-
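-/* Probe: map the frame engine registers, acquire the reset lines, start
- * NAPI on every QDMA instance, then register a netdev for each available
- * "airoha,eth-mac" child node.
- */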
-static int airoha_probe(struct platform_device *pdev)
-{
-        struct device_node *np;
-        struct airoha_eth *eth;
-        int i, err;
-
-        eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
-        if (!eth)
-                return -ENOMEM;
-
-        eth->dev = &pdev->dev;
-
-        err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
-        if (err) {
-                dev_err(eth->dev, "failed configuring DMA mask\n");
-                return err;
-        }
-
-        eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
-        if (IS_ERR(eth->fe_regs))
-                return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
-                                     "failed to iomap fe regs\n");
-
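-        /* Bulk-request the core reset lines before touching the hardware */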
-        eth->rsts[0].id = "fe";
-        eth->rsts[1].id = "pdma";
-        eth->rsts[2].id = "qdma";
-        err = devm_reset_control_bulk_get_exclusive(eth->dev,
-                                                    ARRAY_SIZE(eth->rsts),
-                                                    eth->rsts);
-        if (err) {
-                dev_err(eth->dev, "failed to get bulk reset lines\n");
-                return err;
-        }
-
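-        /* Reset lines for the xsi/hsi/xfp MAC blocks */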
-        eth->xsi_rsts[0].id = "xsi-mac";
-        eth->xsi_rsts[1].id = "hsi0-mac";
-        eth->xsi_rsts[2].id = "hsi1-mac";
-        eth->xsi_rsts[3].id = "hsi-mac";
-        eth->xsi_rsts[4].id = "xfp-mac";
-        err = devm_reset_control_bulk_get_exclusive(eth->dev,
-                                                    ARRAY_SIZE(eth->xsi_rsts),
-                                                    eth->xsi_rsts);
-        if (err) {
-                dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
-                return err;
-        }
-
-        eth->napi_dev = alloc_netdev_dummy(0);
-        if (!eth->napi_dev)
-                return -ENOMEM;
-
-        /* Enable threaded NAPI by default */
-        eth->napi_dev->threaded = true;
-        strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
-        platform_set_drvdata(pdev, eth);
-
-        err = airoha_hw_init(pdev, eth);
-        if (err)
-                goto error_hw_cleanup;
-
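-        /* NAPI must be running before the ports are registered, since a
-         * port can start passing traffic as soon as register_netdev()
-         * returns.
-         */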
-        for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-                airoha_qdma_start_napi(&eth->qdma[i]);
-
-        for_each_child_of_node(pdev->dev.of_node, np) {
-                if (!of_device_is_compatible(np, "airoha,eth-mac"))
-                        continue;
-
-                if (!of_device_is_available(np))
-                        continue;
-
-                err = airoha_alloc_gdm_port(eth, np);
-                if (err) {
-                        of_node_put(np);
-                        goto error_napi_stop;
-                }
-        }
-
-        return 0;
-
-error_napi_stop:
-        for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-                airoha_qdma_stop_napi(&eth->qdma[i]);
-error_hw_cleanup:
-        for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
-                airoha_hw_cleanup(&eth->qdma[i]);
-
-        for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-                struct airoha_gdm_port *port = eth->ports[i];
-
-                if (port && port->dev->reg_state == NETREG_REGISTERED)
-                        unregister_netdev(port->dev);
-        }
-        free_netdev(eth->napi_dev);
-        platform_set_drvdata(pdev, NULL);
-
-        return err;
-}
-
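-/* Teardown mirrors probe: quiesce NAPI and the hardware first, then stop
- * and unregister every port, and finally release the dummy NAPI netdev.
- */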
-static void airoha_remove(struct platform_device *pdev)
-{
-        struct airoha_eth *eth = platform_get_drvdata(pdev);
-        int i;
-
-        for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
-                airoha_qdma_stop_napi(&eth->qdma[i]);
-                airoha_hw_cleanup(&eth->qdma[i]);
-        }
-
-        for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
-                struct airoha_gdm_port *port = eth->ports[i];
-
-                if (!port)
-                        continue;
-
-                airoha_dev_stop(port->dev);
-                unregister_netdev(port->dev);
-        }
-        free_netdev(eth->napi_dev);
-
-        platform_set_drvdata(pdev, NULL);
-}
-
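-/* Only the EN7581 SoC is matched for now. Note .remove_new is the
- * void-returning variant of the platform-driver remove callback.
- */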
-static const struct of_device_id of_airoha_match[] = {
-        { .compatible = "airoha,en7581-eth" },
-        { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, of_airoha_match);
-
-static struct platform_driver airoha_driver = {
-        .probe = airoha_probe,
-        .remove_new = airoha_remove,
-        .driver = {
-                .name = KBUILD_MODNAME,
-                .of_match_table = of_airoha_match,
-        },
-};
-module_platform_driver(airoha_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
-MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");