/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <net.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>
DECLARE_GLOBAL_DATA_PTR;
/* Some linux -> U-Boot compatibility stuff */
#define netdev_err(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_dbg(dev, fmt, args...)		\
	printf(fmt, ##args)

#define ETH_ALEN	6		/* Octets in one ethernet addr */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)
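/*
 * Editorial note: U-Boot runs single-threaded on one CPU, so the Linux
 * per-CPU helpers above collapse to trivial stubs — there is exactly one
 * "present" CPU and per_cpu_ptr() simply returns the pointer itself.
 */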
#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS		1
#define ETH_HLEN		ETHER_HDR_SIZE	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define MTU			1500
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
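/*
 * Worked example (illustrative): with the usual MTU of 1500, WRAP is
 * 2 + 14 + 4 + 32 = 52 bytes, so RX_BUFFER_SIZE is 1552 rounded up to
 * ARCH_DMA_MINALIGN (1600 bytes for a 64-byte DMA alignment).
 */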
#define MVPP2_SMI_TIMEOUT			10000

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))
/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK		BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_EN_PCS_AN			BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN			BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_EN_FC_AN			BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
						 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)
/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_PORT_EN			BIT(0)
#define MVPP22_XLG_MAC_RESETN			BIT(1)
#define MVPP22_XLG_RX_FC_EN			BIT(7)
#define MVPP22_XLG_MIBCNT_DIS			BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS		0
#define MVPP22_XLG_MAX_RX_SIZE_MASK		0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN		BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define MVPP22_XLG_MODE_DMA_1G			BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)
/* XPCS registers. PPv2.2 only */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define MVPP22_XPCS_PCSRESET			BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS		3
#define MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS		5
#define MVPP22_XPCS_LANEACTIVE_MASK		(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)
/* MPCS registers. PPv2.2 only */
#define PCS40G_COMMON_CONTROL			0x14
#define FORWARD_ERROR_CORRECTION_MASK		BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define TX_SD_CLK_RESET_MASK			BIT(0)
#define RX_SD_CLK_RESET_MASK			BIT(1)
#define MAC_CLK_RESET_MASK			BIT(2)
#define CLK_DIVISION_RATIO_OFFS			4
#define CLK_DIVISION_RATIO_MASK			(0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK			BIT(11)
/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define NETC_GOP_SOFT_RESET_OFFS		6
#define NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define NETC_BUS_WIDTH_SELECT_OFFS		1
#define NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS		29
#define NETC_GIG_RX_DATA_SAMPLE_MASK		(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS			31
#define NETC_CLK_DIV_PHASE_MASK			(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define NETC_PORTS_ACTIVE_OFFSET(p)		(0 + (p))
#define NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p)		(28 + (p))
#define NETC_PORT_GIG_RF_RESET_MASK(p)		(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS		0
#define NETC_GBE_PORT0_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS		1
#define NETC_GBE_PORT1_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS		2
#define NETC_GBE_PORT1_MII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)
#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x04 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index)		\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP21_SMI				0x0054
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200
/* SMI register fields */
#define MVPP2_SMI_DATA_OFFS		0	/* Data */
#define MVPP2_SMI_DATA_MASK		(0xffff << MVPP2_SMI_DATA_OFFS)
#define MVPP2_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define MVPP2_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr */
#define MVPP2_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define MVPP2_SMI_OPCODE_READ		(1 << MVPP2_SMI_OPCODE_OFFS)
#define MVPP2_SMI_READ_VALID		(1 << 27)	/* Read Valid */
#define MVPP2_SMI_BUSY			(1 << 28)	/* Busy */
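/*
 * Illustrative SMI usage: a read of register 2 on the PHY at address 1
 * would be started by writing
 *   (1 << MVPP2_SMI_DEV_ADDR_OFFS) | (2 << MVPP2_SMI_REG_ADDR_OFFS) |
 *   MVPP2_SMI_OPCODE_READ
 * to the SMI register, then polling (bounded by MVPP2_SMI_TIMEOUT)
 * until MVPP2_SMI_BUSY clears and MVPP2_SMI_READ_VALID is set; the
 * result is then in the MVPP2_SMI_DATA_MASK field.
 */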
#define MVPP2_PHY_ADDR_MASK		0x1f
#define MVPP2_PHY_REG_MASK		0x1f
/* Additional PPv2.2 offsets */
#define MVPP22_MPCS			0x007000
#define MVPP22_XPCS			0x007400
#define MVPP22_PORT_BASE		0x007e00
#define MVPP22_PORT_OFFSET		0x001000
#define MVPP22_RFU1			0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM		4

/* TX FIFO minimum fill-level thresholds */
#define MVPP2_RGMII_TX_FIFO_MIN_TH	0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH	0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH	0xb
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC3_SGMII = BIT(1),
	MV_NETC_GE_MAC3_RGMII = BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK		0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
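/*
 * Example (illustrative): MVPP2_RX_PKT_SIZE(1500) is
 * ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536 bytes; the buffer and total
 * sizes then only add NET_SKB_PAD on top, since there is no
 * skb_shared_info in U-Boot (MVPP2_SKB_SHINFO_SIZE is 0).
 */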
#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

#define MVPP2_F_LOOPBACK		BIT(0)
/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS		8
#define MVPP2_PRS_PORT_MASK		0xff
#define MVPP2_PRS_LU_MASK		0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5
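/*
 * Note on the byte swizzling above: each 32-bit TCAM data word holds
 * two data bytes (word bytes 0-1) and their two enable bytes (word
 * bytes 2-3). So data offsets 0,1,2,3 land at byte[] indices 0,1,4,5
 * and their enable bytes at indices 2,3,6,7.
 */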
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16 * 1024 - MVPP2_BM_POOL_PTR_ALIGN / 4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
/* BM short pool packet size
 * This value ensures that, for SWF, the total number
 * of bytes allocated for each buffer will be 512.
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;
	void __iomem *mdio_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	struct mii_dev *bus;
};
struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phyaddr;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */
#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};
/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;
#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
};
/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;
/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer buffer descriptors than that,
 * so 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}
static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
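/*
 * All packet-processor register accesses in this driver go through the
 * two wrappers above; e.g. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, idx)
 * is a 32-bit write at offset 0x1100 from the shared register base.
 */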
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}
static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}
static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;

	return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}
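/*
 * On PPv2.2 the descriptor packs a 41-bit DMA address into the low bits
 * of buf_dma_addr_key_hash, hence the GENMASK_ULL(40, 0) mask here and
 * in the cookie accessor below: the upper bits carry key/hash data and
 * must not leak into the returned address.
 */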
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;

	return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;

	return rx_desc->pp22.data_size;
}
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;

	return rx_desc->pp22.status;
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}
/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
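/*
 * Example (illustrative): the first MVPP2_MAX_TCONT * MVPP2_MAX_TXQ
 * physical TXQ numbers are reserved for the PON T-CONTs, so port 0,
 * txq 0 maps to physical TXQ (16 + 0) * 8 + 0 = 128, and port 1,
 * txq 0 to 136.
 */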
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}
/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}
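/*
 * Note: the enable byte stores the port map *inverted* (a set bit masks
 * a port out), so mvpp2_prs_tcam_port_map_set(pe, 0) disables the entry
 * for all ports, while passing MVPP2_PRS_PORT_MASK enables it for all.
 */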
/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}
/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
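/*
 * Example (illustrative): matching the IPv4 ethertype 0x0800 stores the
 * two bytes 0x08, 0x00 in network byte order at the given TCAM offset,
 * with both enable masks fully set so every bit must match.
 */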
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}
/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
		(8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
1847 /* Accept multicast */
1848 static void mvpp2_prs_mac_multi_set(struct mvpp2
*priv
, int port
, int index
,
1851 struct mvpp2_prs_entry pe
;
1852 unsigned char da_mc
;
1854 /* Ethernet multicast address first byte is
1855 * 0x01 for IPv4 and 0x33 for IPv6
1857 da_mc
= (index
== MVPP2_PE_MAC_MC_ALL
) ? 0x01 : 0x33;
1859 if (priv
->prs_shadow
[index
].valid
) {
1860 /* Entry exist - update port only */
1862 mvpp2_prs_hw_read(priv
, &pe
);
1864 /* Entry doesn't exist - create new */
1865 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1866 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1869 /* Continue - set next lookup */
1870 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1872 /* Set result info bits */
1873 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L2_MCAST
,
1874 MVPP2_PRS_RI_L2_CAST_MASK
);
1876 /* Update tcam entry data first byte */
1877 mvpp2_prs_tcam_data_byte_set(&pe
, 0, da_mc
, 0xff);
1879 /* Shift to ethertype */
1880 mvpp2_prs_sram_shift_set(&pe
, 2 * ETH_ALEN
,
1881 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1883 /* Mask all ports */
1884 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1886 /* Update shadow table */
1887 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1890 /* Update port mask */
1891 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
1893 mvpp2_prs_hw_write(priv
, &pe
);
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
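
/*
 * The INIT_OFFS and MAX_LOOP registers pack the per-port fields of four
 * ports into one 32-bit register, which is why the MVPP2_PRS_INIT_OFF_*
 * and MVPP2_PRS_MAX_LOOP_* macros select the register by port group and
 * shift the field by (port % 4) * 8.
 */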
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Placeholders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
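
/*
 * Each hardware TCAM/SRAM write above is mirrored into priv->prs_shadow.
 * The shadow table is what the dynamic-entry helpers (e.g.
 * mvpp2_prs_tcam_first_free() and the MAC DA code below) consult, since
 * the TCAM itself cannot be efficiently scanned for free slots.
 */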
/* Parser default initialization */
static int mvpp2_prs_default_init(struct udevice *dev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
					     &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}

	kfree(pe);

	return NULL;
}
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
						       MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
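
/*
 * Note that one TCAM entry is shared by all ports matching the same DA:
 * the helper above only flips this port's bit in the TCAM port map, and
 * the entry is invalidated only once the last port has been removed
 * from it.
 */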
static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
{
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	memcpy(port->dev_addr, da, ETH_ALEN);

	return 0;
}
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such entry doesn't exist - create it */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index,
				     MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct udevice *dev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	if (priv->hw_version == MVPP22)
		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
			    (upper_32_bits(bm_pool->dma_addr) &
			     MVPP22_BM_POOL_BASE_HIGH_MASK));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
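
/*
 * Unlike the Linux driver, pool memory here comes from the static
 * buffer_loc area rather than from a DMA allocator, so pool create and
 * destroy only start/stop the pool in hardware and never free the
 * backing store.
 */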
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		/* Allocate buffer back from the buffer manager */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	}

	bm_pool->buf_num = 0;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct udevice *dev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		dev_err(dev, "cannot free all buffers in pool %d\n",
			bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	return 0;
}
static int mvpp2_bm_pools_init(struct udevice *dev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;

	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
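
/*
 * The BM cookie is a plain u32 used only by software: one byte holds the
 * pool number (at MVPP2_BM_COOKIE_POOL_OFFS) and one byte the CPU number
 * (at MVPP2_BM_COOKIE_CPU_OFFS, see mvpp2_bm_cookie_build() below). For
 * example, a cookie built for pool 2 on CPU 0 yields 2 when passed back
 * through mvpp2_bm_cookie_pool_get().
 */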
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     unsigned long buf_phys_addr)
{
	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}
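
/*
 * Order matters here: on PPv2.2 the high address bits are latched in
 * MVPP22_BM_ADDR_HIGH_RLS_REG first, and the final write to
 * MVPP2_BM_PHY_RLS_REG is what actually hands the buffer back to the
 * pool.
 */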
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		mvpp2_bm_pool_put(port, bm_pool->id,
				  (dma_addr_t)buffer_loc.rx_buffer[i],
				  (unsigned long)buffer_loc.rx_buffer[i]);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(NULL, port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			dev_err(dev, "pool %d: %d of %d allocated\n",
				new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					 MVPP2_BM_SWF_LONG,
					 port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	return 0;
}
/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
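
/*
 * Note: the GMAC MAX_RX_SIZE field counts in units of 2 bytes, hence the
 * division by 2 above; the Marvell header is subtracted first since it
 * is stripped before the size check. The 0x1400 written by the GoP code
 * below therefore corresponds to 0x2800-byte (jumbo) frames.
 */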
/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}
/*
 * Configure the port to work with the Gig PCS or not.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}
static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_CONFIG_MII_SPEED |
		MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to RGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0xb8e8 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
/* Set the internal mux's to the required MAC in the GOP */
static int gop_gmac_mode_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* Set TX FIFO thresholds */
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		if (port->phy_speed == 2500)
			gop_gmac_sgmii2_5_cfg(port);
		else
			gop_gmac_sgmii_cfg(port);
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_rgmii_cfg(port);
		break;

	default:
		return -1;
	}

	/* Jumbo frame support - 0x1400*2= 0x2800 bytes */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* PeriodicXonEn disable */
	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);

	return 0;
}

static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* relevant only for MAC0 (XLG0 and GMAC0) */
	if (port->gop_id > 0)
		return;

	/* configure 1Gig MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}

static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
	else
		val |= MVPP2_GMAC_SGMII_MODE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}
/* Set the internal mux's to the required PCS in the PI */
static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
{
	u32 val;
	int lane;

	switch (num_of_lanes) {
	case 1:
		lane = 0;
		break;
	case 2:
		lane = 1;
		break;
	case 4:
		lane = 2;
		break;
	default:
		return -1;
	}

	/* configure XG MAC mode */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	val &= ~MVPP22_XPCS_PCSMODE_MASK;
	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
	val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	return 0;
}

/* Set the internal mux's to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1; /* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}

/* Set PCS to reset or exit from reset */
static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	if (reset)
		val &= ~MVPP22_XPCS_PCSRESET;
	else
		val |= MVPP22_XPCS_PCSRESET;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}
/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all its elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		netdev_err(NULL, "%s: illegal port number %d", __func__,
			   mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper Mac mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SFI:
		num_of_act_lanes = 2;

		/* configure PCS */
		gop_xpcs_mode(port, num_of_act_lanes);
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* pcs unreset */
		gop_xpcs_reset(port, 0);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
			   __func__, port->phy_interface);
		return -1;
	}

	return 0;
}
static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		val &= ~MVPP22_XLG_PORT_EN;
	}

	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}

static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_SFI:
		gop_xlg_mac_port_enable(port, enable);
		break;

	default:
		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
			   port->phy_interface);
		return;
	}
}

/* RFU1 functions */
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}
static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC2_SGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}
static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
					   u32 val)
{
	u32 reg, mask, offset;

	if (gop_id == 2) {
		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
	} else {
		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
	}
	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~mask;

	val <<= offset;
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;

	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
	val &= NETC_BUS_WIDTH_SELECT_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;

	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select RGMII mode */
		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select SGMII mode */
		if (gop_id >= 1) {
			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
						       MV_NETC_GBE_SGMII);
		}

		/* Configure the sample stages */
		gop_netc_sample_stages_timing(priv, 0);
		/* Configure the ComPhy Selector */
		/* gop_netc_com_phy_selector_config(netComplex); */
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
{
	u32 c = priv->netc_config;

	if (c & MV_NETC_GE_MAC2_SGMII)
		gop_netc_mac_to_sgmii(priv, 2, phase);
	else
		gop_netc_mac_to_xgmii(priv, 2, phase);

	if (c & MV_NETC_GE_MAC3_SGMII) {
		gop_netc_mac_to_sgmii(priv, 3, phase);
	} else {
		gop_netc_mac_to_xgmii(priv, 3, phase);
		if (c & MV_NETC_GE_MAC3_RGMII)
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
		else
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
	}

	/* Activate gop ports 0, 2, 3 */
	gop_netc_active_port(priv, 0, 1);
	gop_netc_active_port(priv, 2, 1);
	gop_netc_active_port(priv, 3, 1);

	if (phase == MV_NETC_SECOND_PHASE) {
		/* Enable the GOP internal clock logic */
		gop_netc_gop_clock_logic_set(priv, 1);
		/* De-assert GOP unit reset */
		gop_netc_gop_reset(priv, 1);
	}

	return 0;
}
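
/*
 * gop_netc_init() is called twice: MV_NETC_FIRST_PHASE sets up bus
 * widths and the MII/SGMII muxing while the GoP is still in reset, and
 * MV_NETC_SECOND_PHASE releases the per-port and unit resets and
 * enables the clock logic.
 */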
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			   MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}
/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
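
/*
 * A typical RX poll first reads the occupied count and, once the
 * buffers have been handed back to the BM pool, returns the same number
 * of slots, e.g.:
 *
 *	rx_received = mvpp2_rxq_received(port, rxq->id);
 *	... process rx_received descriptors ...
 *	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
 */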
3876 /* Get pointer to next RX descriptor to be processed by SW */
3877 static inline struct mvpp2_rx_desc
*
3878 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue
*rxq
)
3880 int rx_desc
= rxq
->next_desc_to_proc
;
3882 rxq
->next_desc_to_proc
= MVPP2_QUEUE_NEXT_DESC(rxq
, rx_desc
);
3883 prefetch(rxq
->descs
+ rxq
->next_desc_to_proc
);
3884 return rxq
->descs
+ rx_desc
;
3887 /* Set rx queue offset */
3888 static void mvpp2_rxq_offset_set(struct mvpp2_port
*port
,
3889 int prxq
, int offset
)
3893 /* Convert offset from bytes to units of 32 bytes */
3894 offset
= offset
>> 5;
3896 val
= mvpp2_read(port
->priv
, MVPP2_RXQ_CONFIG_REG(prxq
));
3897 val
&= ~MVPP2_RXQ_PACKET_OFFSET_MASK
;
3900 val
|= ((offset
<< MVPP2_RXQ_PACKET_OFFSET_OFFS
) &
3901 MVPP2_RXQ_PACKET_OFFSET_MASK
);
3903 mvpp2_write(port
->priv
, MVPP2_RXQ_CONFIG_REG(prxq
), val
);
3906 /* Obtain BM cookie information from descriptor */
3907 static u32
mvpp2_bm_cookie_build(struct mvpp2_port
*port
,
3908 struct mvpp2_rx_desc
*rx_desc
)
3910 int cpu
= smp_processor_id();
3913 pool
= (mvpp2_rxdesc_status_get(port
, rx_desc
) &
3914 MVPP2_RXD_BM_POOL_ID_MASK
) >>
3915 MVPP2_RXD_BM_POOL_ID_OFFS
;
3917 return ((pool
& 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS
) |
3918 ((cpu
& 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS
);
3921 /* Tx descriptors helper methods */
3923 /* Get number of Tx descriptors waiting to be transmitted by HW */
3924 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port
*port
,
3925 struct mvpp2_tx_queue
*txq
)
3929 mvpp2_write(port
->priv
, MVPP2_TXQ_NUM_REG
, txq
->id
);
3930 val
= mvpp2_read(port
->priv
, MVPP2_TXQ_PENDING_REG
);
3932 return val
& MVPP2_TXQ_PENDING_MASK
;
3935 /* Get pointer to next Tx descriptor to be processed (send) by HW */
3936 static struct mvpp2_tx_desc
*
3937 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue
*txq
)
3939 int tx_desc
= txq
->next_desc_to_proc
;
3941 txq
->next_desc_to_proc
= MVPP2_QUEUE_NEXT_DESC(txq
, tx_desc
);
3942 return txq
->descs
+ tx_desc
;
/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}
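
/*
 * MVPP2_TXQ_SENT_REG is clear-on-read, so the dummy reads in the loop
 * above are sufficient to reset the per-queue transmitted-descriptor
 * counters.
 */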
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32	val, size, mtu;
	int	txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}
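
/*
 * Note that no descriptor memory is allocated or DMA-mapped here: the
 * rings live in the pre-allocated, cache-disabled buffer area set up in
 * mvpp2_base_probe(), so the virtual address doubles as the DMA address.
 */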
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = buffer_loc.rx_descs;
	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
	if (!rxq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
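
/*
 * The per-queue RXQ registers above are accessed indirectly: the queue
 * number is first written to MVPP2_RXQ_NUM_REG, after which
 * MVPP2_RXQ_DESC_ADDR_REG, MVPP2_RXQ_DESC_SIZE_REG and MVPP2_RXQ_INDEX_REG
 * all refer to that queue.
 */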
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = buffer_loc.tx_descs;
	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
		    MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
	}

	return 0;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	mvpp2_txq_sent_counter_clear(port);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}
/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	mvpp2_txq_sent_counter_clear(port);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* Adjust link */
static void mvpp2_link_event(struct mvpp2_port *port)
{
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
	}
}
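
/*
 * This mirrors the adjust_link callback of the Linux driver. U-Boot runs
 * without interrupts, so it is invoked once from mvpp2_open() after PHY
 * startup to force the MAC speed/duplex settings to the negotiated link.
 */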
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, dma_addr_t dma_addr)
{
	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
	return 0;
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_gmac_max_rx_size_set(port);
	default:
		break;
	}

	mvpp2_txp_max_tx_size_set(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_enable(port);
	else
		gop_port_enable(port, 1);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mvpp2_egress_disable(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);
}
static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	if (!port->init || port->link == 0) {
		phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
				      port->phy_interface);
		port->phy_dev = phy_dev;
		if (!phy_dev) {
			netdev_err(port->dev, "cannot connect to phy\n");
			return -ENODEV;
		}
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->advertising = phy_dev->supported;

		port->phy_dev = phy_dev;
		port->link = 0;
		port->duplex = 0;
		port->speed = 0;
		port->init = 1;

		phy_config(phy_dev);
		phy_startup(phy_dev);
		if (!phy_dev->link) {
			printf("%s: No link\n", phy_dev->dev->name);
			return -1;
		}

		/* If the PHY reports link up, start the port */
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return 0;
}
static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
{
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      port->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		return err;
	}

	if (port->phy_node) {
		err = mvpp2_phy_connect(dev, port);
		if (err < 0)
			return err;

		mvpp2_link_event(port);
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	mvpp2_start_dev(port);

	return 0;
}
/* No Device ops here in U-Boot */

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;

	/* On PPv2.2 the GoP / interface configuration has already been done */
	if (priv->hw_version == MVPP21)
		mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}
/* Initialize port HW */
static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
					 GFP_KERNEL);
		if (!txq->pcpu)
			return -ENOMEM;

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs)
		return -ENOMEM;

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq)
			return -ENOMEM;
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		return err;

	return 0;
}
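
/*
 * U-Boot operates with a single Rx and a single Tx queue per port
 * (rxq_number/txq_number), so the loops above set up one queue each;
 * the structure follows the Linux driver to keep the two code bases
 * easy to compare.
 */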
static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
{
	int port_node = dev_of_offset(dev);
	const char *phy_mode_str;
	int phy_node, mdio_off, cp_node;
	int phy_mode = -1;
	int phyaddr = 0;
	int id;
	phys_addr_t mdio_addr;

	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");

	if (phy_node > 0) {
		phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
		if (phyaddr < 0) {
			dev_err(&pdev->dev, "could not find phy address\n");
			return -1;
		}
		mdio_off = fdt_parent_offset(gd->fdt_blob, phy_node);

		/* TODO: This is a workaround for the mdio issue. U-Boot 2017
		 * has no mdio driver, and on the MACHIATOBin board the ports
		 * on CP1 are connected to the mdio on CP0. The workaround is
		 * to derive the mdio address from the base address of the
		 * phy handle's parent. It should be removed once an mdio
		 * driver is implemented.
		 */
		mdio_addr = fdtdec_get_uint(gd->fdt_blob,
					    mdio_off, "reg", 0);

		cp_node = fdt_parent_offset(gd->fdt_blob, mdio_off);
		mdio_addr |= fdt_get_base_address((void *)gd->fdt_blob,
						  cp_node);

		port->priv->mdio_base = (void *)mdio_addr;

		if (port->priv->mdio_base < 0) {
			dev_err(&pdev->dev, "could not find mdio base address\n");
			return -1;
		}
	} else {
		phy_node = 0;
	}

	phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
	if (phy_mode_str)
		phy_mode = phy_get_interface_by_name(phy_mode_str);
	if (phy_mode == -1) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		return -EINVAL;
	}

	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
	if (id == -1) {
		dev_err(&pdev->dev, "missing port-id value\n");
		return -EINVAL;
	}

#ifdef CONFIG_DM_GPIO
	gpio_request_by_name(dev, "phy-reset-gpios", 0,
			     &port->phy_reset_gpio, GPIOD_IS_OUT);
	gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
			     &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
#endif

	/*
	 * ToDo:
	 * Not sure if this DT property "phy-speed" will get accepted, so
	 * this might change later
	 */
	/* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */
	port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node,
					 "phy-speed", 1000);

	port->id = id;
	if (port->priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * port->priv->max_port_rxqs;
	port->phy_node = phy_node;
	port->phy_interface = phy_mode;
	port->phyaddr = phyaddr;

	return 0;
}
#ifdef CONFIG_DM_GPIO
/* Port GPIO initialization */
static void mvpp2_gpio_init(struct mvpp2_port *port)
{
	if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
		dm_gpio_set_value(&port->phy_reset_gpio, 0);
		mdelay(10);
		dm_gpio_set_value(&port->phy_reset_gpio, 1);
	}

	if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
		dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
}
#endif
/* Ports initialization */
static int mvpp2_port_probe(struct udevice *dev,
			    struct mvpp2_port *port,
			    int port_node,
			    struct mvpp2 *priv)
{
	int err;

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;

	err = mvpp2_port_init(dev, port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", port->id);
		return err;
	}
	mvpp2_port_power_up(port);

#ifdef CONFIG_DM_GPIO
	mvpp2_gpio_init(port);
#endif

	priv->port_list[port->id] = port;
	priv->num_ports++;
	return 0;
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (priv->hw_version == MVPP22) {
			if (port == 0) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
			} else if (port == 1) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
			} else {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
			}
		} else {
			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
		}
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
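
/*
 * On PPv2.2 the RX FIFO is split unevenly: port 0 (the 10G-capable port)
 * receives the largest share, port 1 a 2.5G share and the remaining ports
 * a 1G share each. PPv2.1 uses one fixed size for all ports.
 */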
/* Initialize Tx FIFOs */
static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
{
	int port, val;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		/* Port 0 supports 10KB TX FIFO */
		if (port == 0) {
			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
				MVPP22_TX_FIFO_SIZE_MASK;
		} else {
			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
				MVPP22_TX_FIFO_SIZE_MASK;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
	}
}
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptor */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
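
/*
 * The attribute registers written above select the cache and domain
 * attributes used for BM, descriptor and packet-data DMA, while the code
 * registers set the defaults for normal and snooped AXI reads/writes.
 */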
/* Initialize network controller common part HW */
static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints (U-Boot uses only one rxq) */
	if ((rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* MBUS windows configuration */
	dram_target_info = mvebu_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
/* SMI / MDIO functions */

static int smi_wait_ready(struct mvpp2 *priv)
{
	u32 timeout = MVPP2_SMI_TIMEOUT;
	u32 smi_reg;

	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = readl(priv->mdio_base);
		if (timeout-- == 0) {
			printf("Error: SMI busy timeout\n");
			return -EFAULT;
		}
	} while (smi_reg & MVPP2_SMI_BUSY);

	return 0;
}
/*
 * mpp2_mdio_read - miiphy_read callback function.
 *
 * Returns 16bit phy register value, or 0xffff on error
 */
static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mvpp2 *priv = bus->priv;
	u32 smi_reg;
	u32 timeout;

	/* check parameters */
	if (addr > MVPP2_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVPP2_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(priv) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
		| (reg << MVPP2_SMI_REG_ADDR_OFFS)
		| MVPP2_SMI_OPCODE_READ;

	/* write the smi register */
	writel(smi_reg, priv->mdio_base);

	/* wait till read value is ready */
	timeout = MVPP2_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = readl(priv->mdio_base);
		if (timeout-- == 0) {
			printf("Err: SMI read ready timeout\n");
			return -EFAULT;
		}
	} while (!(smi_reg & MVPP2_SMI_READ_VALID));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
		;

	return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK;
}
/*
 * mpp2_mdio_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeds, -EINVAL on bad parameters
 */
static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			   u16 value)
{
	struct mvpp2 *priv = bus->priv;
	u32 smi_reg;

	/* check parameters */
	if (addr > MVPP2_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVPP2_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(priv) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = value << MVPP2_SMI_DATA_OFFS;
	smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
		| (reg << MVPP2_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVPP2_SMI_OPCODE_READ;

	/* write the smi register */
	writel(smi_reg, priv->mdio_base);

	return 0;
}
static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_rx_desc *rx_desc;
	struct mvpp2_bm_pool *bm_pool;
	dma_addr_t dma_addr;
	u32 bm, rx_status;
	int pool, rx_bytes, err;
	int rx_received;
	struct mvpp2_rx_queue *rxq;
	u8 *data;

	/* Process RX packets */
	rxq = port->rxqs[0];

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * comprised by the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		netdev_err(port->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* give packet to stack - skip on first n bytes */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx buffers are
	 * located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}
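
/*
 * The buffer just returned was already refilled to the BM pool above;
 * this is safe in U-Boot's single-threaded polling model because the
 * packet is consumed before the next mvpp2_recv() call can reuse it.
 */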
static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	return 0;
}
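
/*
 * Transmission is fully synchronous here: the first polling loop waits
 * until the descriptor has migrated from the aggregated queue to the
 * physical TXQ, the second until the hardware reports the frame as sent.
 */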
static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_port_power_up(port);
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}
static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}
static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
{
	writel(port->phyaddr, port->priv->iface_base +
	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));

	return 0;
}
static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate buffer area for descs and rx_buffers. This is only
	 * done once for all interfaces, as only one interface can be
	 * active at a time. Make this area DMA-safe by disabling the
	 * D-cache.
	 */

	/* Align buffer area for descs and rx_buffers to 1MiB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
					BD_SPACE, DCACHE_OFF);

	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.tx_descs =
		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.rx_descs =
		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		buffer_loc.bm_pool[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		if (priv->hw_version == MVPP21)
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
		else
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
	}

	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
		buffer_loc.rx_buffer[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		size += RX_BUFFER_SIZE;
	}

	/* Clear the complete area so that all descriptors are cleared */
	memset(bd_space, 0, size);

	/* Save base addresses for later use */
	priv->base = (void *)devfdt_get_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);

		priv->mdio_base = priv->lms_base + MVPP21_SMI;
	} else {
		priv->iface_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		priv->mdio_base = priv->iface_base + MVPP22_SMI;

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	/* Finally create and register the MDIO bus driver */
	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mpp2_mdio_read;
	bus->write = mpp2_mdio_write;
	snprintf(bus->name, sizeof(bus->name), dev->name);
	bus->priv = (void *)priv;
	priv->bus = bus;

	return mdio_register(bus);
}
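
/*
 * All descriptor rings, BM pool arrays and RX buffers of every port share
 * the single bd_space area carved up above; keeping the region D-cache
 * disabled removes the need for cache maintenance on descriptor accesses.
 */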
static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done)
		err = mvpp2_base_probe(dev->parent);

	port->priv = dev_get_priv(dev->parent);

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = (void __iomem *)devfdt_get_addr_index(
			dev->parent, priv_common_regs_num + port->id);
		if (IS_ERR(port->base))
			return PTR_ERR(port->base);
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* Set phy address of the port */
		if (port->phy_node)
			mvpp22_smi_phy_addr_cfg(port);

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}
/*
 * Empty BM pool and stop its activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}
static const struct eth_ops mvpp2_ops = {
	.start		= mvpp2_start,
	.send		= mvpp2_send,
	.recv		= mvpp2_recv,
	.stop		= mvpp2_stop,
};
static struct driver mvpp2_driver = {
	.name	= "mvpp2",
	.id	= UCLASS_ETH,
	.probe	= mvpp2_probe,
	.remove	= mvpp2_remove,
	.ops	= &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags	= DM_FLAG_ACTIVE_DMA,
};
/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
 */
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;
	int base_id_add;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	base_id_add = base_id;

	fdt_for_each_subnode(subnode, blob, node) {
		/* Increment base_id for all subnodes, also the disabled ones */
		base_id++;

		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);
		id += base_id_add;

		name = calloc(1, 16);
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
		dev_set_of_offset(dev, subnode);
	}

	return 0;
}
static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = MVPP22,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name	= "mvpp2_base",
	.id	= UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind	= mvpp2_base_bind,
	.priv_auto_alloc_size = sizeof(struct mvpp2),
};