/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * U-Boot version:
 * Copyright (C) 2014-2015 Stefan Roese <sr@denx.de>
 *
 * Based on the Linux version which is:
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <dm.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>

DECLARE_GLOBAL_DATA_PTR;

#if !defined(CONFIG_PHYLIB)
# error Marvell mvneta requires PHYLIB
#endif

/* Some linux -> U-Boot compatibility stuff */
#define netdev_err(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)		\
	printf(fmt, ##args)

#define CONFIG_NR_CPUS		1
#define ETH_HLEN		14	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define MTU			1500
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
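
/*
 * Worked example (assuming the common ARCH_DMA_MINALIGN of 32): WRAP is
 * 2 + 14 + 4 + 32 = 52 bytes, so MTU + WRAP = 1552 bytes, which ALIGN()
 * rounds up to a 1568 byte receive buffer.
 */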

#define MVNETA_SMI_TIMEOUT			10000

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC			BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK		(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK		0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define MVNETA_PHY_ADDR_MASK			0x1f
#define MVNETA_SMI				0x2004
#define MVNETA_PHY_REG_MASK			0x1f
/* SMI register fields */
#define MVNETA_SMI_DATA_OFFS			0	/* Data */
#define MVNETA_SMI_DATA_MASK			(0xffff << MVNETA_SMI_DATA_OFFS)
#define MVNETA_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define MVNETA_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr*/
#define MVNETA_SMI_OPCODE_OFFS			26	/* Write/Read opcode */
#define MVNETA_SMI_OPCODE_READ			(1 << MVNETA_SMI_OPCODE_OFFS)
#define MVNETA_SMI_READ_VALID			(1 << 27)	/* Read Valid */
#define MVNETA_SMI_BUSY				(1 << 28)	/* Busy */
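
/*
 * An SMI transaction is built by OR-ing the fields above into one 32-bit
 * word: a read places the PHY address in bits 20:16 and the register
 * address in bits 25:21 together with MVNETA_SMI_OPCODE_READ, then polls
 * MVNETA_SMI_READ_VALID; a write instead carries the data in bits 15:0
 * with the opcode bit cleared (see mvneta_mdio_read/write below).
 */
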
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_WIN_SIZE_MASK			(0xffff0000)
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_BASE_ADDR_ENABLE_BIT		0x1
#define MVNETA_PORT_ACCESS_PROTECT		0x2294
#define MVNETA_PORT_ACCESS_PROTECT_WIN0_RW	0x3
#define MVNETA_PORT_CONFIG			0x2400
#define MVNETA_UNI_PROMISC_MODE			BIT(0)
#define MVNETA_DEF_RXQ(q)			((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)			((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM			BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)			((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)			((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)			((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR		BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q) | \
						 MVNETA_DEF_RXQ_ARP(q) | \
						 MVNETA_DEF_RXQ_TCP(q) | \
						 MVNETA_DEF_RXQ_UDP(q) | \
						 MVNETA_DEF_RXQ_BPDU(q) | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define MVNETA_SDMA_BRST_SIZE_16		4
#define MVNETA_RX_BRST_SZ_MASK(burst)		((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP			BIT(4)
#define MVNETA_TX_NO_DATA_SWAP			BIT(5)
#define MVNETA_DESC_SWAP			BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)		((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define MVNETA_TX_IN_PRGRS			BIT(1)
#define MVNETA_TX_FIFO_EMPTY			BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define MVNETA_TXQ_DISABLE_SHIFT		8
#define MVNETA_TXQ_ENABLE_MASK			0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK		0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK		0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits 0..7  = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)		(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL			(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)		(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL			(0xff << 8)

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4
#define MVNETA_INTR_ENABLE			0x25b8

#define MVNETA_RXQ_CMD				0x2680
#define MVNETA_RXQ_DISABLE_SHIFT		8
#define MVNETA_RXQ_ENABLE_MASK			0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT		2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define MVNETA_GMAC2_PCS_ENABLE			BIT(3)
#define MVNETA_GMAC2_PORT_RGMII			BIT(4)
#define MVNETA_GMAC2_PORT_RESET			BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define MVNETA_GMAC_LINK_UP			BIT(0)
#define MVNETA_GMAC_SPEED_1000			BIT(1)
#define MVNETA_GMAC_SPEED_100			BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX			BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE		BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE		BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE		BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE		BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN			BIT(7)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK		0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT		16
#define MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two byte Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4 byte
 * boundary: the hardware skips them on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			16

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

struct mvneta_port {
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;

	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	int init;
	int phyaddr;
	struct phy_device *phydev;
	struct mii_dev *bus;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT		0
#define MVNETA_TX_IP_HLEN_SHIFT		8
#define MVNETA_TX_L4_UDP		BIT(16)
#define MVNETA_TX_L3_IP6		BIT(17)
#define MVNETA_TXD_IP_CSUM		BIT(18)
#define MVNETA_TXD_Z_PAD		BIT(19)
#define MVNETA_TXD_L_DESC		BIT(20)
#define MVNETA_TXD_F_DESC		BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL		BIT(30)
#define MVNETA_TX_L4_CSUM_NOT		BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
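
/*
 * Note that both descriptor layouts above add up to exactly 32 bytes,
 * matching MVNETA_DESC_ALIGNED_SIZE, so the rings can be packed back to
 * back in the dedicated buffer area set up in mvneta_probe().
 */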

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* U-Boot doesn't use the queues, so set the number to 1 */
static int rxq_number = 1;
static int txq_number = 1;
static int rxq_def;

struct buffer_location {
	struct mvneta_tx_desc *tx_descs;
	struct mvneta_rx_desc *rx_descs;
	u32 rx_buffers;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB),
 * and the driver needs far less buffer descriptor space than that, so
 * a single 1MB region is reserved for the BD area.
 */
#define BD_SPACE	(1 << 20)
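
/*
 * Layout of the BD area (filled in once in mvneta_probe()):
 * MVNETA_MAX_TXD TX descriptors first, then MVNETA_MAX_RXD RX
 * descriptors, then the RX buffers themselves, RX_BUFFER_SIZE bytes each.
 */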

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
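
/*
 * The status update register written above takes the number of processed
 * descriptors in bits 7:0 and the number of newly refilled (non-occupied)
 * descriptors in bits 23:16, which is why both counters are drained in
 * chunks of at most 255.
 */
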
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
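
/*
 * MVNETA_TXQ_CMD and MVNETA_RXQ_CMD use the low byte as per-queue enable
 * bits; writing the same bits shifted left by 8 (the DISABLE_SHIFT used
 * in mvneta_port_down() below) disables the queues again.
 */
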
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
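
/*
 * In all three filter tables each 32-bit register packs four one-byte
 * entries: bit 0 of a byte is the "pass" flag and bits 3:1 select the RX
 * queue, which is why the helpers above replicate the same byte into all
 * four lanes of val before writing it.
 */
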
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Enable PHY polling in hardware for U-Boot */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val |= MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
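
/*
 * Example: for MAC 00:50:43:12:34:56 the code above programs
 * MVNETA_MAC_ADDR_HIGH = 0x00504312 and MVNETA_MAC_ADDR_LOW = 0x3456,
 * and the unicast filter entry is selected by the low nibble (0x6) of
 * the last address byte.
 */
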
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
						     int rxq)
{
	return &pp->rxqs[rxq];
}


/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		u32 addr;

		/* U-Boot special: Fill in the rx buffer addresses */
		addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
		mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return 0;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* U-Boot special: descs already points to the preallocated area
	 * (see mvneta_init2()), no allocation is done here
	 */
	rxq->descs_phys = (dma_addr_t)rxq->descs;
	if (rxq->descs == NULL)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* U-Boot special: descs already points to the preallocated area
	 * (see mvneta_init2()), no allocation is done here
	 */
	txq->descs_phys = (dma_addr_t)txq->descs;
	if (txq->descs == NULL)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	return 0;
}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}


/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);
}

static void mvneta_adjust_link(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev = pp->phydev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
		} else {
			mvneta_port_down(pp);
		}
	}
}

static int mvneta_open(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int ret;

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		return ret;

	mvneta_adjust_link(dev);

	mvneta_start_dev(pp);

	return 0;
}

/* Initialize hw */
static int mvneta_init2(struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* U-Boot special: use preallocated area */
	pp->txqs[0].descs = buffer_loc.tx_descs;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* U-Boot special: use preallocated area */
	pp->rxqs[0].descs = buffer_loc.rx_descs;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
	}

	return 0;
}

/* platform glue : initialize decoding windows */

/*
 * Unlike the Armada 380, the Armada 3700 has two layers of decode windows
 * for GBE: the first layer is the GbE address window that resides inside
 * the GBE unit, the second layer is the fabric address window located in
 * the NIC400 (South Fabric).
 * To simplify the address decode configuration for Armada 3700, we bypass
 * the first layer of GBE decode windows by setting the first window to 4GB.
 */
static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
{
	/*
	 * Set window size to 4GB, to bypass GBE address decode, leave the
	 * work to MBUS decode window
	 */
	mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);

	/* Enable GBE address decode window 0 by setting bit 0 to 0 */
	clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
		     MVNETA_BASE_ADDR_ENABLE_BIT);

	/* Set GBE address decode window 0 to full Access (read or write) */
	setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
		     MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
}

static void mvneta_conf_mbus_windows(struct mvneta_port *pp)
{
	const struct mbus_dram_target_info *dram;
	u32 win_enable;
	u32 win_protect;
	int i;

	dram = mvebu_mbus_dram_info();
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}
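
	/*
	 * Note: win_protect is accumulated above but never written to a
	 * register in this version of the driver; only the window enable
	 * mask is programmed below.
	 */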
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}

/* Device initialization routine */
static int mvneta_init(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvneta_port *pp = dev_get_priv(dev);
	int err;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	err = mvneta_init2(pp);
	if (err < 0) {
		dev_err(dev, "can't init eth hal\n");
		return err;
	}

	mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def);

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(dev, "can't power up port\n");
		return err;
	}

	/* Call open() now as it needs to be done before running send() */
	mvneta_open(dev);

	return 0;
}

/* U-Boot only functions follow here */

/* SMI / MDIO functions */

static int smi_wait_ready(struct mvneta_port *pp)
{
	u32 timeout = MVNETA_SMI_TIMEOUT;
	u32 smi_reg;

	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = mvreg_read(pp, MVNETA_SMI);
		if (timeout-- == 0) {
			printf("Error: SMI busy timeout\n");
			return -EFAULT;
		}
	} while (smi_reg & MVNETA_SMI_BUSY);

	return 0;
}

/*
 * mvneta_mdio_read - miiphy_read callback function.
 *
 * Returns the 16-bit PHY register value, or -EFAULT on error
 */
static int mvneta_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mvneta_port *pp = bus->priv;
	u32 smi_reg;
	u32 timeout;

	/* check parameters */
	if (addr > MVNETA_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVNETA_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(pp) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (addr << MVNETA_SMI_DEV_ADDR_OFFS)
		| (reg << MVNETA_SMI_REG_ADDR_OFFS)
		| MVNETA_SMI_OPCODE_READ;

	/* write the smi register */
	mvreg_write(pp, MVNETA_SMI, smi_reg);

	/* wait till read value is ready */
	timeout = MVNETA_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = mvreg_read(pp, MVNETA_SMI);
		if (timeout-- == 0) {
			printf("Err: SMI read ready timeout\n");
			return -EFAULT;
		}
	} while (!(smi_reg & MVNETA_SMI_READ_VALID));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVNETA_SMI_TIMEOUT; timeout++)
		;

	return mvreg_read(pp, MVNETA_SMI) & MVNETA_SMI_DATA_MASK;
}

/*
 * mvneta_mdio_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EFAULT on bad parameters or
 * SMI timeout
 */
static int mvneta_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	struct mvneta_port *pp = bus->priv;
	u32 smi_reg;

	/* check parameters */
	if (addr > MVNETA_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVNETA_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(pp) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = value << MVNETA_SMI_DATA_OFFS;
	smi_reg |= (addr << MVNETA_SMI_DEV_ADDR_OFFS)
		| (reg << MVNETA_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVNETA_SMI_OPCODE_READ;

	/* write the smi register */
	mvreg_write(pp, MVNETA_SMI, smi_reg);

	return 0;
}

static int mvneta_start(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev;

	mvneta_port_power_up(pp, pp->phy_interface);

	if (!pp->init || pp->link == 0) {
		/* Set phy address of the port */
		mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr);
		phydev = phy_connect(pp->bus, pp->phyaddr, dev,
				     pp->phy_interface);

		pp->phydev = phydev;
		phy_config(phydev);
		phy_startup(phydev);
		if (!phydev->link) {
			printf("%s: No link.\n", phydev->dev->name);
			return -1;
		}

		/* Full init on first call */
		mvneta_init(dev);
		pp->init = 1;
	} else {
		/* Upon all following calls, this is enough */
		mvneta_port_up(pp);
		mvneta_port_enable(pp);
	}

	return 0;
}

static int mvneta_send(struct udevice *dev, void *packet, int length)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct mvneta_tx_queue *txq = &pp->txqs[0];
	struct mvneta_tx_desc *tx_desc;
	int sent_desc;
	u32 timeout = 0;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_desc->buf_phys_addr = (u32)(uintptr_t)packet;
	tx_desc->data_size = length;
	flush_dcache_range((ulong)packet,
			   (ulong)packet + ALIGN(length, PKTALIGN));

	/* First and Last descriptor */
	tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
	mvneta_txq_pend_desc_add(pp, txq, 1);

	/* Wait for packet to be sent (queue might help with speed here) */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	while (!sent_desc) {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return -1;
		}
		sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	}

	/* txDone has increased - hw sent packet */
	mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return 0;
}

static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int rx_done;
	struct mvneta_rx_queue *rxq;
	int rx_bytes = 0;

	/* get rx queue */
	rxq = mvneta_rxq_handle_get(pp, rxq_def);
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_done) {
		struct mvneta_rx_desc *rx_desc;
		unsigned char *data;
		u32 rx_status;

		/*
		 * No cache invalidation needed here, since the descs are
		 * located in an uncached memory region
		 */
		rx_desc = mvneta_rxq_next_desc_get(rxq);

		rx_status = rx_desc->status;
		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			return -EIO;
		}

		/* 2 bytes for marvell header. 4 bytes for crc */
		rx_bytes = rx_desc->data_size - 6;

		/* give packet to stack - skip the first 2 bytes */
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2;
		/*
		 * No cache invalidation needed here, since the rx_buffers
		 * are located in an uncached memory region
		 */
		*packetp = data;

		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
	}

	return rx_bytes;
}
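
/*
 * Note on the receive path above: *packetp points straight into the
 * uncached RX buffer (no copy is made), and the descriptor is returned
 * to the hardware immediately via mvneta_rxq_desc_num_update(). This
 * relies on U-Boot consuming the packet synchronously before the
 * hardware can reuse that buffer.
 */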

static int mvneta_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvneta_port *pp = dev_get_priv(dev);
	void *blob = (void *)gd->fdt_blob;
	int node = dev->of_offset;
	struct mii_dev *bus;
	unsigned long addr;
	void *bd_space;

	/*
	 * Allocate buffer area for descs and rx_buffers. This is only
	 * done once for all interfaces, as only one interface can be
	 * active at a time. Make this area DMA safe by disabling the
	 * D-cache.
	 */
	if (!buffer_loc.tx_descs) {
		/* Align buffer area for descs and rx_buffers to 1MiB */
		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
		mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
						DCACHE_OFF);
		buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
		buffer_loc.rx_descs = (struct mvneta_rx_desc *)
			((phys_addr_t)bd_space +
			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc));
		buffer_loc.rx_buffers = (phys_addr_t)
			(bd_space +
			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc) +
			 MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc));
	}

	pp->base = (void __iomem *)pdata->iobase;

	/* Configure MBUS address windows */
	if (of_device_is_compatible(dev, "marvell,armada-3700-neta"))
		mvneta_bypass_mbus_windows(pp);
	else
		mvneta_conf_mbus_windows(pp);

	/* PHY interface is already decoded in mvneta_ofdata_to_platdata() */
	pp->phy_interface = pdata->phy_interface;

	/* Now read phyaddr from DT */
	addr = fdtdec_get_int(blob, node, "phy", 0);
	addr = fdt_node_offset_by_phandle(blob, addr);
	pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mvneta_mdio_read;
	bus->write = mvneta_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)pp;
	pp->bus = bus;

	return mdio_register(bus);
}

static void mvneta_stop(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);

	mvneta_port_down(pp);
	mvneta_port_disable(pp);
}

static const struct eth_ops mvneta_ops = {
	.start		= mvneta_start,
	.send		= mvneta_send,
	.recv		= mvneta_recv,
	.stop		= mvneta_stop,
};

static int mvneta_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;

	pdata->iobase = dev_get_addr(dev);

	/* Get phy-mode / phy_interface from DT */
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev->of_offset, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	return 0;
}

static const struct udevice_id mvneta_ids[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};

U_BOOT_DRIVER(mvneta) = {
	.name	= "mvneta",
	.id	= UCLASS_ETH,
	.of_match = mvneta_ids,
	.ofdata_to_platdata = mvneta_ofdata_to_platdata,
	.probe	= mvneta_probe,
	.ops	= &mvneta_ops,
	.priv_auto_alloc_size = sizeof(struct mvneta_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};