// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2010
 * Vipin Kumar, STMicroelectronics, vipin.kumar@st.com.
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <eth_phy.h>
#include <log.h>
#include <miiphy.h>
#include <malloc.h>
#include <net.h>
#include <pci.h>
#include <reset.h>
#include <phys2bus.h>
#include <asm/cache.h>
#include <dm/device_compat.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <linux/printk.h>
#include <power/regulator.h>

#include "designware.h"

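/*
 * MDIO bus accessors: PHY registers are reached through the GMAC's
 * GMII address/data register pair. Each access programs the PHY
 * address and register index into the address register, sets the
 * busy bit and then polls until the MAC clears it or
 * CFG_MDIO_TIMEOUT expires.
 */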
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	ulong start;
	u16 miiaddr;
	int timeout = CFG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	};

	return -ETIMEDOUT;
}

static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	ulong start;
	u16 miiaddr;
	int ret = -ETIMEDOUT, timeout = CFG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	};

	return ret;
}

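/*
 * Optional GPIO-controlled PHY reset: the reset line is toggled
 * deasserted -> asserted -> deasserted, with the three delays taken
 * from the "snps,reset-delays-us" property parsed in
 * designware_eth_of_to_plat().
 */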
#if CONFIG_IS_ENABLED(DM_GPIO)
static int __dw_mdio_reset(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);
	struct dw_eth_pdata *pdata = dev_get_plat(dev);
	int ret;

	if (!dm_gpio_is_valid(&priv->reset_gpio))
		return 0;

	/* reset the phy */
	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[0]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 1);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[1]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[2]);

	return 0;
}

static int dw_mdio_reset(struct mii_dev *bus)
{
	struct udevice *dev = bus->priv;

	return __dw_mdio_reset(dev);
}
#endif

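/*
 * With CONFIG_DM_MDIO the same register-level accessors are exported
 * through an MDIO uclass driver. The child MDIO device reuses the
 * parent's private data, so both paths end up in dw_mdio_read() and
 * dw_mdio_write().
 */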
#if IS_ENABLED(CONFIG_DM_MDIO)
int designware_eth_mdio_read(struct udevice *mdio_dev, int addr, int devad, int reg)
{
	struct mdio_perdev_priv *pdata = dev_get_uclass_priv(mdio_dev);

	return dw_mdio_read(pdata->mii_bus, addr, devad, reg);
}

int designware_eth_mdio_write(struct udevice *mdio_dev, int addr, int devad, int reg, u16 val)
{
	struct mdio_perdev_priv *pdata = dev_get_uclass_priv(mdio_dev);

	return dw_mdio_write(pdata->mii_bus, addr, devad, reg, val);
}

#if CONFIG_IS_ENABLED(DM_GPIO)
int designware_eth_mdio_reset(struct udevice *mdio_dev)
{
	struct mdio_perdev_priv *mdio_pdata = dev_get_uclass_priv(mdio_dev);
	struct udevice *dev = mdio_pdata->mii_bus->priv;

	return __dw_mdio_reset(dev->parent);
}
#endif

static const struct mdio_ops designware_eth_mdio_ops = {
	.read = designware_eth_mdio_read,
	.write = designware_eth_mdio_write,
#if CONFIG_IS_ENABLED(DM_GPIO)
	.reset = designware_eth_mdio_reset,
#endif
};

static int designware_eth_mdio_probe(struct udevice *dev)
{
	/* Use the priv data of parent */
	dev_set_priv(dev, dev_get_priv(dev->parent));

	return 0;
}

U_BOOT_DRIVER(designware_eth_mdio) = {
	.name = "eth_designware_mdio",
	.id = UCLASS_MDIO,
	.probe = designware_eth_mdio_probe,
	.ops = &designware_eth_mdio_ops,
	.plat_auto = sizeof(struct mdio_perdev_priv),
};
#endif

static int dw_mdio_init(const char *name, void *priv)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
#if CONFIG_IS_ENABLED(DM_GPIO)
	bus->reset = dw_mdio_reset;
#endif

	bus->priv = priv;

	return mdio_register(bus);
}

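/*
 * When CONFIG_DM_MDIO is enabled, look for an "mdio" subnode in the
 * device tree and bind the MDIO uclass driver to it; if no such node
 * exists, fall back to registering the legacy MDIO bus above.
 */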
#if IS_ENABLED(CONFIG_DM_MDIO)
static int dw_dm_mdio_init(const char *name, void *priv)
{
	struct udevice *dev = priv;
	ofnode node;
	int ret;

	ofnode_for_each_subnode(node, dev_ofnode(dev)) {
		const char *subnode_name = ofnode_get_name(node);
		struct udevice *mdiodev;

		if (strcmp(subnode_name, "mdio"))
			continue;

		ret = device_bind_driver_to_node(dev, "eth_designware_mdio",
						 subnode_name, node, &mdiodev);
		if (ret)
			debug("%s: not able to bind mdio device node\n", __func__);

		return 0;
	}

	printf("%s: mdio node is missing, registering legacy mdio bus", __func__);

	return dw_mdio_init(name, priv);
}
#endif

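/*
 * The TX descriptors are set up as a chained ring: every descriptor
 * carries the bus address of its buffer and of the next descriptor,
 * and the last entry points back to the first. The whole table is
 * flushed to memory before its base address is handed to the DMA
 * engine.
 */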
static void tx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CFG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = dev_phys_to_bus(priv->dev,
				(ulong)&txbuffs[idx * CFG_ETH_BUFSIZE]);
		desc_p->dmamac_next = dev_phys_to_bus(priv->dev,
				(ulong)&desc_table_p[idx + 1]);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS |
				DESC_TXSTS_TXCHECKINSCTRL |
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = dev_phys_to_bus(priv->dev, (ulong)&desc_table_p[0]);

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((ulong)priv->tx_mac_descrtable,
			   (ulong)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel(dev_phys_to_bus(priv->dev, (ulong)&desc_table_p[0]),
	       &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

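/*
 * The RX ring is built the same way, except that every descriptor is
 * immediately handed to the DMA engine (OWNBYDMA set) so the MAC can
 * start filling the buffers as soon as reception is enabled.
 */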
static void rx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/* Before passing buffers to GMAC we need to make sure zeros
	 * written there right after "priv" structure allocation were
	 * flushed into RAM.
	 * Otherwise there's a chance to get some of them flushed in RAM when
	 * GMAC is already pushing data to RAM via DMA. This way incoming from
	 * GMAC data will be corrupted. */
	flush_dcache_range((ulong)rxbuffs, (ulong)rxbuffs + RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CFG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = dev_phys_to_bus(priv->dev,
				(ulong)&rxbuffs[idx * CFG_ETH_BUFSIZE]);
		desc_p->dmamac_next = dev_phys_to_bus(priv->dev,
				(ulong)&desc_table_p[idx + 1]);

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) |
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = dev_phys_to_bus(priv->dev, (ulong)&desc_table_p[0]);

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((ulong)priv->rx_mac_descrtable,
			   (ulong)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel(dev_phys_to_bus(priv->dev, (ulong)&desc_table_p[0]),
	       &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

static int _dw_write_hwaddr(struct dw_eth_dev *priv, u8 *mac_id)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

static int dw_adjust_link(struct dw_eth_dev *priv, struct eth_mac_regs *mac_p,
			  struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;
	else
		conf &= ~MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static void _dw_eth_halt(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

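/*
 * Bring-up sequence: soft-reset the DMA engine (selecting the MII port
 * first when a MII PHY is used), reprogram the MAC address that the
 * reset just cleared, rebuild both descriptor rings, configure the bus
 * and operation modes, start the DMA, and finally start the PHY and
 * program the negotiated speed/duplex into the MAC.
 */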
int designware_eth_init(struct dw_eth_dev *priv, u8 *enetaddr)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;
	int ret;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	/*
	 * When a MII PHY is used, we must set the PS bit for the DMA
	 * reset to succeed.
	 */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_MII)
		writel(readl(&mac_p->conf) | MII_PORTSELECT, &mac_p->conf);
	else
		writel(readl(&mac_p->conf) & ~MII_PORTSELECT, &mac_p->conf);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CFG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -ETIMEDOUT;
		}

		mdelay(100);
	};

	/*
	 * Soft reset above clears HW address registers.
	 * So we have to set it here once again.
	 */
	_dw_write_hwaddr(priv, enetaddr);

	rx_descs_init(priv);
	tx_descs_init(priv);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}

	ret = dw_adjust_link(priv, mac_p, priv->phydev);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_enable(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;

	if (!priv->phydev->link)
		return -EIO;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

#define ETH_ZLEN	60

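/*
 * Transmit path: claim the current descriptor (it must be owned by the
 * CPU), copy the frame into its buffer and pad it to the minimum
 * Ethernet length, flush the data and the updated descriptor from the
 * cache, hand the descriptor back to the DMA and kick the TX poll
 * demand register.
 */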
static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = dev_bus_to_phys(priv->dev, desc_p->dmamac_addr);
	ulong data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we flush the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy((void *)data_start, packet, length);
	if (length < ETH_ZLEN) {
		memset(&((char *)data_start)[length], 0, ETH_ZLEN - length);
		length = ETH_ZLEN;
	}

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK);

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			      DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CFG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

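/*
 * Receive path: peek at the current RX descriptor. While the DMA still
 * owns it, return -EAGAIN; otherwise invalidate the received data and
 * return its length. The descriptor is only recycled later, from
 * _dw_free_pkt().
 */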
static int _dw_eth_recv(struct dw_eth_dev *priv, uchar **packetp)
{
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = -EAGAIN;
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = dev_bus_to_phys(priv->dev, desc_p->dmamac_addr);
	ulong data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {

		length = (status & DESC_RXSTS_FRMLENMSK) >>
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);
		*packetp = (uchar *)(ulong)dev_bus_to_phys(priv->dev,
							   desc_p->dmamac_addr);
	}

	return length;
}

static int _dw_free_pkt(struct dw_eth_dev *priv)
{
	u32 desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);

	/*
	 * Make the current descriptor valid again and go to
	 * the next one
	 */
	desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

	/* Flush only status field - others weren't changed */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CFG_RX_DESCR_NUM)
		desc_num = 0;
	priv->rx_currdescnum = desc_num;

	return 0;
}

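/*
 * Connect to the PHY, either through the DM MDIO/ETH_PHY helpers or
 * through the legacy bus (honouring CONFIG_PHY_ADDR when set), limit
 * the supported modes to gigabit features and the optional "max-speed"
 * value, and configure the PHY.
 */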
static int dw_phy_init(struct dw_eth_dev *priv, void *dev)
{
	struct phy_device *phydev;
	int ret;

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		eth_phy_set_mdio_bus(dev, NULL);

#if IS_ENABLED(CONFIG_DM_MDIO)
	phydev = dm_eth_phy_connect(dev);
	if (!phydev)
		return -ENODEV;
#else
	int phy_addr = -1;

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		phy_addr = eth_phy_get_addr(dev);

#ifdef CONFIG_PHY_ADDR
	phy_addr = CONFIG_PHY_ADDR;
#endif

	phydev = phy_connect(priv->bus, phy_addr, dev, priv->interface);
	if (!phydev)
		return -ENODEV;
#endif

	phydev->supported &= PHY_GBIT_FEATURES;
	if (priv->max_speed) {
		ret = phy_set_supported(phydev, priv->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

static int designware_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	int ret;

	ret = designware_eth_init(priv, pdata->enetaddr);
	if (ret)
		return ret;
	ret = designware_eth_enable(priv);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_send(struct udevice *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_send(priv, packet, length);
}

int designware_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_recv(priv, packetp);
}

int designware_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_free_pkt(priv);
}

void designware_eth_stop(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_halt(priv);
}

int designware_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_write_hwaddr(priv, pdata->enetaddr);
}

static int designware_eth_bind(struct udevice *dev)
{
	if (IS_ENABLED(CONFIG_PCI)) {
		static int num_cards;
		char name[20];

		/* Create a unique device name for PCI type devices */
		if (device_is_on_pci_bus(dev)) {
			sprintf(name, "eth_designware#%u", num_cards++);
			device_set_name(dev, name);
		}
	}

	return 0;
}

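/*
 * Probe: enable the clocks listed in the device tree, switch on an
 * optional "phy-supply" regulator, release any resets, work out the
 * register base (from PCI BAR0 when the controller sits on a PCI bus),
 * register the MDIO bus and connect the PHY. On failure the MDIO bus
 * and clocks are released again.
 */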
int designware_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	phys_addr_t iobase = pdata->iobase;
	void *ioaddr;
	int ret, err;
	struct reset_ctl_bulk reset_bulk;
#ifdef CONFIG_CLK
	int i, clock_nb;

	priv->clock_count = 0;
	clock_nb = dev_count_phandle_with_args(dev, "clocks", "#clock-cells",
					       0);
	if (clock_nb > 0) {
		priv->clocks = devm_kcalloc(dev, clock_nb, sizeof(struct clk),
					    GFP_KERNEL);
		if (!priv->clocks)
			return -ENOMEM;

		for (i = 0; i < clock_nb; i++) {
			err = clk_get_by_index(dev, i, &priv->clocks[i]);
			if (err < 0)
				break;

			err = clk_enable(&priv->clocks[i]);
			if (err && err != -ENOSYS && err != -ENOTSUPP) {
				pr_err("failed to enable clock %d\n", i);
				goto clk_err;
			}
			priv->clock_count++;
		}
	} else if (clock_nb != -ENOENT) {
		pr_err("failed to get clock phandle(%d)\n", clock_nb);
		return clock_nb;
	}
#endif

#if defined(CONFIG_DM_REGULATOR)
	struct udevice *phy_supply;

	ret = device_get_supply_regulator(dev, "phy-supply",
					  &phy_supply);
	if (ret) {
		debug("%s: No phy supply\n", dev->name);
	} else {
		ret = regulator_set_enable(phy_supply, true);
		if (ret) {
			puts("Error enabling phy supply\n");
			return ret;
		}
	}
#endif

	ret = reset_get_bulk(dev, &reset_bulk);
	if (ret)
		dev_warn(dev, "Can't get reset: %d\n", ret);
	else
		reset_deassert_bulk(&reset_bulk);

	/*
	 * If we are on PCI bus, either directly attached to a PCI root port,
	 * or via a PCI bridge, fill in plat before we probe the hardware.
	 */
	if (IS_ENABLED(CONFIG_PCI) && device_is_on_pci_bus(dev)) {
		u32 pcibase;

		dm_pci_read_config32(dev, PCI_BASE_ADDRESS_0, &pcibase);
		pcibase &= PCI_BASE_ADDRESS_MEM_MASK;

		iobase = dm_pci_mem_to_phys(dev, pcibase);
		pdata->iobase = iobase;
		pdata->phy_interface = PHY_INTERFACE_MODE_RMII;
	}

	debug("%s, iobase=%pa, priv=%p\n", __func__, &iobase, priv);
	ioaddr = phys_to_virt(iobase);
	priv->mac_regs_p = (struct eth_mac_regs *)ioaddr;
	priv->dma_regs_p = (struct eth_dma_regs *)(ioaddr + DW_DMA_BASE_OFFSET);
	priv->interface = pdata->phy_interface;
	priv->max_speed = pdata->max_speed;

#if IS_ENABLED(CONFIG_DM_MDIO)
	ret = dw_dm_mdio_init(dev->name, dev);
#else
	ret = dw_mdio_init(dev->name, dev);
#endif
	if (ret) {
		err = ret;
		goto mdio_err;
	}
	priv->bus = miiphy_get_dev_by_name(dev->name);
	priv->dev = dev;

	ret = dw_phy_init(priv, dev);
	debug("%s, ret=%d\n", __func__, ret);
	if (!ret)
		return 0;

	/* continue here for cleanup if no PHY found */
	err = ret;
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);
mdio_err:

#ifdef CONFIG_CLK
clk_err:
	ret = clk_release_all(priv->clocks, priv->clock_count);
	if (ret)
		pr_err("failed to disable all clocks\n");

#endif
	return err;
}

static int designware_eth_remove(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

#ifdef CONFIG_CLK
	return clk_release_all(priv->clocks, priv->clock_count);
#else
	return 0;
#endif
}

const struct eth_ops designware_eth_ops = {
	.start = designware_eth_start,
	.send = designware_eth_send,
	.recv = designware_eth_recv,
	.free_pkt = designware_eth_free_pkt,
	.stop = designware_eth_stop,
	.write_hwaddr = designware_eth_write_hwaddr,
};

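/*
 * Device-tree parsing: read the register base and "phy-mode", the
 * optional "max-speed" limit, and the "snps,reset-gpio" /
 * "snps,reset-delays-us" PHY reset description.
 */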
int designware_eth_of_to_plat(struct udevice *dev)
{
	struct dw_eth_pdata *dw_pdata = dev_get_plat(dev);
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct dw_eth_dev *priv = dev_get_priv(dev);
#endif
	struct eth_pdata *pdata = &dw_pdata->eth_pdata;
#if CONFIG_IS_ENABLED(DM_GPIO)
	int reset_flags = GPIOD_IS_OUT;
#endif
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	pdata->max_speed = dev_read_u32_default(dev, "max-speed", 0);

#if CONFIG_IS_ENABLED(DM_GPIO)
	if (dev_read_bool(dev, "snps,reset-active-low"))
		reset_flags |= GPIOD_ACTIVE_LOW;

	ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
				   &priv->reset_gpio, reset_flags);
	if (ret == 0) {
		ret = dev_read_u32_array(dev, "snps,reset-delays-us",
					 dw_pdata->reset_delays, 3);
	} else if (ret == -ENOENT) {
		ret = 0;
	}
#endif

	return ret;
}

static const struct udevice_id designware_eth_ids[] = {
	{ .compatible = "allwinner,sun7i-a20-gmac" },
	{ .compatible = "amlogic,meson6-dwmac" },
	{ .compatible = "st,stm32-dwmac" },
	{ .compatible = "snps,arc-dwmac-3.70a" },
	{ }
};

U_BOOT_DRIVER(eth_designware) = {
	.name = "eth_designware",
	.id = UCLASS_ETH,
	.of_match = designware_eth_ids,
	.of_to_plat = designware_eth_of_to_plat,
	.bind = designware_eth_bind,
	.probe = designware_eth_probe,
	.remove = designware_eth_remove,
	.ops = &designware_eth_ops,
	.priv_auto = sizeof(struct dw_eth_dev),
	.plat_auto = sizeof(struct dw_eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};

static struct pci_device_id supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_EMAC) },
	{ }
};

U_BOOT_PCI_DEVICE(eth_designware, supported);