/*
 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
 * (C) Copyright 2008 Armadeus Systems nc
 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include "fec_mxc.h"

#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Time out the transfer after 5 ms. The timeout is usually a bit longer,
 * since the code in the tight polling loops this timeout is used in adds
 * some overhead.
 */
#define FEC_XFER_TIMEOUT	5000

/*
 * The standard 32-byte DMA alignment does not work on mx6solox, which
 * requires 64-byte alignment in the DMA RX FEC buffer.
 * Introduce FEC_DMA_RX_MINALIGN, which covers the mx6solox needs and also
 * satisfies the 32-byte alignment required on the other SoCs.
 */
#define FEC_DMA_RX_MINALIGN	64

#ifndef CONFIG_MII
#error "CONFIG_MII has to be defined!"
#endif

#ifndef CONFIG_FEC_XCV_TYPE
#define CONFIG_FEC_XCV_TYPE MII100
#endif

/*
 * The i.MX28 operates with packets in big endian. We need to swap them before
 * sending and after receiving.
 */
#ifdef CONFIG_MX28
#define CONFIG_FEC_MXC_SWAP_PACKET
#endif

#define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN / sizeof(struct fec_bd))
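
/*
 * Worked example (sizes are illustrative): with 32-byte cachelines and the
 * 8-byte struct fec_bd, RXDESC_PER_CACHELINE is 4, which is why fec_recv()
 * below only marks RX descriptors free again in whole-cacheline batches of
 * four.
 */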

/* Check various alignment issues at compile time */
#if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
#error "ARCH_DMA_MINALIGN must be multiple of 16!"
#endif

#if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
	(PKTALIGN % ARCH_DMA_MINALIGN != 0))
#error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
#endif

#undef DEBUG

struct nbuf {
	uint8_t data[1500];	/**< actual data */
	int length;		/**< actual length */
	int used;		/**< buffer in use or not */
	uint8_t head[16];	/**< MAC header(6 + 6 + 2) + 2(aligned) */
};

#ifdef CONFIG_FEC_MXC_SWAP_PACKET
static void swap_packet(uint32_t *packet, int length)
{
	int i;

	for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
		packet[i] = __swab32(packet[i]);
}
#endif

/*
 * MII-interface related functions
 */
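/**
 * Read a PHY register via the FEC's MDIO interface
 * @param[in] eth register base of the FEC that owns the MDIO bus
 * @param[in] phyAddr address of the PHY on that bus
 * @param[in] regAddr PHY register to read
 * @return register value on success, -1 on timeout
 */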
static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
		uint8_t regAddr)
{
	uint32_t reg;		/* convenient holder for the PHY register */
	uint32_t phy;		/* convenient holder for the PHY */
	uint32_t start;
	int val;

	/*
	 * reading from any PHY's register is done by properly
	 * programming the FEC's MII data register.
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);
	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;

	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
			phy | reg, &eth->mii_data);

	/*
	 * wait for the related interrupt
	 */
	start = get_timer(0);
	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
			printf("Read MDIO failed...\n");
			return -1;
		}
	}

	/*
	 * clear mii interrupt bit
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);

	/*
	 * it's now safe to read the PHY's register
	 */
	val = (unsigned short)readl(&eth->mii_data);
	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
			regAddr, val);
	return val;
}

static void fec_mii_setspeed(struct ethernet_regs *eth)
{
	/*
	 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
	 * and do not drop the Preamble.
	 */
	register u32 speed = DIV_ROUND_UP(imx_get_fecclk(), 5000000);
#ifdef FEC_QUIRK_ENET_MAC
	speed--;
#endif
	speed <<= 1;
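	/*
	 * Worked example (clock value is illustrative, not from any specific
	 * board): with a 66 MHz FEC clock, DIV_ROUND_UP(66000000, 5000000)
	 * gives 14, so MDC runs at roughly 66 MHz / (14 * 2) ~ 2.36 MHz,
	 * below the 2.5 MHz MDIO limit. The FEC_QUIRK_ENET_MAC decrement
	 * compensates for ENET variants whose divisor is (MII_SPEED + 1).
	 */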
	writel(speed, &eth->mii_speed);
	debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
}
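
/**
 * Write a PHY register via the FEC's MDIO interface
 * @param[in] eth register base of the FEC that owns the MDIO bus
 * @param[in] phyAddr address of the PHY on that bus
 * @param[in] regAddr PHY register to write
 * @param[in] data value to write into the register
 * @return 0 on success, -1 on timeout
 */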
static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
		uint8_t regAddr, uint16_t data)
{
	uint32_t reg;		/* convenient holder for the PHY register */
	uint32_t phy;		/* convenient holder for the PHY */
	uint32_t start;

	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;

	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
		FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);

	/*
	 * wait for the MII interrupt
	 */
	start = get_timer(0);
	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
			printf("Write MDIO failed...\n");
			return -1;
		}
	}

	/*
	 * clear MII interrupt bit
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);
	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
			regAddr, data);

	return 0;
}

static int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr,
		int regAddr)
{
	return fec_mdio_read(bus->priv, phyAddr, regAddr);
}

static int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr,
		int regAddr, u16 data)
{
	return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
}

#ifndef CONFIG_PHYLIB
static int miiphy_restart_aneg(struct eth_device *dev)
{
	int ret = 0;
#if !defined(CONFIG_FEC_MXC_NO_ANEG)
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct ethernet_regs *eth = fec->bus->priv;

	/*
	 * Wake up from sleep if necessary
	 * Reset PHY, then delay 300ns
	 */
#ifdef CONFIG_MX27
	fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
#endif
	fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
	udelay(1000);

	/*
	 * Set the auto-negotiation advertisement register bits
	 */
	fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
			LPA_100FULL | LPA_100HALF | LPA_10FULL |
			LPA_10HALF | PHY_ANLPAR_PSB_802_3);
	fec_mdio_write(eth, fec->phy_id, MII_BMCR,
			BMCR_ANENABLE | BMCR_ANRESTART);

	if (fec->mii_postcall)
		ret = fec->mii_postcall(fec->phy_id);

#endif
	return ret;
}

static int miiphy_wait_aneg(struct eth_device *dev)
{
	uint32_t start;
	int status;
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct ethernet_regs *eth = fec->bus->priv;

	/*
	 * Wait for AN completion
	 */
	start = get_timer(0);
	do {
		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
			printf("%s: Autonegotiation timeout\n", dev->name);
			return -1;
		}

		status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
		if (status < 0) {
			printf("%s: Autonegotiation failed. status: %d\n",
					dev->name, status);
			return -1;
		}
	} while (!(status & BMSR_LSTATUS));

	return 0;
}
#endif

static int fec_rx_task_enable(struct fec_priv *fec)
{
	writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
	return 0;
}

static int fec_rx_task_disable(struct fec_priv *fec)
{
	return 0;
}

static int fec_tx_task_enable(struct fec_priv *fec)
{
	writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
	return 0;
}

static int fec_tx_task_disable(struct fec_priv *fec)
{
	return 0;
}

/**
 * Initialize receive task's buffer descriptors
 * @param[in] fec all we know about the device yet
 * @param[in] count number of receive buffer descriptors to initialize
 * @param[in] dsize desired size of each receive buffer
 *
 * Init all RX descriptors to default values.
 */
static void fec_rbd_init(struct fec_priv *fec, int count, int dsize)
{
	uint32_t size;
	uint8_t *data;
	int i;

	/*
	 * Reload the RX descriptors with default values and wipe
	 * the RX buffers.
	 */
	size = roundup(dsize, ARCH_DMA_MINALIGN);
	for (i = 0; i < count; i++) {
		data = (uint8_t *)fec->rbd_base[i].data_pointer;
		memset(data, 0, dsize);
		flush_dcache_range((uint32_t)data, (uint32_t)data + size);

		fec->rbd_base[i].status = FEC_RBD_EMPTY;
		fec->rbd_base[i].data_length = 0;
	}

	/* Mark the last RBD to close the ring. */
	fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
	fec->rbd_index = 0;

	flush_dcache_range((unsigned)fec->rbd_base,
			   (unsigned)fec->rbd_base + size);
}

/**
 * Initialize transmit task's buffer descriptors
 * @param[in] fec all we know about the device yet
 *
 * Transmit buffers are created externally. We only have to init the BDs here.
 * Note: There is a race condition in the hardware. When only one BD is in
 * use it must be marked with the WRAP bit to use it for every transmit.
 * This bit in combination with the READY bit results in a double transmit
 * of each data buffer. It seems the state machine checks READY earlier than
 * resetting it after the first transfer.
 * Using two BDs solves this issue.
 */
static void fec_tbd_init(struct fec_priv *fec)
{
	unsigned addr = (unsigned)fec->tbd_base;
	unsigned size = roundup(2 * sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);

	memset(fec->tbd_base, 0, size);
	fec->tbd_base[0].status = 0;
	fec->tbd_base[1].status = FEC_TBD_WRAP;
	fec->tbd_index = 0;
	flush_dcache_range(addr, addr + size);
}

/**
 * Mark the given read buffer descriptor as free
 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
 * @param[in] pRbd buffer descriptor to mark free again
 */
static void fec_rbd_clean(int last, struct fec_bd *pRbd)
{
	unsigned short flags = FEC_RBD_EMPTY;

	if (last)
		flags |= FEC_RBD_WRAP;
	writew(flags, &pRbd->status);
	writew(0, &pRbd->data_length);
}

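/**
 * Fetch the MAC address for this FEC instance from the SoC fuses
 * @param[in] dev ethernet device (unused here)
 * @param[in] dev_id FEC controller index whose fuses are read
 * @param[out] mac buffer for the six MAC address bytes
 * @return 0 if the fused address is valid, nonzero otherwise
 */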
static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
		unsigned char *mac)
{
	imx_get_mac_from_fuse(dev_id, mac);
	return !is_valid_ethaddr(mac);
}

static int fec_set_hwaddr(struct eth_device *dev)
{
	uchar *mac = dev->enetaddr;
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	writel(0, &fec->eth->iaddr1);
	writel(0, &fec->eth->iaddr2);
	writel(0, &fec->eth->gaddr1);
	writel(0, &fec->eth->gaddr2);

	/*
	 * Set physical address
	 */
	writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
			&fec->eth->paddr1);
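	/*
	 * The lower 16 bits of PADDR2 carry the 0x8808 MAC control frame
	 * type used for pause frames; only the upper half holds MAC address
	 * bytes 4 and 5.
	 */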
	writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);

	return 0;
}

/*
 * Do initial configuration of the FEC registers
 */
static void fec_reg_setup(struct fec_priv *fec)
{
	uint32_t rcntrl;

	/*
	 * Set interrupt mask register
	 */
	writel(0x00000000, &fec->eth->imask);

	/*
	 * Clear FEC-Lite interrupt event register(IEVENT)
	 */
	writel(0xffffffff, &fec->eth->ievent);

	/*
	 * Set FEC-Lite receive control register(R_CNTRL):
	 */

	/* Start with frame length = 1518, common for all modes. */
	rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
	if (fec->xcv_type != SEVENWIRE)	/* xMII modes */
		rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
	if (fec->xcv_type == RGMII)
		rcntrl |= FEC_RCNTRL_RGMII;
	else if (fec->xcv_type == RMII)
		rcntrl |= FEC_RCNTRL_RMII;

	writel(rcntrl, &fec->eth->r_cntrl);
}

/**
 * Start the FEC engine
 * @param[in] edev Our device to handle
 */
static int fec_open(struct eth_device *edev)
{
	struct fec_priv *fec = (struct fec_priv *)edev->priv;
	int speed;
	uint32_t addr, size;
	int i;

	debug("fec_open: fec_open(dev)\n");
	/* full-duplex, heartbeat disabled */
	writel(1 << 2, &fec->eth->x_cntrl);
	fec->rbd_index = 0;

	/* Invalidate all descriptors */
	for (i = 0; i < FEC_RBD_NUM - 1; i++)
		fec_rbd_clean(0, &fec->rbd_base[i]);
	fec_rbd_clean(1, &fec->rbd_base[i]);

	/* Flush the descriptors into RAM */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
			ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->rbd_base;
	flush_dcache_range(addr, addr + size);

#ifdef FEC_QUIRK_ENET_MAC
	/* Enable ENET HW endian SWAP */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
	       &fec->eth->ecntrl);
	/* Enable ENET store and forward mode */
	writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
	       &fec->eth->x_wmrk);
#endif
	/*
	 * Enable FEC-Lite controller
	 */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
	       &fec->eth->ecntrl);
#if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
	udelay(100);
	/*
	 * setup the MII gasket for RMII mode
	 */

	/* disable the gasket */
	writew(0, &fec->eth->miigsk_enr);

	/* wait for the gasket to be disabled */
	while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
		udelay(2);

	/* configure gasket for RMII, 50 MHz, no loopback, and no echo */
	writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);

	/* re-enable the gasket */
	writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);

	/* wait until MII gasket is ready */
	int max_loops = 10;
	while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
		if (--max_loops <= 0) {
			printf("WAIT for MII Gasket ready timed out\n");
			break;
		}
	}
#endif

#ifdef CONFIG_PHYLIB
	{
		/* Start up the PHY */
		int ret = phy_startup(fec->phydev);

		if (ret) {
			printf("Could not initialize PHY %s\n",
			       fec->phydev->dev->name);
			return ret;
		}
		speed = fec->phydev->speed;
	}
#else
	miiphy_wait_aneg(edev);
	speed = miiphy_speed(edev->name, fec->phy_id);
	miiphy_duplex(edev->name, fec->phy_id);
#endif

#ifdef FEC_QUIRK_ENET_MAC
	{
		u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
		u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
		if (speed == _1000BASET)
			ecr |= FEC_ECNTRL_SPEED;
		else if (speed != _100BASET)
			rcr |= FEC_RCNTRL_RMII_10T;
		writel(ecr, &fec->eth->ecntrl);
		writel(rcr, &fec->eth->r_cntrl);
	}
#endif
	debug("%s:Speed=%i\n", __func__, speed);

	/*
	 * Enable SmartDMA receive task
	 */
	fec_rx_task_enable(fec);

	udelay(100000);
	return 0;
}

static int fec_init(struct eth_device *dev, bd_t *bd)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
	int i;

	/* Initialize MAC address */
	fec_set_hwaddr(dev);

	/*
	 * Setup transmit descriptors, there are two in total.
	 */
	fec_tbd_init(fec);

	/* Setup receive descriptors. */
	fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE);

	fec_reg_setup(fec);

	if (fec->xcv_type != SEVENWIRE)
		fec_mii_setspeed(fec->bus->priv);

	/*
	 * Set Opcode/Pause Duration Register
	 */
	writel(0x00010020, &fec->eth->op_pause);	/* FIXME 0xffff0020; */
	writel(0x2, &fec->eth->x_wmrk);
	/*
	 * Set multicast address filter
	 */
	writel(0x00000000, &fec->eth->gaddr1);
	writel(0x00000000, &fec->eth->gaddr2);

	/* clear MIB RAM */
	for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
		writel(0, i);

	/* FIFO receive start register */
	writel(0x520, &fec->eth->r_fstart);

	/* size and address of each buffer */
	writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
	writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
	writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);

#ifndef CONFIG_PHYLIB
	if (fec->xcv_type != SEVENWIRE)
		miiphy_restart_aneg(dev);
#endif
	fec_open(dev);
	return 0;
}

/**
 * Halt the FEC engine
 * @param[in] dev Our device to handle
 */
static void fec_halt(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	int counter = 0xffff;

	/*
	 * issue graceful stop command to the FEC transmitter if necessary
	 */
	writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
	       &fec->eth->x_cntrl);

	debug("eth_halt: wait for stop regs\n");
	/*
	 * wait for graceful stop to register
	 */
	while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
		udelay(1);

	/*
	 * Disable SmartDMA tasks
	 */
	fec_tx_task_disable(fec);
	fec_rx_task_disable(fec);

	/*
	 * Disable the Ethernet Controller
	 * Note: this will also reset the BD index counter!
	 */
	writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
	       &fec->eth->ecntrl);
	fec->rbd_index = 0;
	fec->tbd_index = 0;
	debug("eth_halt: done\n");
}

/**
 * Transmit one frame
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success
 */
static int fec_send(struct eth_device *dev, void *packet, int length)
{
	unsigned int status;
	uint32_t size, end;
	uint32_t addr;
	int timeout = FEC_XFER_TIMEOUT;
	int ret = 0;

	/*
	 * This routine transmits one frame. It only accepts 6-byte Ethernet
	 * addresses.
	 */
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	/*
	 * Check for valid length of data.
	 */
	if ((length > 1500) || (length <= 0)) {
		printf("Payload (%d) too large\n", length);
		return -1;
	}

	/*
	 * Setup the transmit buffer. We are always using the first buffer for
	 * transmission, the second will be empty and only used to stop the DMA
	 * engine. We also flush the packet to RAM here to avoid cache trouble.
	 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
	swap_packet((uint32_t *)packet, length);
#endif

	addr = (uint32_t)packet;
	end = roundup(addr + length, ARCH_DMA_MINALIGN);
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	flush_dcache_range(addr, end);

	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);

	/*
	 * update BD's status now
	 * This block:
	 * - is always the last in a chain (means no chain)
	 * - should transmit the CRC
	 * - might be the last BD in the list, so the address counter should
	 *   wrap (-> keep the WRAP flag)
	 */
	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
	writew(status, &fec->tbd_base[fec->tbd_index].status);

	/*
	 * Flush data cache. This code flushes both TX descriptors to RAM.
	 * After this code, the descriptors will be safely in RAM and we
	 * can start DMA.
	 */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->tbd_base;
	flush_dcache_range(addr, addr + size);

	/*
	 * Below we read the DMA descriptor's last four bytes back from the
	 * DRAM. This is important in order to make sure that all WRITE
	 * operations on the bus that were triggered by previous cache FLUSH
	 * have completed.
	 *
	 * Otherwise, on MX28, it is possible to observe a corruption of the
	 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
	 * for the bus structure of MX28. The scenario is as follows:
	 *
	 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
	 *    to DRAM due to flush_dcache_range()
	 * 2) ARM core writes the FEC registers via AHB_ARB2
	 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
	 *
	 * Note that 2) does sometimes finish before 1) due to reordering of
	 * WRITE accesses on the AHB bus, therefore triggering 3) before the
	 * DMA descriptor is fully written into DRAM. This results in
	 * occasional corruption of the DMA descriptor.
	 */
	readl(addr + size - 4);

	/*
	 * Enable SmartDMA transmit task
	 */
	fec_tx_task_enable(fec);

	/*
	 * Wait until frame is sent. On each turn of the wait cycle, we must
	 * invalidate data cache to see what's really in RAM. Also, we need
	 * a barrier here.
	 */
	while (--timeout) {
		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
			break;
	}

	if (!timeout) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The TDAR bit is cleared when the descriptors are all out from TX
	 * but on mx6solox we noticed that the READY bit is still not cleared
	 * right after TDAR.
	 * These are two distinct signals, and in IC simulation, we found that
	 * TDAR always gets cleared prior to the READY bit of the last BD
	 * becoming cleared.
	 * In mx6solox, we use a later version of the FEC IP. It looks like
	 * this intrinsic behaviour of the TDAR bit has changed in this newer
	 * FEC version.
	 *
	 * Fix this by polling the READY bit of BD after the TDAR polling,
	 * which covers the mx6solox case and does not harm the other SoCs.
	 */
	timeout = FEC_XFER_TIMEOUT;
	while (--timeout) {
		invalidate_dcache_range(addr, addr + size);
		if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
		    FEC_TBD_READY))
			break;
	}

	if (!timeout)
		ret = -EINVAL;

out:
	debug("fec_send: status 0x%x index %d ret %i\n",
	      readw(&fec->tbd_base[fec->tbd_index].status),
	      fec->tbd_index, ret);
	/* for next transmission use the other buffer */
	if (fec->tbd_index)
		fec->tbd_index = 0;
	else
		fec->tbd_index = 1;

	return ret;
}

/**
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read
 */
static int fec_recv(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
	unsigned long ievent;
	int frame_length, len = 0;
	struct nbuf *frame;
	uint16_t bd_status;
	uint32_t addr, size, end;
	int i;
	ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);

	/*
	 * Check if any critical events have happened
	 */
	ievent = readl(&fec->eth->ievent);
	writel(ievent, &fec->eth->ievent);
	debug("fec_recv: ievent 0x%lx\n", ievent);
	if (ievent & FEC_IEVENT_BABR) {
		fec_halt(dev);
		fec_init(dev, fec->bd);
		printf("some error: 0x%08lx\n", ievent);
		return 0;
	}
	if (ievent & FEC_IEVENT_HBERR) {
		/* Heartbeat error */
		writel(0x00000001 | readl(&fec->eth->x_cntrl),
		       &fec->eth->x_cntrl);
	}
	if (ievent & FEC_IEVENT_GRA) {
		/* Graceful stop complete */
		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
			fec_halt(dev);
			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
			       &fec->eth->x_cntrl);
			fec_init(dev, fec->bd);
		}
	}

	/*
	 * Read the buffer status. Before the status can be read, the data
	 * cache must be invalidated, because the data in RAM might have been
	 * changed by DMA. The descriptors are properly aligned to cachelines
	 * so there's no need to worry they'd overlap.
	 *
	 * WARNING: By invalidating the descriptor here, we also invalidate
	 * the descriptors surrounding this one. Therefore we can NOT change
	 * the contents of this descriptor nor the surrounding ones. The
	 * problem is that in order to mark the descriptor as processed, we
	 * need to change the descriptor. The solution is to mark the whole
	 * cache line when all descriptors in the cache line are processed.
	 */
	addr = (uint32_t)rbd;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	invalidate_dcache_range(addr, addr + size);

	bd_status = readw(&rbd->status);
	debug("fec_recv: status 0x%x\n", bd_status);

	if (!(bd_status & FEC_RBD_EMPTY)) {
		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
		    ((readw(&rbd->data_length) - 4) > 14)) {
			/*
			 * Get buffer address and size
			 */
			frame = (struct nbuf *)readl(&rbd->data_pointer);
			frame_length = readw(&rbd->data_length) - 4;
			/*
			 * Invalidate data cache over the buffer
			 */
			addr = (uint32_t)frame;
			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
			addr &= ~(ARCH_DMA_MINALIGN - 1);
			invalidate_dcache_range(addr, end);

			/*
			 * Fill the buffer and pass it to upper layers
			 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
			swap_packet((uint32_t *)frame->data, frame_length);
#endif
			memcpy(buff, frame->data, frame_length);
			net_process_received_packet(buff, frame_length);
			len = frame_length;
		} else {
			if (bd_status & FEC_RBD_ERR)
				printf("error frame: 0x%08lx 0x%08x\n",
				       (ulong)rbd->data_pointer, bd_status);
		}

		/*
		 * Free the current buffer, restart the engine and move
		 * forward to the next buffer. Here we check if the whole
		 * cacheline of descriptors was already processed and if so,
		 * we mark it free as a whole.
		 */
		size = RXDESC_PER_CACHELINE - 1;
		if ((fec->rbd_index & size) == size) {
			i = fec->rbd_index - size;
			addr = (uint32_t)&fec->rbd_base[i];
			for (; i <= fec->rbd_index; i++) {
				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
					      &fec->rbd_base[i]);
			}
			flush_dcache_range(addr,
					   addr + ARCH_DMA_MINALIGN);
		}

		fec_rx_task_enable(fec);
		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
	}
	debug("fec_recv: stop\n");

	return len;
}

static void fec_set_dev_name(char *dest, int dev_id)
{
	sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
}

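/**
 * Allocate the buffer descriptor rings and the RX buffers
 * @param[in] fec all we know about the device yet
 * @return 0 on success, -ENOMEM when any allocation fails
 *
 * The rings are cacheline aligned; the RX buffers additionally honour
 * FEC_DMA_RX_MINALIGN for the mx6solox quirk described above.
 */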
static int fec_alloc_descs(struct fec_priv *fec)
{
	unsigned int size;
	int i;
	uint8_t *data;

	/* Allocate TX descriptors. */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
	if (!fec->tbd_base)
		goto err_tx;

	/* Allocate RX descriptors. */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
	if (!fec->rbd_base)
		goto err_rx;

	memset(fec->rbd_base, 0, size);

	/* Allocate RX buffers. */

	/* Maximum RX buffer size. */
	size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN);
	for (i = 0; i < FEC_RBD_NUM; i++) {
		data = memalign(FEC_DMA_RX_MINALIGN, size);
		if (!data) {
			printf("%s: error allocating rxbuf %d\n", __func__, i);
			goto err_ring;
		}

		memset(data, 0, size);

		fec->rbd_base[i].data_pointer = (uint32_t)data;
		fec->rbd_base[i].status = FEC_RBD_EMPTY;
		fec->rbd_base[i].data_length = 0;
		/* Flush the buffer to memory. */
		flush_dcache_range((uint32_t)data, (uint32_t)data + size);
	}

	/* Mark the last RBD to close the ring. */
	fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;

	fec->rbd_index = 0;
	fec->tbd_index = 0;

	return 0;

err_ring:
	for (; i >= 0; i--)
		free((void *)fec->rbd_base[i].data_pointer);
	free(fec->rbd_base);
err_rx:
	free(fec->tbd_base);
err_tx:
	return -ENOMEM;
}

static void fec_free_descs(struct fec_priv *fec)
{
	int i;

	for (i = 0; i < FEC_RBD_NUM; i++)
		free((void *)fec->rbd_base[i].data_pointer);
	free(fec->rbd_base);
	free(fec->tbd_base);
}

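/**
 * Probe one FEC instance and register it as a U-Boot ethernet device
 * @param[in] bd board information
 * @param[in] dev_id FEC controller index, or -1 for a single unnamed FEC
 * @param[in] base_addr register base address of this FEC instance
 * @param[in] bus MDIO bus used to talk to the attached PHY
 * @return 0 on success, negative error code otherwise
 *
 * Depending on CONFIG_PHYLIB the last parameter is either the already
 * discovered PHY device or the raw PHY address on the MDIO bus.
 */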
#ifdef CONFIG_PHYLIB
int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
		struct mii_dev *bus, struct phy_device *phydev)
#else
static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
		struct mii_dev *bus, int phy_id)
#endif
{
	struct eth_device *edev;
	struct fec_priv *fec;
	unsigned char ethaddr[6];
	uint32_t start;
	int ret = 0;

	/* create and fill edev struct */
	edev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!edev) {
		puts("fec_mxc: not enough malloc memory for eth_device\n");
		ret = -ENOMEM;
		goto err1;
	}

	fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
	if (!fec) {
		puts("fec_mxc: not enough malloc memory for fec_priv\n");
		ret = -ENOMEM;
		goto err2;
	}

	memset(edev, 0, sizeof(*edev));
	memset(fec, 0, sizeof(*fec));

	ret = fec_alloc_descs(fec);
	if (ret)
		goto err3;

	edev->priv = fec;
	edev->init = fec_init;
	edev->send = fec_send;
	edev->recv = fec_recv;
	edev->halt = fec_halt;
	edev->write_hwaddr = fec_set_hwaddr;

	fec->eth = (struct ethernet_regs *)base_addr;
	fec->bd = bd;

	fec->xcv_type = CONFIG_FEC_XCV_TYPE;

	/* Reset chip. */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
	start = get_timer(0);
	while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
			printf("FEC MXC: Timeout resetting chip\n");
			goto err4;
		}
		udelay(10);
	}

	fec_reg_setup(fec);
	fec_set_dev_name(edev->name, dev_id);
	fec->dev_id = (dev_id == -1) ? 0 : dev_id;
	fec->bus = bus;
	fec_mii_setspeed(bus->priv);
#ifdef CONFIG_PHYLIB
	fec->phydev = phydev;
	phy_connect_dev(phydev, edev);
	/* Configure phy */
	phy_config(phydev);
#else
	fec->phy_id = phy_id;
#endif
	eth_register(edev);

	if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
		debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
		memcpy(edev->enetaddr, ethaddr, 6);
		if (!getenv("ethaddr"))
			eth_setenv_enetaddr("ethaddr", ethaddr);
	}
	return ret;
err4:
	fec_free_descs(fec);
err3:
	free(fec);
err2:
	free(edev);
err1:
	return ret;
}
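
/**
 * Create and register the MDIO bus that fronts a FEC's MII block
 * @param[in] base_addr register base address of the FEC owning the bus
 * @param[in] dev_id FEC controller index used to name the bus, or -1
 * @return registered mii_dev on success, NULL on failure
 */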
struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
{
	struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
	struct mii_dev *bus;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return NULL;
	}
	bus->read = fec_phy_read;
	bus->write = fec_phy_write;
	bus->priv = eth;
	fec_set_dev_name(bus->name, dev_id);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return NULL;
	}
	fec_mii_setspeed(eth);
	return bus;
}

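/**
 * Set up the MDIO bus, locate the PHY (with CONFIG_PHYLIB) and probe one FEC
 * @param[in] bd board information
 * @param[in] dev_id FEC controller index, or -1 for a single unnamed FEC
 * @param[in] phy_id address of the attached PHY on the MDIO bus
 * @param[in] addr register base address of the FEC instance
 * @return 0 on success, negative error code otherwise
 */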
int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
{
	uint32_t base_mii;
	struct mii_dev *bus = NULL;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev = NULL;
#endif
	int ret;

#ifdef CONFIG_MX28
	/*
	 * The i.MX28 has two ethernet interfaces, but they are not equal.
	 * Only the first one can access the MDIO bus.
	 */
	base_mii = MXS_ENET0_BASE;
#else
	base_mii = addr;
#endif
	debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
	bus = fec_get_miibus(base_mii, dev_id);
	if (!bus)
		return -ENOMEM;
#ifdef CONFIG_PHYLIB
	phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		free(bus);
		return -ENOMEM;
	}
	ret = fec_probe(bd, dev_id, addr, bus, phydev);
#else
	ret = fec_probe(bd, dev_id, addr, bus, phy_id);
#endif
	if (ret) {
#ifdef CONFIG_PHYLIB
		free(phydev);
#endif
		free(bus);
	}
	return ret;
}

#ifdef CONFIG_FEC_MXC_PHYADDR
int fecmxc_initialize(bd_t *bd)
{
	return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
			IMX_FEC_BASE);
}
#endif
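
/*
 * Typical board-code usage (a sketch only; the PHY address and base address
 * macros below are illustrative assumptions provided by the board/SoC
 * configuration, not defined by this driver itself):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return fecmxc_initialize_multi(bis, 0, CONFIG_FEC_MXC_PHYADDR,
 *					       IMX_FEC_BASE);
 *	}
 */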

#ifndef CONFIG_PHYLIB
int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	fec->mii_postcall = cb;
	return 0;
}
#endif