/*
 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
 * (C) Copyright 2008 Armadeus Systems nc
 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>
#include "fec_mxc.h"

#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Timeout the transfer after 5 ms. This is usually a bit more, since
 * the code in the tight loops this timeout is used in adds some overhead.
 */
#define FEC_XFER_TIMEOUT	5000

/*
 * The standard 32-byte DMA alignment does not work on mx6solox, which requires
 * 64-byte alignment in the DMA RX FEC buffer.
 * Introduce FEC_DMA_RX_MINALIGN, which covers the mx6solox needs and also
 * satisfies the alignment requirement on the other SoCs (32 bytes).
 */
#define FEC_DMA_RX_MINALIGN	64

#ifndef CONFIG_MII
#error "CONFIG_MII has to be defined!"
#endif

#ifndef CONFIG_FEC_XCV_TYPE
#define CONFIG_FEC_XCV_TYPE MII100
#endif

/*
 * The i.MX28 operates with packets in big endian. We need to swap them before
 * sending and after receiving.
 */
#ifdef CONFIG_MX28
#define CONFIG_FEC_MXC_SWAP_PACKET
#endif

#define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
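
/*
 * Worked example (illustrative, assuming the common 32-byte ARCH_DMA_MINALIGN
 * and the 8-byte struct fec_bd from fec_mxc.h): 32 / 8 = 4, so four RX
 * descriptors share one cache line and fec_recv() below only hands a
 * descriptor back to the hardware once the whole group of four is processed.
 */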

/* Check various alignment issues at compile time */
#if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
#error "ARCH_DMA_MINALIGN must be multiple of 16!"
#endif

#if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
	(PKTALIGN % ARCH_DMA_MINALIGN != 0))
#error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
#endif

#undef DEBUG

struct nbuf {
	uint8_t data[1500];	/**< actual data */
	int length;		/**< actual length */
	int used;		/**< buffer in use or not */
	uint8_t head[16];	/**< MAC header(6 + 6 + 2) + 2(aligned) */
};

#ifdef CONFIG_FEC_MXC_SWAP_PACKET
static void swap_packet(uint32_t *packet, int length)
{
	int i;

	for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
		packet[i] = __swab32(packet[i]);
}
#endif
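
/*
 * Note: swap_packet() rounds the length up to whole 32-bit words, so a frame
 * whose length is not a multiple of 4 still gets its trailing bytes swapped;
 * this is harmless here because the buffers involved are padded well beyond
 * the frame length.
 */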

/*
 * MII-interface related functions
 */
static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
		uint8_t regAddr)
{
	uint32_t reg;		/* convenient holder for the PHY register */
	uint32_t phy;		/* convenient holder for the PHY */
	uint32_t start;
	int val;

	/*
	 * reading from any PHY's register is done by properly
	 * programming the FEC's MII data register.
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);
	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;

	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
			phy | reg, &eth->mii_data);

	/*
	 * wait for the related interrupt
	 */
	start = get_timer(0);
	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
			printf("Read MDIO failed...\n");
			return -1;
		}
	}

	/*
	 * clear mii interrupt bit
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);

	/*
	 * it's now safe to read the PHY's register
	 */
	val = (unsigned short)readl(&eth->mii_data);
	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
			regAddr, val);
	return val;
}

static void fec_mii_setspeed(struct ethernet_regs *eth)
{
	/*
	 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
	 * and do not drop the Preamble.
	 */
	register u32 speed = DIV_ROUND_UP(imx_get_fecclk(), 5000000);
#ifdef FEC_QUIRK_ENET_MAC
	speed--;
#endif
	speed <<= 1;
	writel(speed, &eth->mii_speed);
	debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
}
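
/*
 * Worked example for the divisor above (illustrative only, assuming a 66 MHz
 * FEC reference clock): DIV_ROUND_UP(66000000, 5000000) = 14, giving
 * MDC = 66 MHz / (2 * 14), roughly 2.36 MHz, i.e. below the 2.5 MHz MDIO
 * limit. On the ENET-style MACs (FEC_QUIRK_ENET_MAC) the hardware divides by
 * (n + 1) * 2 instead, hence the extra speed-- before the value is shifted
 * into the MII_SPEED field.
 */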

static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
		uint8_t regAddr, uint16_t data)
{
	uint32_t reg;		/* convenient holder for the PHY register */
	uint32_t phy;		/* convenient holder for the PHY */
	uint32_t start;

	reg = regAddr << FEC_MII_DATA_RA_SHIFT;
	phy = phyAddr << FEC_MII_DATA_PA_SHIFT;

	writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
		FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);

	/*
	 * wait for the MII interrupt
	 */
	start = get_timer(0);
	while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
		if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
			printf("Write MDIO failed...\n");
			return -1;
		}
	}

	/*
	 * clear MII interrupt bit
	 */
	writel(FEC_IEVENT_MII, &eth->ievent);
	debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
			regAddr, data);

	return 0;
}

int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr, int regAddr)
{
	return fec_mdio_read(bus->priv, phyAddr, regAddr);
}

int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr, int regAddr,
		u16 data)
{
	return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
}
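
/*
 * These two wrappers adapt the accessors above to the struct mii_dev
 * read/write callbacks registered in fec_get_miibus() below, so generic PHY
 * code (and the "mii"/"mdio" shell commands) can use them, e.g.
 * (illustrative call only):
 *
 *	int bmsr = bus->read(bus, phy_id, MDIO_DEVAD_NONE, MII_BMSR);
 */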

#ifndef CONFIG_PHYLIB
static int miiphy_restart_aneg(struct eth_device *dev)
{
	int ret = 0;
#if !defined(CONFIG_FEC_MXC_NO_ANEG)
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct ethernet_regs *eth = fec->bus->priv;

	/*
	 * Wake up from sleep if necessary
	 * Reset PHY, then delay 300ns
	 */
#ifdef CONFIG_MX27
	fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
#endif
	fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
	udelay(1000);

	/*
	 * Set the auto-negotiation advertisement register bits
	 */
	fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
			LPA_100FULL | LPA_100HALF | LPA_10FULL |
			LPA_10HALF | PHY_ANLPAR_PSB_802_3);
	fec_mdio_write(eth, fec->phy_id, MII_BMCR,
			BMCR_ANENABLE | BMCR_ANRESTART);

	if (fec->mii_postcall)
		ret = fec->mii_postcall(fec->phy_id);

#endif
	return ret;
}

static int miiphy_wait_aneg(struct eth_device *dev)
{
	uint32_t start;
	int status;
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct ethernet_regs *eth = fec->bus->priv;

	/*
	 * Wait for AN completion
	 */
	start = get_timer(0);
	do {
		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
			printf("%s: Autonegotiation timeout\n", dev->name);
			return -1;
		}

		status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
		if (status < 0) {
			printf("%s: Autonegotiation failed. status: %d\n",
					dev->name, status);
			return -1;
		}
	} while (!(status & BMSR_LSTATUS));

	return 0;
}
#endif
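
/*
 * Note: miiphy_wait_aneg() polls BMSR_LSTATUS (link up) rather than
 * BMSR_ANEGCOMPLETE, so it effectively waits for the link to come up within
 * the 5 second budget above.
 */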

static int fec_rx_task_enable(struct fec_priv *fec)
{
	writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
	return 0;
}

static int fec_rx_task_disable(struct fec_priv *fec)
{
	return 0;
}

static int fec_tx_task_enable(struct fec_priv *fec)
{
	writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
	return 0;
}

static int fec_tx_task_disable(struct fec_priv *fec)
{
	return 0;
}
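
/*
 * Writing RDAR/TDAR above simply tells the DMA engine that new descriptors
 * are available; the *_disable() variants are intentionally empty, presumably
 * kept for symmetry, since the engine is actually stopped by clearing
 * ECR[ETHER_EN] in fec_halt().
 */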

/**
 * Initialize receive task's buffer descriptors
 * @param[in] fec all we know about the device yet
 * @param[in] count receive buffer count to be allocated
 * @param[in] dsize desired size of each receive buffer
 *
 * Init all RX descriptors to default values.
 */
static void fec_rbd_init(struct fec_priv *fec, int count, int dsize)
{
	uint32_t size;
	uint8_t *data;
	int i;

	/*
	 * Reload the RX descriptors with default values and wipe
	 * the RX buffers.
	 */
	size = roundup(dsize, ARCH_DMA_MINALIGN);
	for (i = 0; i < count; i++) {
		data = (uint8_t *)fec->rbd_base[i].data_pointer;
		memset(data, 0, dsize);
		flush_dcache_range((uint32_t)data, (uint32_t)data + size);

		fec->rbd_base[i].status = FEC_RBD_EMPTY;
		fec->rbd_base[i].data_length = 0;
	}

	/* Mark the last RBD to close the ring. */
	fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
	fec->rbd_index = 0;

	flush_dcache_range((unsigned)fec->rbd_base,
			   (unsigned)fec->rbd_base + size);
}

/**
 * Initialize transmit task's buffer descriptors
 * @param[in] fec all we know about the device yet
 *
 * Transmit buffers are created externally. We only have to init the BDs here.\n
 * Note: There is a race condition in the hardware. When only one BD is in
 * use it must be marked with the WRAP bit to use it for every transmit.
 * This bit in combination with the READY bit results in a double transmit
 * of each data buffer. It seems the state machine checks READY earlier than
 * resetting it after the first transfer.
 * Using two BDs solves this issue.
 */
static void fec_tbd_init(struct fec_priv *fec)
{
	unsigned addr = (unsigned)fec->tbd_base;
	unsigned size = roundup(2 * sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);

	memset(fec->tbd_base, 0, size);
	fec->tbd_base[0].status = 0;
	fec->tbd_base[1].status = FEC_TBD_WRAP;
	fec->tbd_index = 0;
	flush_dcache_range(addr, addr + size);
}

/**
 * Mark the given read buffer descriptor as free
 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
 * @param[in] pRbd buffer descriptor to mark free again
 */
static void fec_rbd_clean(int last, struct fec_bd *pRbd)
{
	unsigned short flags = FEC_RBD_EMPTY;
	if (last)
		flags |= FEC_RBD_WRAP;
	writew(flags, &pRbd->status);
	writew(0, &pRbd->data_length);
}

static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
						unsigned char *mac)
{
	imx_get_mac_from_fuse(dev_id, mac);
	return !is_valid_ether_addr(mac);
}

static int fec_set_hwaddr(struct eth_device *dev)
{
	uchar *mac = dev->enetaddr;
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	writel(0, &fec->eth->iaddr1);
	writel(0, &fec->eth->iaddr2);
	writel(0, &fec->eth->gaddr1);
	writel(0, &fec->eth->gaddr2);

	/*
	 * Set physical address
	 */
	writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
			&fec->eth->paddr1);
	writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);

	return 0;
}
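
/*
 * Register layout note: PADDR1 takes the first four MAC bytes, PADDR2 the
 * remaining two in its upper half. The 0x8808 in the lower half of PADDR2 is
 * the TYPE field the MAC inserts into PAUSE frames (0x8808 is the IEEE 802.3
 * MAC control EtherType).
 */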

/*
 * Do initial configuration of the FEC registers
 */
static void fec_reg_setup(struct fec_priv *fec)
{
	uint32_t rcntrl;

	/*
	 * Set interrupt mask register
	 */
	writel(0x00000000, &fec->eth->imask);

	/*
	 * Clear FEC-Lite interrupt event register(IEVENT)
	 */
	writel(0xffffffff, &fec->eth->ievent);

	/*
	 * Set FEC-Lite receive control register(R_CNTRL):
	 */

	/* Start with frame length = 1518, common for all modes. */
	rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
	if (fec->xcv_type != SEVENWIRE)		/* xMII modes */
		rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
	if (fec->xcv_type == RGMII)
		rcntrl |= FEC_RCNTRL_RGMII;
	else if (fec->xcv_type == RMII)
		rcntrl |= FEC_RCNTRL_RMII;

	writel(rcntrl, &fec->eth->r_cntrl);
}
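
/*
 * The zero IMASK write above keeps every FEC interrupt masked: this driver
 * never installs an interrupt handler and instead polls IEVENT (see the MDIO
 * helpers and fec_recv()), which is the usual pattern for U-Boot network
 * drivers.
 */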

/**
 * Start the FEC engine
 * @param[in] dev Our device to handle
 */
static int fec_open(struct eth_device *edev)
{
	struct fec_priv *fec = (struct fec_priv *)edev->priv;
	int speed;
	uint32_t addr, size;
	int i;

	debug("fec_open: fec_open(dev)\n");
	/* full-duplex, heartbeat disabled */
	writel(1 << 2, &fec->eth->x_cntrl);
	fec->rbd_index = 0;

	/* Invalidate all descriptors */
	for (i = 0; i < FEC_RBD_NUM - 1; i++)
		fec_rbd_clean(0, &fec->rbd_base[i]);
	fec_rbd_clean(1, &fec->rbd_base[i]);

	/* Flush the descriptors into RAM */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
			ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->rbd_base;
	flush_dcache_range(addr, addr + size);

#ifdef FEC_QUIRK_ENET_MAC
	/* Enable ENET HW endian SWAP */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
		&fec->eth->ecntrl);
	/* Enable ENET store and forward mode */
	writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
		&fec->eth->x_wmrk);
#endif
	/*
	 * Enable FEC-Lite controller
	 */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
		&fec->eth->ecntrl);
#if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
	udelay(100);
	/*
	 * setup the MII gasket for RMII mode
	 */

	/* disable the gasket */
	writew(0, &fec->eth->miigsk_enr);

	/* wait for the gasket to be disabled */
	while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
		udelay(2);

	/* configure gasket for RMII, 50 MHz, no loopback, and no echo */
	writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);

	/* re-enable the gasket */
	writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);

	/* wait until MII gasket is ready */
	int max_loops = 10;
	while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
		if (--max_loops <= 0) {
			printf("WAIT for MII Gasket ready timed out\n");
			break;
		}
	}
#endif

#ifdef CONFIG_PHYLIB
	{
		/* Start up the PHY */
		int ret = phy_startup(fec->phydev);

		if (ret) {
			printf("Could not initialize PHY %s\n",
			       fec->phydev->dev->name);
			return ret;
		}
		speed = fec->phydev->speed;
	}
#else
	miiphy_wait_aneg(edev);
	speed = miiphy_speed(edev->name, fec->phy_id);
	miiphy_duplex(edev->name, fec->phy_id);
#endif

#ifdef FEC_QUIRK_ENET_MAC
	{
		u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
		u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
		if (speed == _1000BASET)
			ecr |= FEC_ECNTRL_SPEED;
		else if (speed != _100BASET)
			rcr |= FEC_RCNTRL_RMII_10T;
		writel(ecr, &fec->eth->ecntrl);
		writel(rcr, &fec->eth->r_cntrl);
	}
#endif
	debug("%s:Speed=%i\n", __func__, speed);

	/*
	 * Enable SmartDMA receive task
	 */
	fec_rx_task_enable(fec);

	udelay(100000);
	return 0;
}
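
/*
 * Speed selection recap for the FEC_QUIRK_ENET_MAC block above: ECR[SPEED]
 * switches the MAC into gigabit mode, RCR[RMII_10T] selects 10 Mbit/s, and
 * leaving both bits clear gives the 100 Mbit/s default.
 */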

static int fec_init(struct eth_device *dev, bd_t* bd)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
	int i;

	/* Initialize MAC address */
	fec_set_hwaddr(dev);

	/*
	 * Setup transmit descriptors, there are two in total.
	 */
	fec_tbd_init(fec);

	/* Setup receive descriptors. */
	fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE);

	fec_reg_setup(fec);

	if (fec->xcv_type != SEVENWIRE)
		fec_mii_setspeed(fec->bus->priv);

	/*
	 * Set Opcode/Pause Duration Register
	 */
	writel(0x00010020, &fec->eth->op_pause);	/* FIXME 0xffff0020; */
	writel(0x2, &fec->eth->x_wmrk);
	/*
	 * Set multicast address filter
	 */
	writel(0x00000000, &fec->eth->gaddr1);
	writel(0x00000000, &fec->eth->gaddr2);

	/* clear MIB RAM */
	for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
		writel(0, i);

	/* FIFO receive start register */
	writel(0x520, &fec->eth->r_fstart);

	/* size and address of each buffer */
	writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
	writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
	writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);

#ifndef CONFIG_PHYLIB
	if (fec->xcv_type != SEVENWIRE)
		miiphy_restart_aneg(dev);
#endif
	fec_open(dev);
	return 0;
}
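
/*
 * The MIB clearing loop above touches 64 words (0x100 bytes) of statistics
 * counters starting at rmon_t_drop; the upper bound mib_ptr + 0xfc is
 * inclusive because the loop condition uses <=.
 */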

/**
 * Halt the FEC engine
 * @param[in] dev Our device to handle
 */
static void fec_halt(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	int counter = 0xffff;

	/*
	 * issue graceful stop command to the FEC transmitter if necessary
	 */
	writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
			&fec->eth->x_cntrl);

	debug("eth_halt: wait for stop regs\n");
	/*
	 * wait for graceful stop to register
	 */
	while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
		udelay(1);

	/*
	 * Disable SmartDMA tasks
	 */
	fec_tx_task_disable(fec);
	fec_rx_task_disable(fec);

	/*
	 * Disable the Ethernet Controller
	 * Note: this will also reset the BD index counter!
	 */
	writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
	       &fec->eth->ecntrl);
	fec->rbd_index = 0;
	fec->tbd_index = 0;
	debug("eth_halt: done\n");
}

/**
 * Transmit one frame
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success
 */
static int fec_send(struct eth_device *dev, void *packet, int length)
{
	unsigned int status;
	uint32_t size, end;
	uint32_t addr;
	int timeout = FEC_XFER_TIMEOUT;
	int ret = 0;

	/*
	 * This routine transmits one frame. This routine only accepts
	 * 6-byte Ethernet addresses.
	 */
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	/*
	 * Check for valid length of data.
	 */
	if ((length > 1500) || (length <= 0)) {
		printf("Payload (%d) too large\n", length);
		return -1;
	}

	/*
	 * Setup the transmit buffer. We are always using the first buffer for
	 * transmission, the second will be empty and only used to stop the DMA
	 * engine. We also flush the packet to RAM here to avoid cache trouble.
	 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
	swap_packet((uint32_t *)packet, length);
#endif

	addr = (uint32_t)packet;
	end = roundup(addr + length, ARCH_DMA_MINALIGN);
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	flush_dcache_range(addr, end);

	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);

	/*
	 * update BD's status now
	 * This block:
	 * - is always the last in a chain (means no chain)
	 * - should transmit the CRC
	 * - might be the last BD in the list, so the address counter should
	 *   wrap (-> keep the WRAP flag)
	 */
	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
	writew(status, &fec->tbd_base[fec->tbd_index].status);

	/*
	 * Flush data cache. This code flushes both TX descriptors to RAM.
	 * After this code, the descriptors will be safely in RAM and we
	 * can start DMA.
	 */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->tbd_base;
	flush_dcache_range(addr, addr + size);

	/*
	 * Below we read the DMA descriptor's last four bytes back from the
	 * DRAM. This is important in order to make sure that all WRITE
	 * operations on the bus that were triggered by previous cache FLUSH
	 * have completed.
	 *
	 * Otherwise, on MX28, it is possible to observe a corruption of the
	 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
	 * for the bus structure of MX28. The scenario is as follows:
	 *
	 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
	 *    to DRAM due to flush_dcache_range()
	 * 2) ARM core writes the FEC registers via AHB_ARB2
	 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
	 *
	 * Note that 2) does sometimes finish before 1) due to reordering of
	 * WRITE accesses on the AHB bus, therefore triggering 3) before the
	 * DMA descriptor is fully written into DRAM. This results in occasional
	 * corruption of the DMA descriptor.
	 */
	readl(addr + size - 4);

	/*
	 * Enable SmartDMA transmit task
	 */
	fec_tx_task_enable(fec);

	/*
	 * Wait until frame is sent. On each turn of the wait cycle, we must
	 * invalidate data cache to see what's really in RAM. Also, we need
	 * barrier here.
	 */
	while (--timeout) {
		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
			break;
	}

	if (!timeout) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The TDAR bit is cleared when the descriptors are all out from TX
	 * but on mx6solox we noticed that the READY bit is still not cleared
	 * right after TDAR.
	 * These are two distinct signals, and in IC simulation, we found that
	 * TDAR always gets cleared prior to the READY bit of the last BD
	 * being cleared.
	 * In mx6solox, we use a later version of FEC IP. It looks like this
	 * intrinsic behaviour of the TDAR bit has changed in this newer FEC
	 * version.
	 *
	 * Fix this by polling the READY bit of BD after the TDAR polling,
	 * which covers the mx6solox case and does not harm the other SoCs.
	 */
	timeout = FEC_XFER_TIMEOUT;
	while (--timeout) {
		invalidate_dcache_range(addr, addr + size);
		if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
		    FEC_TBD_READY))
			break;
	}

	if (!timeout)
		ret = -EINVAL;

out:
	debug("fec_send: status 0x%x index %d ret %i\n",
			readw(&fec->tbd_base[fec->tbd_index].status),
			fec->tbd_index, ret);
	/* for next transmission use the other buffer */
	if (fec->tbd_index)
		fec->tbd_index = 0;
	else
		fec->tbd_index = 1;

	return ret;
}

/**
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read
 */
static int fec_recv(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
	unsigned long ievent;
	int frame_length, len = 0;
	struct nbuf *frame;
	uint16_t bd_status;
	uint32_t addr, size, end;
	int i;
	ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);

	/*
	 * Check if any critical events have happened
	 */
	ievent = readl(&fec->eth->ievent);
	writel(ievent, &fec->eth->ievent);
	debug("fec_recv: ievent 0x%lx\n", ievent);
	if (ievent & FEC_IEVENT_BABR) {
		fec_halt(dev);
		fec_init(dev, fec->bd);
		printf("some error: 0x%08lx\n", ievent);
		return 0;
	}
	if (ievent & FEC_IEVENT_HBERR) {
		/* Heartbeat error */
		writel(0x00000001 | readl(&fec->eth->x_cntrl),
				&fec->eth->x_cntrl);
	}
	if (ievent & FEC_IEVENT_GRA) {
		/* Graceful stop complete */
		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
			fec_halt(dev);
			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
					&fec->eth->x_cntrl);
			fec_init(dev, fec->bd);
		}
	}

	/*
	 * Read the buffer status. Before the status can be read, the data cache
	 * must be invalidated, because the data in RAM might have been changed
	 * by DMA. The descriptors are properly aligned to cachelines so there's
	 * no need to worry they'd overlap.
	 *
	 * WARNING: By invalidating the descriptor here, we also invalidate
	 * the descriptors surrounding this one. Therefore we can NOT change the
	 * contents of this descriptor nor the surrounding ones. The problem is
	 * that in order to mark the descriptor as processed, we need to change
	 * the descriptor. The solution is to mark the whole cache line when all
	 * descriptors in the cache line are processed.
	 */
	addr = (uint32_t)rbd;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	invalidate_dcache_range(addr, addr + size);

	bd_status = readw(&rbd->status);
	debug("fec_recv: status 0x%x\n", bd_status);

	if (!(bd_status & FEC_RBD_EMPTY)) {
		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
			((readw(&rbd->data_length) - 4) > 14)) {
			/*
			 * Get buffer address and size
			 */
			frame = (struct nbuf *)readl(&rbd->data_pointer);
			frame_length = readw(&rbd->data_length) - 4;
			/*
			 * Invalidate data cache over the buffer
			 */
			addr = (uint32_t)frame;
			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
			addr &= ~(ARCH_DMA_MINALIGN - 1);
			invalidate_dcache_range(addr, end);

			/*
			 * Fill the buffer and pass it to upper layers
			 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
			swap_packet((uint32_t *)frame->data, frame_length);
#endif
			memcpy(buff, frame->data, frame_length);
			NetReceive(buff, frame_length);
			len = frame_length;
		} else {
			if (bd_status & FEC_RBD_ERR)
				printf("error frame: 0x%08lx 0x%08x\n",
						(ulong)rbd->data_pointer,
						bd_status);
		}

		/*
		 * Free the current buffer, restart the engine and move forward
		 * to the next buffer. Here we check if the whole cacheline of
		 * descriptors was already processed and if so, we mark it free
		 * as whole.
		 */
		size = RXDESC_PER_CACHELINE - 1;
		if ((fec->rbd_index & size) == size) {
			i = fec->rbd_index - size;
			addr = (uint32_t)&fec->rbd_base[i];
			for (; i <= fec->rbd_index ; i++) {
				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
					      &fec->rbd_base[i]);
			}
			flush_dcache_range(addr,
					   addr + ARCH_DMA_MINALIGN);
		}

		fec_rx_task_enable(fec);
		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
	}
	debug("fec_recv: stop\n");

	return len;
}
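
/*
 * On the length checks above: the hardware stores the received frame
 * including its 4-byte FCS, so 4 is subtracted before the data is passed up,
 * and anything not longer than a bare 14-byte Ethernet header is ignored.
 */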

static void fec_set_dev_name(char *dest, int dev_id)
{
	sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
}

static int fec_alloc_descs(struct fec_priv *fec)
{
	unsigned int size;
	int i;
	uint8_t *data;

	/* Allocate TX descriptors. */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
	if (!fec->tbd_base)
		goto err_tx;

	/* Allocate RX descriptors. */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
	if (!fec->rbd_base)
		goto err_rx;

	memset(fec->rbd_base, 0, size);

	/* Allocate RX buffers. */

	/* Maximum RX buffer size. */
	size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN);
	for (i = 0; i < FEC_RBD_NUM; i++) {
		data = memalign(FEC_DMA_RX_MINALIGN, size);
		if (!data) {
			printf("%s: error allocating rxbuf %d\n", __func__, i);
			goto err_ring;
		}

		memset(data, 0, size);

		fec->rbd_base[i].data_pointer = (uint32_t)data;
		fec->rbd_base[i].status = FEC_RBD_EMPTY;
		fec->rbd_base[i].data_length = 0;
		/* Flush the buffer to memory. */
		flush_dcache_range((uint32_t)data, (uint32_t)data + size);
	}

	/* Mark the last RBD to close the ring. */
	fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;

	fec->rbd_index = 0;
	fec->tbd_index = 0;

	return 0;

err_ring:
	for (; i >= 0; i--)
		free((void *)fec->rbd_base[i].data_pointer);
	free(fec->rbd_base);
err_rx:
	free(fec->tbd_base);
err_tx:
	return -ENOMEM;
}

static void fec_free_descs(struct fec_priv *fec)
{
	int i;

	for (i = 0; i < FEC_RBD_NUM; i++)
		free((void *)fec->rbd_base[i].data_pointer);
	free(fec->rbd_base);
	free(fec->tbd_base);
}
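
/*
 * Rough memory budget for the allocations above (illustrative, assuming the
 * usual FEC_RBD_NUM = 64 and FEC_MAX_PKT_SIZE = 1536 from fec_mxc.h): the RX
 * ring needs 64 descriptors of 8 bytes plus 64 buffers of 1536 bytes, i.e.
 * roughly 96 KiB taken from the malloc pool at probe time.
 */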

#ifdef CONFIG_PHYLIB
int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
		struct mii_dev *bus, struct phy_device *phydev)
#else
static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
		struct mii_dev *bus, int phy_id)
#endif
{
	struct eth_device *edev;
	struct fec_priv *fec;
	unsigned char ethaddr[6];
	uint32_t start;
	int ret = 0;

	/* create and fill edev struct */
	edev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!edev) {
		puts("fec_mxc: not enough malloc memory for eth_device\n");
		ret = -ENOMEM;
		goto err1;
	}

	fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
	if (!fec) {
		puts("fec_mxc: not enough malloc memory for fec_priv\n");
		ret = -ENOMEM;
		goto err2;
	}

	memset(edev, 0, sizeof(*edev));
	memset(fec, 0, sizeof(*fec));

	ret = fec_alloc_descs(fec);
	if (ret)
		goto err3;

	edev->priv = fec;
	edev->init = fec_init;
	edev->send = fec_send;
	edev->recv = fec_recv;
	edev->halt = fec_halt;
	edev->write_hwaddr = fec_set_hwaddr;

	fec->eth = (struct ethernet_regs *)base_addr;
	fec->bd = bd;

	fec->xcv_type = CONFIG_FEC_XCV_TYPE;

	/* Reset chip. */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
	start = get_timer(0);
	while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
		if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
			printf("FEC MXC: Timeout resetting chip\n");
			goto err4;
		}
		udelay(10);
	}

	fec_reg_setup(fec);
	fec_set_dev_name(edev->name, dev_id);
	fec->dev_id = (dev_id == -1) ? 0 : dev_id;
	fec->bus = bus;
	fec_mii_setspeed(bus->priv);
#ifdef CONFIG_PHYLIB
	fec->phydev = phydev;
	phy_connect_dev(phydev, edev);
	/* Configure phy */
	phy_config(phydev);
#else
	fec->phy_id = phy_id;
#endif
	eth_register(edev);

	if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
		debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
		memcpy(edev->enetaddr, ethaddr, 6);
		if (!getenv("ethaddr"))
			eth_setenv_enetaddr("ethaddr", ethaddr);
	}
	return ret;
err4:
	fec_free_descs(fec);
err3:
	free(fec);
err2:
	free(edev);
err1:
	return ret;
}

struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
{
	struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
	struct mii_dev *bus;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return NULL;
	}
	bus->read = fec_phy_read;
	bus->write = fec_phy_write;
	bus->priv = eth;
	fec_set_dev_name(bus->name, dev_id);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return NULL;
	}
	fec_mii_setspeed(eth);
	return bus;
}
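
/*
 * Typical board-level usage with CONFIG_PHYLIB (an illustrative sketch only;
 * the base address, PHY mask and interface mode are board-specific
 * assumptions), along the same lines as fecmxc_initialize_multi() below:
 *
 *	struct mii_dev *bus = fec_get_miibus(IMX_FEC_BASE, -1);
 *	struct phy_device *phydev;
 *
 *	if (!bus)
 *		return -ENOMEM;
 *	phydev = phy_find_by_mask(bus, 1 << CONFIG_FEC_MXC_PHYADDR,
 *				  PHY_INTERFACE_MODE_RGMII);
 *	if (phydev)
 *		fec_probe(gd->bd, -1, IMX_FEC_BASE, bus, phydev);
 */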

int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
{
	uint32_t base_mii;
	struct mii_dev *bus = NULL;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev = NULL;
#endif
	int ret;

#ifdef CONFIG_MX28
	/*
	 * The i.MX28 has two ethernet interfaces, but they are not equal.
	 * Only the first one can access the MDIO bus.
	 */
	base_mii = MXS_ENET0_BASE;
#else
	base_mii = addr;
#endif
	debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
	bus = fec_get_miibus(base_mii, dev_id);
	if (!bus)
		return -ENOMEM;
#ifdef CONFIG_PHYLIB
	phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		free(bus);
		return -ENOMEM;
	}
	ret = fec_probe(bd, dev_id, addr, bus, phydev);
#else
	ret = fec_probe(bd, dev_id, addr, bus, phy_id);
#endif
	if (ret) {
#ifdef CONFIG_PHYLIB
		free(phydev);
#endif
		free(bus);
	}
	return ret;
}

#ifdef CONFIG_FEC_MXC_PHYADDR
int fecmxc_initialize(bd_t *bd)
{
	return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
			IMX_FEC_BASE);
}
#endif
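
/*
 * Minimal board configuration sketch for this single-controller helper
 * (illustrative; the transceiver type and PHY address are assumptions that
 * depend on the board wiring):
 *
 *	#define CONFIG_FEC_MXC
 *	#define CONFIG_MII
 *	#define CONFIG_FEC_XCV_TYPE		RMII
 *	#define CONFIG_FEC_MXC_PHYADDR		0
 */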

#ifndef CONFIG_PHYLIB
int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	fec->mii_postcall = cb;
	return 0;
}
#endif