drivers/net/fec_mxc.c
1 /*
2 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
3 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
4 * (C) Copyright 2008 Armadeus Systems nc
5 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
6 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
7 *
8 * SPDX-License-Identifier: GPL-2.0+
9 */
10
11 #include <common.h>
12 #include <malloc.h>
13 #include <memalign.h>
14 #include <net.h>
15 #include <netdev.h>
16 #include <miiphy.h>
17 #include "fec_mxc.h"
18
19 #include <asm/arch/clock.h>
20 #include <asm/arch/imx-regs.h>
21 #include <asm/imx-common/sys_proto.h>
22 #include <asm/io.h>
23 #include <asm/errno.h>
24 #include <linux/compiler.h>
25
26 DECLARE_GLOBAL_DATA_PTR;
27
28 /*
29 * Time out the transfer after 5 ms. In practice this is usually a bit more,
30 * since the code in the tight loops this timeout is used in adds some overhead.
31 */
32 #define FEC_XFER_TIMEOUT 5000
33
34 /*
35 * The standard 32-byte DMA alignment does not work on mx6solox, which requires
36 * 64-byte alignment in the DMA RX FEC buffer.
37 * Introduce FEC_DMA_RX_MINALIGN, which covers the mx6solox requirement and
38 * also satisfies the 32-byte alignment needed on other SoCs.
39 */
40 #define FEC_DMA_RX_MINALIGN 64
41
42 #ifndef CONFIG_MII
43 #error "CONFIG_MII has to be defined!"
44 #endif
45
46 #ifndef CONFIG_FEC_XCV_TYPE
47 #define CONFIG_FEC_XCV_TYPE MII100
48 #endif
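/*
 * The xcv_type set from CONFIG_FEC_XCV_TYPE selects the transceiver interface
 * driven by this file. Besides the MII100 default above, fec_reg_setup() and
 * fec_open() below also handle SEVENWIRE, RMII and RGMII. A board would
 * normally override the default in its configuration header, for example
 * (illustrative sketch only):
 *
 *   #define CONFIG_FEC_XCV_TYPE	RGMII
 */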
49
50 /*
51 * The i.MX28 operates with packets in big endian. We need to swap them before
52 * sending and after receiving.
53 */
54 #ifdef CONFIG_MX28
55 #define CONFIG_FEC_MXC_SWAP_PACKET
56 #endif
57
58 #define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
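/*
 * For illustration: assuming the common 32-byte ARCH_DMA_MINALIGN and the
 * 8-byte struct fec_bd from fec_mxc.h (16-bit length, 16-bit status, 32-bit
 * data pointer), four RX descriptors share one cache line, so fec_recv()
 * below frees descriptors in whole-cacheline groups of four.
 */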
59
60 /* Check various alignment issues at compile time */
61 #if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
62 #error "ARCH_DMA_MINALIGN must be multiple of 16!"
63 #endif
64
65 #if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
66 (PKTALIGN % ARCH_DMA_MINALIGN != 0))
67 #error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
68 #endif
69
70 #undef DEBUG
71
72 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
73 static void swap_packet(uint32_t *packet, int length)
74 {
75 int i;
76
77 for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
78 packet[i] = __swab32(packet[i]);
79 }
80 #endif
81
82 /*
83 * MII-interface related functions
84 */
85 static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
86 uint8_t regAddr)
87 {
88 uint32_t reg; /* convenient holder for the PHY register */
89 uint32_t phy; /* convenient holder for the PHY */
90 uint32_t start;
91 int val;
92
93 /*
94 * reading from any PHY's register is done by properly
95 * programming the FEC's MII data register.
96 */
97 writel(FEC_IEVENT_MII, &eth->ievent);
98 reg = regAddr << FEC_MII_DATA_RA_SHIFT;
99 phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
100
101 writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
102 phy | reg, &eth->mii_data);
103
104 /*
105 * wait for the related interrupt
106 */
107 start = get_timer(0);
108 while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
109 if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
110 printf("Read MDIO failed...\n");
111 return -1;
112 }
113 }
114
115 /*
116 * clear mii interrupt bit
117 */
118 writel(FEC_IEVENT_MII, &eth->ievent);
119
120 /*
121 * it's now safe to read the PHY's register
122 */
123 val = (unsigned short)readl(&eth->mii_data);
124 debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
125 regAddr, val);
126 return val;
127 }
128
129 static void fec_mii_setspeed(struct ethernet_regs *eth)
130 {
131 /*
132 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
133 * and do not drop the Preamble.
134 *
135 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
136 * MII_SPEED) register that defines the MDIO output hold time. Earlier
137 * versions are RAZ there, so just ignore the difference and write the
138 * register always.
139 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
140 * HOLDTIME + 1 is the number of clock cycles the FEC holds the
141 * output.
142 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
143 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
144 * holdtime cannot result in a value greater than 3.
145 */
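	/*
	 * Worked example (assuming a 66 MHz FEC reference clock, as found on
	 * many i.MX6 boards): speed = DIV_ROUND_UP(66000000, 5000000) = 14,
	 * reduced to 13 by the FEC_QUIRK_ENET_MAC adjustment below, which
	 * yields an MDC clock of 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz, i.e.
	 * below the 2.5 MHz MDC limit. hold = DIV_ROUND_UP(66000000,
	 * 100000000) - 1 = 0, i.e. one clock cycle (~15 ns) of output hold
	 * time, above the 10 ns minimum.
	 */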
146 u32 pclk = imx_get_fecclk();
147 u32 speed = DIV_ROUND_UP(pclk, 5000000);
148 u32 hold = DIV_ROUND_UP(pclk, 100000000) - 1;
149 #ifdef FEC_QUIRK_ENET_MAC
150 speed--;
151 #endif
152 writel(speed << 1 | hold << 8, &eth->mii_speed);
153 debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
154 }
155
156 static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
157 uint8_t regAddr, uint16_t data)
158 {
159 uint32_t reg; /* convenient holder for the PHY register */
160 uint32_t phy; /* convenient holder for the PHY */
161 uint32_t start;
162
163 reg = regAddr << FEC_MII_DATA_RA_SHIFT;
164 phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
165
166 writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
167 FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);
168
169 /*
170 * wait for the MII interrupt
171 */
172 start = get_timer(0);
173 while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
174 if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
175 printf("Write MDIO failed...\n");
176 return -1;
177 }
178 }
179
180 /*
181 * clear MII interrupt bit
182 */
183 writel(FEC_IEVENT_MII, &eth->ievent);
184 debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
185 regAddr, data);
186
187 return 0;
188 }
189
190 static int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr,
191 int regAddr)
192 {
193 return fec_mdio_read(bus->priv, phyAddr, regAddr);
194 }
195
196 static int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr,
197 int regAddr, u16 data)
198 {
199 return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
200 }
201
202 #ifndef CONFIG_PHYLIB
203 static int miiphy_restart_aneg(struct eth_device *dev)
204 {
205 int ret = 0;
206 #if !defined(CONFIG_FEC_MXC_NO_ANEG)
207 struct fec_priv *fec = (struct fec_priv *)dev->priv;
208 struct ethernet_regs *eth = fec->bus->priv;
209
210 /*
211 * Wake up from sleep if necessary
212 * Reset PHY, then delay 300ns
213 */
214 #ifdef CONFIG_MX27
215 fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
216 #endif
217 fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
218 udelay(1000);
219
220 /*
221 * Set the auto-negotiation advertisement register bits
222 */
223 fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
224 LPA_100FULL | LPA_100HALF | LPA_10FULL |
225 LPA_10HALF | PHY_ANLPAR_PSB_802_3);
226 fec_mdio_write(eth, fec->phy_id, MII_BMCR,
227 BMCR_ANENABLE | BMCR_ANRESTART);
228
229 if (fec->mii_postcall)
230 ret = fec->mii_postcall(fec->phy_id);
231
232 #endif
233 return ret;
234 }
235
236 static int miiphy_wait_aneg(struct eth_device *dev)
237 {
238 uint32_t start;
239 int status;
240 struct fec_priv *fec = (struct fec_priv *)dev->priv;
241 struct ethernet_regs *eth = fec->bus->priv;
242
243 /*
244 * Wait for AN completion
245 */
246 start = get_timer(0);
247 do {
248 if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
249 printf("%s: Autonegotiation timeout\n", dev->name);
250 return -1;
251 }
252
253 status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
254 if (status < 0) {
255 printf("%s: Autonegotiation failed. status: %d\n",
256 dev->name, status);
257 return -1;
258 }
259 } while (!(status & BMSR_LSTATUS));
260
261 return 0;
262 }
263 #endif
264
265 static int fec_rx_task_enable(struct fec_priv *fec)
266 {
267 writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
268 return 0;
269 }
270
271 static int fec_rx_task_disable(struct fec_priv *fec)
272 {
273 return 0;
274 }
275
276 static int fec_tx_task_enable(struct fec_priv *fec)
277 {
278 writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
279 return 0;
280 }
281
282 static int fec_tx_task_disable(struct fec_priv *fec)
283 {
284 return 0;
285 }
286
287 /**
288 * Initialize receive task's buffer descriptors
289 * @param[in] fec all we know about the device yet
290 * @param[in] count number of receive buffer descriptors to initialize
291 * @param[in] dsize desired size of each receive buffer
293 *
294 * Init all RX descriptors to default values.
295 */
296 static void fec_rbd_init(struct fec_priv *fec, int count, int dsize)
297 {
298 uint32_t size;
299 uint8_t *data;
300 int i;
301
302 /*
303 * Reload the RX descriptors with default values and wipe
304 * the RX buffers.
305 */
306 size = roundup(dsize, ARCH_DMA_MINALIGN);
307 for (i = 0; i < count; i++) {
308 data = (uint8_t *)fec->rbd_base[i].data_pointer;
309 memset(data, 0, dsize);
310 flush_dcache_range((uint32_t)data, (uint32_t)data + size);
311
312 fec->rbd_base[i].status = FEC_RBD_EMPTY;
313 fec->rbd_base[i].data_length = 0;
314 }
315
316 /* Mark the last RBD to close the ring. */
317 fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
318 fec->rbd_index = 0;
319
320 flush_dcache_range((unsigned)fec->rbd_base,
321 (unsigned)fec->rbd_base + size);
322 }
323
324 /**
325 * Initialize transmit task's buffer descriptors
326 * @param[in] fec all we know about the device yet
327 *
328 * Transmit buffers are created externally. We only have to init the BDs here.\n
329 * Note: There is a race condition in the hardware. When only one BD is in
330 * use it must be marked with the WRAP bit to use it for every transmit.
331 * This bit in combination with the READY bit results in a double transmit
332 * of each data buffer. It seems the state machine checks READY earlier than
333 * it resets it after the first transfer.
334 * Using two BDs solves this issue.
335 */
336 static void fec_tbd_init(struct fec_priv *fec)
337 {
338 unsigned addr = (unsigned)fec->tbd_base;
339 unsigned size = roundup(2 * sizeof(struct fec_bd),
340 ARCH_DMA_MINALIGN);
341
342 memset(fec->tbd_base, 0, size);
343 fec->tbd_base[0].status = 0;
344 fec->tbd_base[1].status = FEC_TBD_WRAP;
345 fec->tbd_index = 0;
346 flush_dcache_range(addr, addr + size);
347 }
348
349 /**
350 * Mark the given receive buffer descriptor as free
351 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
352 * @param[in] pRbd buffer descriptor to mark free again
353 */
354 static void fec_rbd_clean(int last, struct fec_bd *pRbd)
355 {
356 unsigned short flags = FEC_RBD_EMPTY;
357 if (last)
358 flags |= FEC_RBD_WRAP;
359 writew(flags, &pRbd->status);
360 writew(0, &pRbd->data_length);
361 }
362
363 static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
364 unsigned char *mac)
365 {
366 imx_get_mac_from_fuse(dev_id, mac);
367 return !is_valid_ethaddr(mac);
368 }
369
370 static int fec_set_hwaddr(struct eth_device *dev)
371 {
372 uchar *mac = dev->enetaddr;
373 struct fec_priv *fec = (struct fec_priv *)dev->priv;
374
375 writel(0, &fec->eth->iaddr1);
376 writel(0, &fec->eth->iaddr2);
377 writel(0, &fec->eth->gaddr1);
378 writel(0, &fec->eth->gaddr2);
379
380 /*
381 * Set physical address
382 */
383 writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
384 &fec->eth->paddr1);
385 writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);
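/*
 * Note: the low half-word written to paddr2 above is 0x8808, the MAC
 * control (pause) frame EtherType that the FEC expects in this register.
 */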
386
387 return 0;
388 }
389
390 /*
391 * Do initial configuration of the FEC registers
392 */
393 static void fec_reg_setup(struct fec_priv *fec)
394 {
395 uint32_t rcntrl;
396
397 /*
398 * Set interrupt mask register
399 */
400 writel(0x00000000, &fec->eth->imask);
401
402 /*
403 * Clear FEC-Lite interrupt event register(IEVENT)
404 */
405 writel(0xffffffff, &fec->eth->ievent);
406
407
408 /*
409 * Set FEC-Lite receive control register (R_CNTRL):
410 */
411
412 /* Start with frame length = 1518, common for all modes. */
413 rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
414 if (fec->xcv_type != SEVENWIRE) /* xMII modes */
415 rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
416 if (fec->xcv_type == RGMII)
417 rcntrl |= FEC_RCNTRL_RGMII;
418 else if (fec->xcv_type == RMII)
419 rcntrl |= FEC_RCNTRL_RMII;
420
421 writel(rcntrl, &fec->eth->r_cntrl);
422 }
423
424 /**
425 * Start the FEC engine
426 * @param[in] edev Our device to handle
427 */
428 static int fec_open(struct eth_device *edev)
429 {
430 struct fec_priv *fec = (struct fec_priv *)edev->priv;
431 int speed;
432 uint32_t addr, size;
433 int i;
434
435 debug("fec_open: fec_open(dev)\n");
436 /* full-duplex, heartbeat disabled */
437 writel(1 << 2, &fec->eth->x_cntrl);
438 fec->rbd_index = 0;
439
440 /* Invalidate all descriptors */
441 for (i = 0; i < FEC_RBD_NUM - 1; i++)
442 fec_rbd_clean(0, &fec->rbd_base[i]);
443 fec_rbd_clean(1, &fec->rbd_base[i]);
444
445 /* Flush the descriptors into RAM */
446 size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
447 ARCH_DMA_MINALIGN);
448 addr = (uint32_t)fec->rbd_base;
449 flush_dcache_range(addr, addr + size);
450
451 #ifdef FEC_QUIRK_ENET_MAC
452 /* Enable ENET HW endian SWAP */
453 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
454 &fec->eth->ecntrl);
455 /* Enable ENET store and forward mode */
456 writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
457 &fec->eth->x_wmrk);
458 #endif
459 /*
460 * Enable FEC-Lite controller
461 */
462 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
463 &fec->eth->ecntrl);
464 #if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
465 udelay(100);
466 /*
467 * setup the MII gasket for RMII mode
468 */
469
470 /* disable the gasket */
471 writew(0, &fec->eth->miigsk_enr);
472
473 /* wait for the gasket to be disabled */
474 while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
475 udelay(2);
476
477 /* configure gasket for RMII, 50 MHz, no loopback, and no echo */
478 writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);
479
480 /* re-enable the gasket */
481 writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);
482
483 /* wait until MII gasket is ready */
484 int max_loops = 10;
485 while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
486 if (--max_loops <= 0) {
487 printf("WAIT for MII Gasket ready timed out\n");
488 break;
489 }
490 }
491 #endif
492
493 #ifdef CONFIG_PHYLIB
494 {
495 /* Start up the PHY */
496 int ret = phy_startup(fec->phydev);
497
498 if (ret) {
499 printf("Could not initialize PHY %s\n",
500 fec->phydev->dev->name);
501 return ret;
502 }
503 speed = fec->phydev->speed;
504 }
505 #else
506 miiphy_wait_aneg(edev);
507 speed = miiphy_speed(edev->name, fec->phy_id);
508 miiphy_duplex(edev->name, fec->phy_id);
509 #endif
510
511 #ifdef FEC_QUIRK_ENET_MAC
512 {
513 u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
514 u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
515 if (speed == _1000BASET)
516 ecr |= FEC_ECNTRL_SPEED;
517 else if (speed != _100BASET)
518 rcr |= FEC_RCNTRL_RMII_10T;
519 writel(ecr, &fec->eth->ecntrl);
520 writel(rcr, &fec->eth->r_cntrl);
521 }
522 #endif
523 debug("%s:Speed=%i\n", __func__, speed);
524
525 /*
526 * Enable SmartDMA receive task
527 */
528 fec_rx_task_enable(fec);
529
530 udelay(100000);
531 return 0;
532 }
533
534 static int fec_init(struct eth_device *dev, bd_t* bd)
535 {
536 struct fec_priv *fec = (struct fec_priv *)dev->priv;
537 uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
538 int i;
539
540 /* Initialize MAC address */
541 fec_set_hwaddr(dev);
542
543 /*
544 * Setup transmit descriptors, there are two in total.
545 */
546 fec_tbd_init(fec);
547
548 /* Setup receive descriptors. */
549 fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE);
550
551 fec_reg_setup(fec);
552
553 if (fec->xcv_type != SEVENWIRE)
554 fec_mii_setspeed(fec->bus->priv);
555
556 /*
557 * Set Opcode/Pause Duration Register
558 */
559 writel(0x00010020, &fec->eth->op_pause); /* FIXME 0xffff0020; */
560 writel(0x2, &fec->eth->x_wmrk);
561 /*
562 * Set multicast address filter
563 */
564 writel(0x00000000, &fec->eth->gaddr1);
565 writel(0x00000000, &fec->eth->gaddr2);
566
567
568 /* Do not access reserved register for i.MX6UL */
569 if (!is_cpu_type(MXC_CPU_MX6UL)) {
570 /* clear MIB RAM */
571 for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
572 writel(0, i);
573
574 /* FIFO receive start register */
575 writel(0x520, &fec->eth->r_fstart);
576 }
577
578 /* size and address of each buffer */
579 writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
580 writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
581 writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);
582
583 #ifndef CONFIG_PHYLIB
584 if (fec->xcv_type != SEVENWIRE)
585 miiphy_restart_aneg(dev);
586 #endif
587 fec_open(dev);
588 return 0;
589 }
590
591 /**
592 * Halt the FEC engine
593 * @param[in] dev Our device to handle
594 */
595 static void fec_halt(struct eth_device *dev)
596 {
597 struct fec_priv *fec = (struct fec_priv *)dev->priv;
598 int counter = 0xffff;
599
600 /*
601 * issue graceful stop command to the FEC transmitter if necessary
602 */
603 writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
604 &fec->eth->x_cntrl);
605
606 debug("eth_halt: wait for stop regs\n");
607 /*
608 * wait for graceful stop to register
609 */
610 while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
611 udelay(1);
612
613 /*
614 * Disable SmartDMA tasks
615 */
616 fec_tx_task_disable(fec);
617 fec_rx_task_disable(fec);
618
619 /*
620 * Disable the Ethernet Controller
621 * Note: this will also reset the BD index counter!
622 */
623 writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
624 &fec->eth->ecntrl);
625 fec->rbd_index = 0;
626 fec->tbd_index = 0;
627 debug("eth_halt: done\n");
628 }
629
630 /**
631 * Transmit one frame
632 * @param[in] dev Our ethernet device to handle
633 * @param[in] packet Pointer to the data to be transmitted
634 * @param[in] length Data count in bytes
635 * @return 0 on success
636 */
637 static int fec_send(struct eth_device *dev, void *packet, int length)
638 {
639 unsigned int status;
640 uint32_t size, end;
641 uint32_t addr;
642 int timeout = FEC_XFER_TIMEOUT;
643 int ret = 0;
644
645 /*
646 * This routine transmits one frame; it accepts only 6-byte
647 * Ethernet addresses.
648 */
649 struct fec_priv *fec = (struct fec_priv *)dev->priv;
650
651 /*
652 * Check for valid length of data.
653 */
654 if ((length > 1500) || (length <= 0)) {
655 printf("Payload (%d) too large\n", length);
656 return -1;
657 }
658
659 /*
660 * Set up the transmit buffer. Transmission alternates between the two
661 * buffer descriptors (see the tbd_index toggle at the end of this
662 * function). We also flush the packet to RAM here to avoid cache trouble.
663 */
664 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
665 swap_packet((uint32_t *)packet, length);
666 #endif
667
668 addr = (uint32_t)packet;
669 end = roundup(addr + length, ARCH_DMA_MINALIGN);
670 addr &= ~(ARCH_DMA_MINALIGN - 1);
671 flush_dcache_range(addr, end);
672
673 writew(length, &fec->tbd_base[fec->tbd_index].data_length);
674 writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);
675
676 /*
677 * update BD's status now
678 * This block:
679 * - is always the last in a chain (means no chain)
680 * - should transmit the CRC
681 * - might be the last BD in the list, so the address counter should
682 * wrap (-> keep the WRAP flag)
683 */
684 status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
685 status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
686 writew(status, &fec->tbd_base[fec->tbd_index].status);
687
688 /*
689 * Flush data cache. This code flushes both TX descriptors to RAM.
690 * After this code, the descriptors will be safely in RAM and we
691 * can start DMA.
692 */
693 size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
694 addr = (uint32_t)fec->tbd_base;
695 flush_dcache_range(addr, addr + size);
696
697 /*
698 * Below we read the DMA descriptor's last four bytes back from the
699 * DRAM. This is important in order to make sure that all WRITE
700 * operations on the bus that were triggered by previous cache FLUSH
701 * have completed.
702 *
703 * Otherwise, on MX28, it is possible to observe a corruption of the
704 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
705 * for the bus structure of MX28. The scenario is as follows:
706 *
707 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
708 * to DRAM due to flush_dcache_range()
709 * 2) ARM core writes the FEC registers via AHB_ARB2
710 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
711 *
712 * Note that 2) does sometimes finish before 1) due to reordering of
713 * WRITE accesses on the AHB bus, therefore triggering 3) before the
714 * DMA descriptor is fully written into DRAM. This results in occasional
715 * corruption of the DMA descriptor.
716 */
717 readl(addr + size - 4);
718
719 /*
720 * Enable SmartDMA transmit task
721 */
722 fec_tx_task_enable(fec);
723
724 /*
725 * Wait until the frame is sent. On each turn of the wait cycle, we must
726 * invalidate the data cache to see what's really in RAM, and we also
727 * need a barrier here.
728 */
729 while (--timeout) {
730 if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
731 break;
732 }
733
734 if (!timeout) {
735 ret = -EINVAL;
736 goto out;
737 }
738
739 /*
740 * The TDAR bit is cleared when the descriptors are all out from TX
741 * but on mx6solox we noticed that the READY bit is still not cleared
742 * right after TDAR.
743 * These are two distinct signals, and in IC simulation we found that
744 * TDAR always gets cleared before the READY bit of the last BD becomes
745 * cleared.
746 * mx6solox uses a later version of the FEC IP, and it looks like this
747 * intrinsic behaviour of the TDAR bit has changed in the newer FEC
748 * version.
749 *
750 * Fix this by polling the READY bit of BD after the TDAR polling,
751 * which covers the mx6solox case and does not harm the other SoCs.
752 */
753 timeout = FEC_XFER_TIMEOUT;
754 while (--timeout) {
755 invalidate_dcache_range(addr, addr + size);
756 if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
757 FEC_TBD_READY))
758 break;
759 }
760
761 if (!timeout)
762 ret = -EINVAL;
763
764 out:
765 debug("fec_send: status 0x%x index %d ret %i\n",
766 readw(&fec->tbd_base[fec->tbd_index].status),
767 fec->tbd_index, ret);
768 /* for next transmission use the other buffer */
769 if (fec->tbd_index)
770 fec->tbd_index = 0;
771 else
772 fec->tbd_index = 1;
773
774 return ret;
775 }
776
777 /**
778 * Pull one frame from the card
779 * @param[in] dev Our ethernet device to handle
780 * @return Length of packet read
781 */
782 static int fec_recv(struct eth_device *dev)
783 {
784 struct fec_priv *fec = (struct fec_priv *)dev->priv;
785 struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
786 unsigned long ievent;
787 int frame_length, len = 0;
788 uint16_t bd_status;
789 uint32_t addr, size, end;
790 int i;
791 ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);
792
793 /*
794 * Check if any critical events have happened
795 */
796 ievent = readl(&fec->eth->ievent);
797 writel(ievent, &fec->eth->ievent);
798 debug("fec_recv: ievent 0x%lx\n", ievent);
799 if (ievent & FEC_IEVENT_BABR) {
800 fec_halt(dev);
801 fec_init(dev, fec->bd);
802 printf("some error: 0x%08lx\n", ievent);
803 return 0;
804 }
805 if (ievent & FEC_IEVENT_HBERR) {
806 /* Heartbeat error */
807 writel(0x00000001 | readl(&fec->eth->x_cntrl),
808 &fec->eth->x_cntrl);
809 }
810 if (ievent & FEC_IEVENT_GRA) {
811 /* Graceful stop complete */
812 if (readl(&fec->eth->x_cntrl) & 0x00000001) {
813 fec_halt(dev);
814 writel(~0x00000001 & readl(&fec->eth->x_cntrl),
815 &fec->eth->x_cntrl);
816 fec_init(dev, fec->bd);
817 }
818 }
819
820 /*
821 * Read the buffer status. Before the status can be read, the data cache
822 * must be invalidated, because the data in RAM might have been changed
823 * by DMA. The descriptors are properly aligned to cachelines so there's
824 * no need to worry they'd overlap.
825 *
826 * WARNING: By invalidating the descriptor here, we also invalidate
827 * the descriptors surrounding this one. Therefore we can NOT change the
828 * contents of this descriptor nor the surrounding ones. The problem is
829 * that in order to mark the descriptor as processed, we need to change
830 * the descriptor. The solution is to mark the whole cache line when all
831 * descriptors in the cache line are processed.
832 */
833 addr = (uint32_t)rbd;
834 addr &= ~(ARCH_DMA_MINALIGN - 1);
835 size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
836 invalidate_dcache_range(addr, addr + size);
837
838 bd_status = readw(&rbd->status);
839 debug("fec_recv: status 0x%x\n", bd_status);
840
841 if (!(bd_status & FEC_RBD_EMPTY)) {
842 if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
843 ((readw(&rbd->data_length) - 4) > 14)) {
844 /*
845 * Get buffer address and size
846 */
847 addr = readl(&rbd->data_pointer);
848 frame_length = readw(&rbd->data_length) - 4;
849 /*
850 * Invalidate data cache over the buffer
851 */
852 end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
853 addr &= ~(ARCH_DMA_MINALIGN - 1);
854 invalidate_dcache_range(addr, end);
855
856 /*
857 * Fill the buffer and pass it to upper layers
858 */
859 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
860 swap_packet((uint32_t *)addr, frame_length);
861 #endif
862 memcpy(buff, (char *)addr, frame_length);
863 net_process_received_packet(buff, frame_length);
864 len = frame_length;
865 } else {
866 if (bd_status & FEC_RBD_ERR)
867 printf("error frame: 0x%08x 0x%08x\n",
868 addr, bd_status);
869 }
870
871 /*
872 * Free the current buffer, restart the engine and move forward
873 * to the next buffer. Here we check if the whole cacheline of
874 * descriptors was already processed and if so, we mark it free
875 * as whole.
876 */
877 size = RXDESC_PER_CACHELINE - 1;
878 if ((fec->rbd_index & size) == size) {
879 i = fec->rbd_index - size;
880 addr = (uint32_t)&fec->rbd_base[i];
881 for (; i <= fec->rbd_index ; i++) {
882 fec_rbd_clean(i == (FEC_RBD_NUM - 1),
883 &fec->rbd_base[i]);
884 }
885 flush_dcache_range(addr,
886 addr + ARCH_DMA_MINALIGN);
887 }
888
889 fec_rx_task_enable(fec);
890 fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
891 }
892 debug("fec_recv: stop\n");
893
894 return len;
895 }
896
897 static void fec_set_dev_name(char *dest, int dev_id)
898 {
899 sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
900 }
901
902 static int fec_alloc_descs(struct fec_priv *fec)
903 {
904 unsigned int size;
905 int i;
906 uint8_t *data;
907
908 /* Allocate TX descriptors. */
909 size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
910 fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
911 if (!fec->tbd_base)
912 goto err_tx;
913
914 /* Allocate RX descriptors. */
915 size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
916 fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
917 if (!fec->rbd_base)
918 goto err_rx;
919
920 memset(fec->rbd_base, 0, size);
921
922 /* Allocate RX buffers. */
923
924 /* Maximum RX buffer size. */
925 size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN);
926 for (i = 0; i < FEC_RBD_NUM; i++) {
927 data = memalign(FEC_DMA_RX_MINALIGN, size);
928 if (!data) {
929 printf("%s: error allocating rxbuf %d\n", __func__, i);
930 goto err_ring;
931 }
932
933 memset(data, 0, size);
934
935 fec->rbd_base[i].data_pointer = (uint32_t)data;
936 fec->rbd_base[i].status = FEC_RBD_EMPTY;
937 fec->rbd_base[i].data_length = 0;
938 /* Flush the buffer to memory. */
939 flush_dcache_range((uint32_t)data, (uint32_t)data + size);
940 }
941
942 /* Mark the last RBD to close the ring. */
943 fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
944
945 fec->rbd_index = 0;
946 fec->tbd_index = 0;
947
948 return 0;
949
950 err_ring:
951 for (; i >= 0; i--)
952 free((void *)fec->rbd_base[i].data_pointer);
953 free(fec->rbd_base);
954 err_rx:
955 free(fec->tbd_base);
956 err_tx:
957 return -ENOMEM;
958 }
959
960 static void fec_free_descs(struct fec_priv *fec)
961 {
962 int i;
963
964 for (i = 0; i < FEC_RBD_NUM; i++)
965 free((void *)fec->rbd_base[i].data_pointer);
966 free(fec->rbd_base);
967 free(fec->tbd_base);
968 }
969
970 #ifdef CONFIG_PHYLIB
971 int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
972 struct mii_dev *bus, struct phy_device *phydev)
973 #else
974 static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
975 struct mii_dev *bus, int phy_id)
976 #endif
977 {
978 struct eth_device *edev;
979 struct fec_priv *fec;
980 unsigned char ethaddr[6];
981 uint32_t start;
982 int ret = 0;
983
984 /* create and fill edev struct */
985 edev = (struct eth_device *)malloc(sizeof(struct eth_device));
986 if (!edev) {
987 puts("fec_mxc: not enough malloc memory for eth_device\n");
988 ret = -ENOMEM;
989 goto err1;
990 }
991
992 fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
993 if (!fec) {
994 puts("fec_mxc: not enough malloc memory for fec_priv\n");
995 ret = -ENOMEM;
996 goto err2;
997 }
998
999 memset(edev, 0, sizeof(*edev));
1000 memset(fec, 0, sizeof(*fec));
1001
1002 ret = fec_alloc_descs(fec);
1003 if (ret)
1004 goto err3;
1005
1006 edev->priv = fec;
1007 edev->init = fec_init;
1008 edev->send = fec_send;
1009 edev->recv = fec_recv;
1010 edev->halt = fec_halt;
1011 edev->write_hwaddr = fec_set_hwaddr;
1012
1013 fec->eth = (struct ethernet_regs *)base_addr;
1014 fec->bd = bd;
1015
1016 fec->xcv_type = CONFIG_FEC_XCV_TYPE;
1017
1018 /* Reset chip. */
1019 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
1020 start = get_timer(0);
1021 while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
1022 if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
1023 printf("FEC MXC: Timeout resetting chip\n");
1024 goto err4;
1025 }
1026 udelay(10);
1027 }
1028
1029 fec_reg_setup(fec);
1030 fec_set_dev_name(edev->name, dev_id);
1031 fec->dev_id = (dev_id == -1) ? 0 : dev_id;
1032 fec->bus = bus;
1033 fec_mii_setspeed(bus->priv);
1034 #ifdef CONFIG_PHYLIB
1035 fec->phydev = phydev;
1036 phy_connect_dev(phydev, edev);
1037 /* Configure phy */
1038 phy_config(phydev);
1039 #else
1040 fec->phy_id = phy_id;
1041 #endif
1042 eth_register(edev);
1043
1044 if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
1045 debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
1046 memcpy(edev->enetaddr, ethaddr, 6);
1047 if (!getenv("ethaddr"))
1048 eth_setenv_enetaddr("ethaddr", ethaddr);
1049 }
1050 return ret;
1051 err4:
1052 fec_free_descs(fec);
1053 err3:
1054 free(fec);
1055 err2:
1056 free(edev);
1057 err1:
1058 return ret;
1059 }
1060
1061 struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
1062 {
1063 struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
1064 struct mii_dev *bus;
1065 int ret;
1066
1067 bus = mdio_alloc();
1068 if (!bus) {
1069 printf("mdio_alloc failed\n");
1070 return NULL;
1071 }
1072 bus->read = fec_phy_read;
1073 bus->write = fec_phy_write;
1074 bus->priv = eth;
1075 fec_set_dev_name(bus->name, dev_id);
1076
1077 ret = mdio_register(bus);
1078 if (ret) {
1079 printf("mdio_register failed\n");
1080 free(bus);
1081 return NULL;
1082 }
1083 fec_mii_setspeed(eth);
1084 return bus;
1085 }
1086
1087 int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
1088 {
1089 uint32_t base_mii;
1090 struct mii_dev *bus = NULL;
1091 #ifdef CONFIG_PHYLIB
1092 struct phy_device *phydev = NULL;
1093 #endif
1094 int ret;
1095
1096 #ifdef CONFIG_MX28
1097 /*
1098 * The i.MX28 has two ethernet interfaces, but they are not equal.
1099 * Only the first one can access the MDIO bus.
1100 */
1101 base_mii = MXS_ENET0_BASE;
1102 #else
1103 base_mii = addr;
1104 #endif
1105 debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
1106 bus = fec_get_miibus(base_mii, dev_id);
1107 if (!bus)
1108 return -ENOMEM;
1109 #ifdef CONFIG_PHYLIB
1110 phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
1111 if (!phydev) {
1112 mdio_unregister(bus);
1113 free(bus);
1114 return -ENOMEM;
1115 }
1116 ret = fec_probe(bd, dev_id, addr, bus, phydev);
1117 #else
1118 ret = fec_probe(bd, dev_id, addr, bus, phy_id);
1119 #endif
1120 if (ret) {
1121 #ifdef CONFIG_PHYLIB
1122 free(phydev);
1123 #endif
1124 mdio_unregister(bus);
1125 free(bus);
1126 }
1127 return ret;
1128 }
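/*
 * Typical board-code usage (sketch only; the device id, PHY address and FEC
 * base address below are placeholders a board would supply itself):
 *
 *   int board_eth_init(bd_t *bis)
 *   {
 *           return fecmxc_initialize_multi(bis, 0, 1, IMX_FEC_BASE);
 *   }
 *
 * Boards with a single controller and a fixed PHY address can instead enable
 * CONFIG_FEC_MXC_PHYADDR and use fecmxc_initialize() below.
 */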
1129
1130 #ifdef CONFIG_FEC_MXC_PHYADDR
1131 int fecmxc_initialize(bd_t *bd)
1132 {
1133 return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
1134 IMX_FEC_BASE);
1135 }
1136 #endif
1137
1138 #ifndef CONFIG_PHYLIB
1139 int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
1140 {
1141 struct fec_priv *fec = (struct fec_priv *)dev->priv;
1142 fec->mii_postcall = cb;
1143 return 0;
1144 }
1145 #endif