]>
git.ipfire.org Git - people/ms/u-boot.git/blob - drivers/net/fec_mxc.c
2 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
3 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
4 * (C) Copyright 2008 Armadeus Systems nc
5 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
6 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
8 * SPDX-License-Identifier: GPL-2.0+
19 #include <asm/arch/clock.h>
20 #include <asm/arch/imx-regs.h>
21 #include <asm/imx-common/sys_proto.h>
23 #include <asm/errno.h>
24 #include <linux/compiler.h>
26 DECLARE_GLOBAL_DATA_PTR
;
29 * Timeout the transfer after 5 mS. This is usually a bit more, since
30 * the code in the tightloops this timeout is used in adds some overhead.
32 #define FEC_XFER_TIMEOUT 5000
35 * The standard 32-byte DMA alignment does not work on mx6solox, which requires
36 * 64-byte alignment in the DMA RX FEC buffer.
37 * Introduce the FEC_DMA_RX_MINALIGN which can cover mx6solox needs and also
38 * satisfies the alignment on other SoCs (32-bytes)
40 #define FEC_DMA_RX_MINALIGN 64
43 #error "CONFIG_MII has to be defined!"
46 #ifndef CONFIG_FEC_XCV_TYPE
47 #define CONFIG_FEC_XCV_TYPE MII100
51 * The i.MX28 operates with packets in big endian. We need to swap them before
52 * sending and after receiving.
55 #define CONFIG_FEC_MXC_SWAP_PACKET
58 #define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
60 /* Check various alignment issues at compile time */
61 #if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
62 #error "ARCH_DMA_MINALIGN must be multiple of 16!"
65 #if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
66 (PKTALIGN % ARCH_DMA_MINALIGN != 0))
67 #error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
72 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
/*
 * Byte-swap a packet buffer in place, one 32-bit word at a time.
 * The i.MX28 FEC handles frames in big endian, so buffers are swapped
 * before transmit and after receive (see CONFIG_FEC_MXC_SWAP_PACKET).
 * 'length' is in bytes; a partial trailing word is swapped whole.
 */
static void swap_packet(uint32_t *packet, int length)
{
	int word, nwords = DIV_ROUND_UP(length, 4);

	for (word = 0; word < nwords; word++)
		packet[word] = __swab32(packet[word]);
}
83 * MII-interface related functions
85 static int fec_mdio_read(struct ethernet_regs
*eth
, uint8_t phyAddr
,
88 uint32_t reg
; /* convenient holder for the PHY register */
89 uint32_t phy
; /* convenient holder for the PHY */
94 * reading from any PHY's register is done by properly
95 * programming the FEC's MII data register.
97 writel(FEC_IEVENT_MII
, ð
->ievent
);
98 reg
= regAddr
<< FEC_MII_DATA_RA_SHIFT
;
99 phy
= phyAddr
<< FEC_MII_DATA_PA_SHIFT
;
101 writel(FEC_MII_DATA_ST
| FEC_MII_DATA_OP_RD
| FEC_MII_DATA_TA
|
102 phy
| reg
, ð
->mii_data
);
105 * wait for the related interrupt
107 start
= get_timer(0);
108 while (!(readl(ð
->ievent
) & FEC_IEVENT_MII
)) {
109 if (get_timer(start
) > (CONFIG_SYS_HZ
/ 1000)) {
110 printf("Read MDIO failed...\n");
116 * clear mii interrupt bit
118 writel(FEC_IEVENT_MII
, ð
->ievent
);
121 * it's now safe to read the PHY's register
123 val
= (unsigned short)readl(ð
->mii_data
);
124 debug("%s: phy: %02x reg:%02x val:%#x\n", __func__
, phyAddr
,
129 static void fec_mii_setspeed(struct ethernet_regs
*eth
)
132 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
133 * and do not drop the Preamble.
135 register u32 speed
= DIV_ROUND_UP(imx_get_fecclk(), 5000000);
136 #ifdef FEC_QUIRK_ENET_MAC
140 writel(speed
, ð
->mii_speed
);
141 debug("%s: mii_speed %08x\n", __func__
, readl(ð
->mii_speed
));
144 static int fec_mdio_write(struct ethernet_regs
*eth
, uint8_t phyAddr
,
145 uint8_t regAddr
, uint16_t data
)
147 uint32_t reg
; /* convenient holder for the PHY register */
148 uint32_t phy
; /* convenient holder for the PHY */
151 reg
= regAddr
<< FEC_MII_DATA_RA_SHIFT
;
152 phy
= phyAddr
<< FEC_MII_DATA_PA_SHIFT
;
154 writel(FEC_MII_DATA_ST
| FEC_MII_DATA_OP_WR
|
155 FEC_MII_DATA_TA
| phy
| reg
| data
, ð
->mii_data
);
158 * wait for the MII interrupt
160 start
= get_timer(0);
161 while (!(readl(ð
->ievent
) & FEC_IEVENT_MII
)) {
162 if (get_timer(start
) > (CONFIG_SYS_HZ
/ 1000)) {
163 printf("Write MDIO failed...\n");
169 * clear MII interrupt bit
171 writel(FEC_IEVENT_MII
, ð
->ievent
);
172 debug("%s: phy: %02x reg:%02x val:%#x\n", __func__
, phyAddr
,
178 static int fec_phy_read(struct mii_dev
*bus
, int phyAddr
, int dev_addr
,
181 return fec_mdio_read(bus
->priv
, phyAddr
, regAddr
);
184 static int fec_phy_write(struct mii_dev
*bus
, int phyAddr
, int dev_addr
,
185 int regAddr
, u16 data
)
187 return fec_mdio_write(bus
->priv
, phyAddr
, regAddr
, data
);
190 #ifndef CONFIG_PHYLIB
191 static int miiphy_restart_aneg(struct eth_device
*dev
)
194 #if !defined(CONFIG_FEC_MXC_NO_ANEG)
195 struct fec_priv
*fec
= (struct fec_priv
*)dev
->priv
;
196 struct ethernet_regs
*eth
= fec
->bus
->priv
;
199 * Wake up from sleep if necessary
200 * Reset PHY, then delay 300ns
203 fec_mdio_write(eth
, fec
->phy_id
, MII_DCOUNTER
, 0x00FF);
205 fec_mdio_write(eth
, fec
->phy_id
, MII_BMCR
, BMCR_RESET
);
209 * Set the auto-negotiation advertisement register bits
211 fec_mdio_write(eth
, fec
->phy_id
, MII_ADVERTISE
,
212 LPA_100FULL
| LPA_100HALF
| LPA_10FULL
|
213 LPA_10HALF
| PHY_ANLPAR_PSB_802_3
);
214 fec_mdio_write(eth
, fec
->phy_id
, MII_BMCR
,
215 BMCR_ANENABLE
| BMCR_ANRESTART
);
217 if (fec
->mii_postcall
)
218 ret
= fec
->mii_postcall(fec
->phy_id
);
224 static int miiphy_wait_aneg(struct eth_device
*dev
)
228 struct fec_priv
*fec
= (struct fec_priv
*)dev
->priv
;
229 struct ethernet_regs
*eth
= fec
->bus
->priv
;
232 * Wait for AN completion
234 start
= get_timer(0);
236 if (get_timer(start
) > (CONFIG_SYS_HZ
* 5)) {
237 printf("%s: Autonegotiation timeout\n", dev
->name
);
241 status
= fec_mdio_read(eth
, fec
->phy_id
, MII_BMSR
);
243 printf("%s: Autonegotiation failed. status: %d\n",
247 } while (!(status
& BMSR_LSTATUS
));
253 static int fec_rx_task_enable(struct fec_priv
*fec
)
255 writel(FEC_R_DES_ACTIVE_RDAR
, &fec
->eth
->r_des_active
);
259 static int fec_rx_task_disable(struct fec_priv
*fec
)
264 static int fec_tx_task_enable(struct fec_priv
*fec
)
266 writel(FEC_X_DES_ACTIVE_TDAR
, &fec
->eth
->x_des_active
);
270 static int fec_tx_task_disable(struct fec_priv
*fec
)
276 * Initialize receive task's buffer descriptors
277 * @param[in] fec all we know about the device yet
278 * @param[in] count receive buffer count to be allocated
279 * @param[in] dsize desired size of each receive buffer
280 * @return 0 on success
282 * Init all RX descriptors to default values.
284 static void fec_rbd_init(struct fec_priv
*fec
, int count
, int dsize
)
291 * Reload the RX descriptors with default values and wipe
294 size
= roundup(dsize
, ARCH_DMA_MINALIGN
);
295 for (i
= 0; i
< count
; i
++) {
296 data
= (uint8_t *)fec
->rbd_base
[i
].data_pointer
;
297 memset(data
, 0, dsize
);
298 flush_dcache_range((uint32_t)data
, (uint32_t)data
+ size
);
300 fec
->rbd_base
[i
].status
= FEC_RBD_EMPTY
;
301 fec
->rbd_base
[i
].data_length
= 0;
304 /* Mark the last RBD to close the ring. */
305 fec
->rbd_base
[i
- 1].status
= FEC_RBD_WRAP
| FEC_RBD_EMPTY
;
308 flush_dcache_range((unsigned)fec
->rbd_base
,
309 (unsigned)fec
->rbd_base
+ size
);
313 * Initialize transmit task's buffer descriptors
314 * @param[in] fec all we know about the device yet
316 * Transmit buffers are created externally. We only have to init the BDs here.\n
317 * Note: There is a race condition in the hardware. When only one BD is in
318 * use it must be marked with the WRAP bit to use it for every transmit.
319 * This bit in combination with the READY bit results into double transmit
320 * of each data buffer. It seems the state machine checks READY earlier than
321 * resetting it after the first transfer.
322 * Using two BDs solves this issue.
324 static void fec_tbd_init(struct fec_priv
*fec
)
326 unsigned addr
= (unsigned)fec
->tbd_base
;
327 unsigned size
= roundup(2 * sizeof(struct fec_bd
),
330 memset(fec
->tbd_base
, 0, size
);
331 fec
->tbd_base
[0].status
= 0;
332 fec
->tbd_base
[1].status
= FEC_TBD_WRAP
;
334 flush_dcache_range(addr
, addr
+ size
);
338 * Mark the given read buffer descriptor as free
339 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
340 * @param[in] pRbd buffer descriptor to mark free again
342 static void fec_rbd_clean(int last
, struct fec_bd
*pRbd
)
344 unsigned short flags
= FEC_RBD_EMPTY
;
346 flags
|= FEC_RBD_WRAP
;
347 writew(flags
, &pRbd
->status
);
348 writew(0, &pRbd
->data_length
);
/*
 * Read this controller's MAC address from the SoC fuse bank.
 * @return 0 when the fused address is a valid ethernet address,
 *         nonzero otherwise
 * NOTE(review): the 'mac' parameter line was lost in extraction; restored
 * as unsigned char * from its use with imx_get_mac_from_fuse() and
 * is_valid_ethaddr() — confirm against upstream.
 */
static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
			  unsigned char *mac)
{
	imx_get_mac_from_fuse(dev_id, mac);
	return is_valid_ethaddr(mac) ? 0 : 1;
}
358 static int fec_set_hwaddr(struct eth_device
*dev
)
360 uchar
*mac
= dev
->enetaddr
;
361 struct fec_priv
*fec
= (struct fec_priv
*)dev
->priv
;
363 writel(0, &fec
->eth
->iaddr1
);
364 writel(0, &fec
->eth
->iaddr2
);
365 writel(0, &fec
->eth
->gaddr1
);
366 writel(0, &fec
->eth
->gaddr2
);
369 * Set physical address
371 writel((mac
[0] << 24) + (mac
[1] << 16) + (mac
[2] << 8) + mac
[3],
373 writel((mac
[4] << 24) + (mac
[5] << 16) + 0x8808, &fec
->eth
->paddr2
);
379 * Do initial configuration of the FEC registers
381 static void fec_reg_setup(struct fec_priv
*fec
)
386 * Set interrupt mask register
388 writel(0x00000000, &fec
->eth
->imask
);
391 * Clear FEC-Lite interrupt event register(IEVENT)
393 writel(0xffffffff, &fec
->eth
->ievent
);
397 * Set FEC-Lite receive control register(R_CNTRL):
400 /* Start with frame length = 1518, common for all modes. */
401 rcntrl
= PKTSIZE
<< FEC_RCNTRL_MAX_FL_SHIFT
;
402 if (fec
->xcv_type
!= SEVENWIRE
) /* xMII modes */
403 rcntrl
|= FEC_RCNTRL_FCE
| FEC_RCNTRL_MII_MODE
;
404 if (fec
->xcv_type
== RGMII
)
405 rcntrl
|= FEC_RCNTRL_RGMII
;
406 else if (fec
->xcv_type
== RMII
)
407 rcntrl
|= FEC_RCNTRL_RMII
;
409 writel(rcntrl
, &fec
->eth
->r_cntrl
);
413 * Start the FEC engine
414 * @param[in] dev Our device to handle
416 static int fec_open(struct eth_device
*edev
)
418 struct fec_priv
*fec
= (struct fec_priv
*)edev
->priv
;
423 debug("fec_open: fec_open(dev)\n");
424 /* full-duplex, heartbeat disabled */
425 writel(1 << 2, &fec
->eth
->x_cntrl
);
428 /* Invalidate all descriptors */
429 for (i
= 0; i
< FEC_RBD_NUM
- 1; i
++)
430 fec_rbd_clean(0, &fec
->rbd_base
[i
]);
431 fec_rbd_clean(1, &fec
->rbd_base
[i
]);
433 /* Flush the descriptors into RAM */
434 size
= roundup(FEC_RBD_NUM
* sizeof(struct fec_bd
),
436 addr
= (uint32_t)fec
->rbd_base
;
437 flush_dcache_range(addr
, addr
+ size
);
439 #ifdef FEC_QUIRK_ENET_MAC
440 /* Enable ENET HW endian SWAP */
441 writel(readl(&fec
->eth
->ecntrl
) | FEC_ECNTRL_DBSWAP
,
443 /* Enable ENET store and forward mode */
444 writel(readl(&fec
->eth
->x_wmrk
) | FEC_X_WMRK_STRFWD
,
448 * Enable FEC-Lite controller
450 writel(readl(&fec
->eth
->ecntrl
) | FEC_ECNTRL_ETHER_EN
,
452 #if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
455 * setup the MII gasket for RMII mode
458 /* disable the gasket */
459 writew(0, &fec
->eth
->miigsk_enr
);
461 /* wait for the gasket to be disabled */
462 while (readw(&fec
->eth
->miigsk_enr
) & MIIGSK_ENR_READY
)
465 /* configure gasket for RMII, 50 MHz, no loopback, and no echo */
466 writew(MIIGSK_CFGR_IF_MODE_RMII
, &fec
->eth
->miigsk_cfgr
);
468 /* re-enable the gasket */
469 writew(MIIGSK_ENR_EN
, &fec
->eth
->miigsk_enr
);
471 /* wait until MII gasket is ready */
473 while ((readw(&fec
->eth
->miigsk_enr
) & MIIGSK_ENR_READY
) == 0) {
474 if (--max_loops
<= 0) {
475 printf("WAIT for MII Gasket ready timed out\n");
483 /* Start up the PHY */
484 int ret
= phy_startup(fec
->phydev
);
487 printf("Could not initialize PHY %s\n",
488 fec
->phydev
->dev
->name
);
491 speed
= fec
->phydev
->speed
;
494 miiphy_wait_aneg(edev
);
495 speed
= miiphy_speed(edev
->name
, fec
->phy_id
);
496 miiphy_duplex(edev
->name
, fec
->phy_id
);
499 #ifdef FEC_QUIRK_ENET_MAC
501 u32 ecr
= readl(&fec
->eth
->ecntrl
) & ~FEC_ECNTRL_SPEED
;
502 u32 rcr
= readl(&fec
->eth
->r_cntrl
) & ~FEC_RCNTRL_RMII_10T
;
503 if (speed
== _1000BASET
)
504 ecr
|= FEC_ECNTRL_SPEED
;
505 else if (speed
!= _100BASET
)
506 rcr
|= FEC_RCNTRL_RMII_10T
;
507 writel(ecr
, &fec
->eth
->ecntrl
);
508 writel(rcr
, &fec
->eth
->r_cntrl
);
511 debug("%s:Speed=%i\n", __func__
, speed
);
514 * Enable SmartDMA receive task
516 fec_rx_task_enable(fec
);
522 static int fec_init(struct eth_device
*dev
, bd_t
* bd
)
524 struct fec_priv
*fec
= (struct fec_priv
*)dev
->priv
;
525 uint32_t mib_ptr
= (uint32_t)&fec
->eth
->rmon_t_drop
;
528 /* Initialize MAC address */
532 * Setup transmit descriptors, there are two in total.
536 /* Setup receive descriptors. */
537 fec_rbd_init(fec
, FEC_RBD_NUM
, FEC_MAX_PKT_SIZE
);
541 if (fec
->xcv_type
!= SEVENWIRE
)
542 fec_mii_setspeed(fec
->bus
->priv
);
545 * Set Opcode/Pause Duration Register
547 writel(0x00010020, &fec
->eth
->op_pause
); /* FIXME 0xffff0020; */
548 writel(0x2, &fec
->eth
->x_wmrk
);
550 * Set multicast address filter
552 writel(0x00000000, &fec
->eth
->gaddr1
);
553 writel(0x00000000, &fec
->eth
->gaddr2
);
556 /* Do not access reserved register for i.MX6UL */
557 if (!is_cpu_type(MXC_CPU_MX6UL
)) {
559 for (i
= mib_ptr
; i
<= mib_ptr
+ 0xfc; i
+= 4)
562 /* FIFO receive start register */
563 writel(0x520, &fec
->eth
->r_fstart
);
566 /* size and address of each buffer */
567 writel(FEC_MAX_PKT_SIZE
, &fec
->eth
->emrbr
);
568 writel((uint32_t)fec
->tbd_base
, &fec
->eth
->etdsr
);
569 writel((uint32_t)fec
->rbd_base
, &fec
->eth
->erdsr
);
571 #ifndef CONFIG_PHYLIB
572 if (fec
->xcv_type
!= SEVENWIRE
)
573 miiphy_restart_aneg(dev
);
580 * Halt the FEC engine
581 * @param[in] dev Our device to handle
583 static void fec_halt(struct eth_device
*dev
)
585 struct fec_priv
*fec
= (struct fec_priv
*)dev
->priv
;
586 int counter
= 0xffff;
589 * issue graceful stop command to the FEC transmitter if necessary
591 writel(FEC_TCNTRL_GTS
| readl(&fec
->eth
->x_cntrl
),
594 debug("eth_halt: wait for stop regs\n");
596 * wait for graceful stop to register
598 while ((counter
--) && (!(readl(&fec
->eth
->ievent
) & FEC_IEVENT_GRA
)))
602 * Disable SmartDMA tasks
604 fec_tx_task_disable(fec
);
605 fec_rx_task_disable(fec
);
608 * Disable the Ethernet Controller
609 * Note: this will also reset the BD index counter!
611 writel(readl(&fec
->eth
->ecntrl
) & ~FEC_ECNTRL_ETHER_EN
,
615 debug("eth_halt: done\n");
620 * @param[in] dev Our ethernet device to handle
621 * @param[in] packet Pointer to the data to be transmitted
622 * @param[in] length Data count in bytes
623 * @return 0 on success
625 static int fec_send(struct eth_device
*dev
, void *packet
, int length
)
630 int timeout
= FEC_XFER_TIMEOUT
;
634 * This routine transmits one frame. This routine only accepts
635 * 6-byte Ethernet addresses.
637 struct fec_priv
*fec
= (struct fec_priv
*)dev
->priv
;
640 * Check for valid length of data.
642 if ((length
> 1500) || (length
<= 0)) {
643 printf("Payload (%d) too large\n", length
);
648 * Setup the transmit buffer. We are always using the first buffer for
649 * transmission, the second will be empty and only used to stop the DMA
650 * engine. We also flush the packet to RAM here to avoid cache trouble.
652 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
653 swap_packet((uint32_t *)packet
, length
);
656 addr
= (uint32_t)packet
;
657 end
= roundup(addr
+ length
, ARCH_DMA_MINALIGN
);
658 addr
&= ~(ARCH_DMA_MINALIGN
- 1);
659 flush_dcache_range(addr
, end
);
661 writew(length
, &fec
->tbd_base
[fec
->tbd_index
].data_length
);
662 writel(addr
, &fec
->tbd_base
[fec
->tbd_index
].data_pointer
);
665 * update BD's status now
667 * - is always the last in a chain (means no chain)
668 * - should transmitt the CRC
669 * - might be the last BD in the list, so the address counter should
670 * wrap (-> keep the WRAP flag)
672 status
= readw(&fec
->tbd_base
[fec
->tbd_index
].status
) & FEC_TBD_WRAP
;
673 status
|= FEC_TBD_LAST
| FEC_TBD_TC
| FEC_TBD_READY
;
674 writew(status
, &fec
->tbd_base
[fec
->tbd_index
].status
);
677 * Flush data cache. This code flushes both TX descriptors to RAM.
678 * After this code, the descriptors will be safely in RAM and we
681 size
= roundup(2 * sizeof(struct fec_bd
), ARCH_DMA_MINALIGN
);
682 addr
= (uint32_t)fec
->tbd_base
;
683 flush_dcache_range(addr
, addr
+ size
);
686 * Below we read the DMA descriptor's last four bytes back from the
687 * DRAM. This is important in order to make sure that all WRITE
688 * operations on the bus that were triggered by previous cache FLUSH
691 * Otherwise, on MX28, it is possible to observe a corruption of the
692 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
693 * for the bus structure of MX28. The scenario is as follows:
695 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
696 * to DRAM due to flush_dcache_range()
697 * 2) ARM core writes the FEC registers via AHB_ARB2
698 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
700 * Note that 2) does sometimes finish before 1) due to reordering of
701 * WRITE accesses on the AHB bus, therefore triggering 3) before the
702 * DMA descriptor is fully written into DRAM. This results in occasional
703 * corruption of the DMA descriptor.
705 readl(addr
+ size
- 4);
708 * Enable SmartDMA transmit task
710 fec_tx_task_enable(fec
);
713 * Wait until frame is sent. On each turn of the wait cycle, we must
714 * invalidate data cache to see what's really in RAM. Also, we need
718 if (!(readl(&fec
->eth
->x_des_active
) & FEC_X_DES_ACTIVE_TDAR
))
728 * The TDAR bit is cleared when the descriptors are all out from TX
729 * but on mx6solox we noticed that the READY bit is still not cleared
731 * These are two distinct signals, and in IC simulation, we found that
732 * TDAR always gets cleared prior than the READY bit of last BD becomes
734 * In mx6solox, we use a later version of FEC IP. It looks like that
735 * this intrinsic behaviour of TDAR bit has changed in this newer FEC
738 * Fix this by polling the READY bit of BD after the TDAR polling,
739 * which covers the mx6solox case and does not harm the other SoCs.
741 timeout
= FEC_XFER_TIMEOUT
;
743 invalidate_dcache_range(addr
, addr
+ size
);
744 if (!(readw(&fec
->tbd_base
[fec
->tbd_index
].status
) &
753 debug("fec_send: status 0x%x index %d ret %i\n",
754 readw(&fec
->tbd_base
[fec
->tbd_index
].status
),
755 fec
->tbd_index
, ret
);
756 /* for next transmission use the other buffer */
766 * Pull one frame from the card
767 * @param[in] dev Our ethernet device to handle
768 * @return Length of packet read
770 static int fec_recv(struct eth_device
*dev
)
772 struct fec_priv
*fec
= (struct fec_priv
*)dev
->priv
;
773 struct fec_bd
*rbd
= &fec
->rbd_base
[fec
->rbd_index
];
774 unsigned long ievent
;
775 int frame_length
, len
= 0;
777 uint32_t addr
, size
, end
;
779 ALLOC_CACHE_ALIGN_BUFFER(uchar
, buff
, FEC_MAX_PKT_SIZE
);
782 * Check if any critical events have happened
784 ievent
= readl(&fec
->eth
->ievent
);
785 writel(ievent
, &fec
->eth
->ievent
);
786 debug("fec_recv: ievent 0x%lx\n", ievent
);
787 if (ievent
& FEC_IEVENT_BABR
) {
789 fec_init(dev
, fec
->bd
);
790 printf("some error: 0x%08lx\n", ievent
);
793 if (ievent
& FEC_IEVENT_HBERR
) {
794 /* Heartbeat error */
795 writel(0x00000001 | readl(&fec
->eth
->x_cntrl
),
798 if (ievent
& FEC_IEVENT_GRA
) {
799 /* Graceful stop complete */
800 if (readl(&fec
->eth
->x_cntrl
) & 0x00000001) {
802 writel(~0x00000001 & readl(&fec
->eth
->x_cntrl
),
804 fec_init(dev
, fec
->bd
);
809 * Read the buffer status. Before the status can be read, the data cache
810 * must be invalidated, because the data in RAM might have been changed
811 * by DMA. The descriptors are properly aligned to cachelines so there's
812 * no need to worry they'd overlap.
814 * WARNING: By invalidating the descriptor here, we also invalidate
815 * the descriptors surrounding this one. Therefore we can NOT change the
816 * contents of this descriptor nor the surrounding ones. The problem is
817 * that in order to mark the descriptor as processed, we need to change
818 * the descriptor. The solution is to mark the whole cache line when all
819 * descriptors in the cache line are processed.
821 addr
= (uint32_t)rbd
;
822 addr
&= ~(ARCH_DMA_MINALIGN
- 1);
823 size
= roundup(sizeof(struct fec_bd
), ARCH_DMA_MINALIGN
);
824 invalidate_dcache_range(addr
, addr
+ size
);
826 bd_status
= readw(&rbd
->status
);
827 debug("fec_recv: status 0x%x\n", bd_status
);
829 if (!(bd_status
& FEC_RBD_EMPTY
)) {
830 if ((bd_status
& FEC_RBD_LAST
) && !(bd_status
& FEC_RBD_ERR
) &&
831 ((readw(&rbd
->data_length
) - 4) > 14)) {
833 * Get buffer address and size
835 addr
= readl(&rbd
->data_pointer
);
836 frame_length
= readw(&rbd
->data_length
) - 4;
838 * Invalidate data cache over the buffer
840 end
= roundup(addr
+ frame_length
, ARCH_DMA_MINALIGN
);
841 addr
&= ~(ARCH_DMA_MINALIGN
- 1);
842 invalidate_dcache_range(addr
, end
);
845 * Fill the buffer and pass it to upper layers
847 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
848 swap_packet((uint32_t *)addr
, frame_length
);
850 memcpy(buff
, (char *)addr
, frame_length
);
851 net_process_received_packet(buff
, frame_length
);
854 if (bd_status
& FEC_RBD_ERR
)
855 printf("error frame: 0x%08x 0x%08x\n",
860 * Free the current buffer, restart the engine and move forward
861 * to the next buffer. Here we check if the whole cacheline of
862 * descriptors was already processed and if so, we mark it free
865 size
= RXDESC_PER_CACHELINE
- 1;
866 if ((fec
->rbd_index
& size
) == size
) {
867 i
= fec
->rbd_index
- size
;
868 addr
= (uint32_t)&fec
->rbd_base
[i
];
869 for (; i
<= fec
->rbd_index
; i
++) {
870 fec_rbd_clean(i
== (FEC_RBD_NUM
- 1),
873 flush_dcache_range(addr
,
874 addr
+ ARCH_DMA_MINALIGN
);
877 fec_rx_task_enable(fec
);
878 fec
->rbd_index
= (fec
->rbd_index
+ 1) % FEC_RBD_NUM
;
880 debug("fec_recv: stop\n");
/*
 * Compose the ethernet device name: "FEC" for the default id (-1),
 * "FEC<n>" for an explicit controller id. The caller supplies a buffer
 * large enough for the result (the eth_device name field).
 */
static void fec_set_dev_name(char *dest, int dev_id)
{
	if (dev_id == -1)
		sprintf(dest, "FEC");
	else
		sprintf(dest, "FEC%i", dev_id);
}
890 static int fec_alloc_descs(struct fec_priv
*fec
)
896 /* Allocate TX descriptors. */
897 size
= roundup(2 * sizeof(struct fec_bd
), ARCH_DMA_MINALIGN
);
898 fec
->tbd_base
= memalign(ARCH_DMA_MINALIGN
, size
);
902 /* Allocate RX descriptors. */
903 size
= roundup(FEC_RBD_NUM
* sizeof(struct fec_bd
), ARCH_DMA_MINALIGN
);
904 fec
->rbd_base
= memalign(ARCH_DMA_MINALIGN
, size
);
908 memset(fec
->rbd_base
, 0, size
);
910 /* Allocate RX buffers. */
912 /* Maximum RX buffer size. */
913 size
= roundup(FEC_MAX_PKT_SIZE
, FEC_DMA_RX_MINALIGN
);
914 for (i
= 0; i
< FEC_RBD_NUM
; i
++) {
915 data
= memalign(FEC_DMA_RX_MINALIGN
, size
);
917 printf("%s: error allocating rxbuf %d\n", __func__
, i
);
921 memset(data
, 0, size
);
923 fec
->rbd_base
[i
].data_pointer
= (uint32_t)data
;
924 fec
->rbd_base
[i
].status
= FEC_RBD_EMPTY
;
925 fec
->rbd_base
[i
].data_length
= 0;
926 /* Flush the buffer to memory. */
927 flush_dcache_range((uint32_t)data
, (uint32_t)data
+ size
);
930 /* Mark the last RBD to close the ring. */
931 fec
->rbd_base
[i
- 1].status
= FEC_RBD_WRAP
| FEC_RBD_EMPTY
;
940 free((void *)fec
->rbd_base
[i
].data_pointer
);
948 static void fec_free_descs(struct fec_priv
*fec
)
952 for (i
= 0; i
< FEC_RBD_NUM
; i
++)
953 free((void *)fec
->rbd_base
[i
].data_pointer
);
959 int fec_probe(bd_t
*bd
, int dev_id
, uint32_t base_addr
,
960 struct mii_dev
*bus
, struct phy_device
*phydev
)
962 static int fec_probe(bd_t
*bd
, int dev_id
, uint32_t base_addr
,
963 struct mii_dev
*bus
, int phy_id
)
966 struct eth_device
*edev
;
967 struct fec_priv
*fec
;
968 unsigned char ethaddr
[6];
972 /* create and fill edev struct */
973 edev
= (struct eth_device
*)malloc(sizeof(struct eth_device
));
975 puts("fec_mxc: not enough malloc memory for eth_device\n");
980 fec
= (struct fec_priv
*)malloc(sizeof(struct fec_priv
));
982 puts("fec_mxc: not enough malloc memory for fec_priv\n");
987 memset(edev
, 0, sizeof(*edev
));
988 memset(fec
, 0, sizeof(*fec
));
990 ret
= fec_alloc_descs(fec
);
995 edev
->init
= fec_init
;
996 edev
->send
= fec_send
;
997 edev
->recv
= fec_recv
;
998 edev
->halt
= fec_halt
;
999 edev
->write_hwaddr
= fec_set_hwaddr
;
1001 fec
->eth
= (struct ethernet_regs
*)base_addr
;
1004 fec
->xcv_type
= CONFIG_FEC_XCV_TYPE
;
1007 writel(readl(&fec
->eth
->ecntrl
) | FEC_ECNTRL_RESET
, &fec
->eth
->ecntrl
);
1008 start
= get_timer(0);
1009 while (readl(&fec
->eth
->ecntrl
) & FEC_ECNTRL_RESET
) {
1010 if (get_timer(start
) > (CONFIG_SYS_HZ
* 5)) {
1011 printf("FEC MXC: Timeout reseting chip\n");
1018 fec_set_dev_name(edev
->name
, dev_id
);
1019 fec
->dev_id
= (dev_id
== -1) ? 0 : dev_id
;
1021 fec_mii_setspeed(bus
->priv
);
1022 #ifdef CONFIG_PHYLIB
1023 fec
->phydev
= phydev
;
1024 phy_connect_dev(phydev
, edev
);
1028 fec
->phy_id
= phy_id
;
1032 if (fec_get_hwaddr(edev
, dev_id
, ethaddr
) == 0) {
1033 debug("got MAC%d address from fuse: %pM\n", dev_id
, ethaddr
);
1034 memcpy(edev
->enetaddr
, ethaddr
, 6);
1035 if (!getenv("ethaddr"))
1036 eth_setenv_enetaddr("ethaddr", ethaddr
);
1040 fec_free_descs(fec
);
1049 struct mii_dev
*fec_get_miibus(uint32_t base_addr
, int dev_id
)
1051 struct ethernet_regs
*eth
= (struct ethernet_regs
*)base_addr
;
1052 struct mii_dev
*bus
;
1057 printf("mdio_alloc failed\n");
1060 bus
->read
= fec_phy_read
;
1061 bus
->write
= fec_phy_write
;
1063 fec_set_dev_name(bus
->name
, dev_id
);
1065 ret
= mdio_register(bus
);
1067 printf("mdio_register failed\n");
1071 fec_mii_setspeed(eth
);
1075 int fecmxc_initialize_multi(bd_t
*bd
, int dev_id
, int phy_id
, uint32_t addr
)
1078 struct mii_dev
*bus
= NULL
;
1079 #ifdef CONFIG_PHYLIB
1080 struct phy_device
*phydev
= NULL
;
1086 * The i.MX28 has two ethernet interfaces, but they are not equal.
1087 * Only the first one can access the MDIO bus.
1089 base_mii
= MXS_ENET0_BASE
;
1093 debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id
, phy_id
, addr
);
1094 bus
= fec_get_miibus(base_mii
, dev_id
);
1097 #ifdef CONFIG_PHYLIB
1098 phydev
= phy_find_by_mask(bus
, 1 << phy_id
, PHY_INTERFACE_MODE_RGMII
);
1103 ret
= fec_probe(bd
, dev_id
, addr
, bus
, phydev
);
1105 ret
= fec_probe(bd
, dev_id
, addr
, bus
, phy_id
);
1108 #ifdef CONFIG_PHYLIB
1116 #ifdef CONFIG_FEC_MXC_PHYADDR
1117 int fecmxc_initialize(bd_t
*bd
)
1119 return fecmxc_initialize_multi(bd
, -1, CONFIG_FEC_MXC_PHYADDR
,
1124 #ifndef CONFIG_PHYLIB
1125 int fecmxc_register_mii_postcall(struct eth_device
*dev
, int (*cb
)(int))
1127 struct fec_priv
*fec
= (struct fec_priv
*)dev
->priv
;
1128 fec
->mii_postcall
= cb
;