/*
 * Copyright 2014 Broadcom Corporation.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
20 #include "bcm-sf2-eth.h"
21 #include "bcm-sf2-eth-gmac.h"
23 #define SPINWAIT(exp, us) { \
24 uint countdown = (us) + 9; \
25 while ((exp) && (countdown >= 10)) {\
31 static int gmac_disable_dma(struct eth_dma
*dma
, int dir
);
32 static int gmac_enable_dma(struct eth_dma
*dma
, int dir
);
36 /* misc control bits */
38 /* buffer count and address extension */
40 /* memory address of the date buffer, bits 31:0 */
42 /* memory address of the date buffer, bits 63:32 */
46 uint32_t g_dmactrlflags
;
48 static uint32_t dma_ctrlflags(uint32_t mask
, uint32_t flags
)
50 debug("%s enter\n", __func__
);
52 g_dmactrlflags
&= ~mask
;
53 g_dmactrlflags
|= flags
;
55 /* If trying to enable parity, check if parity is actually supported */
56 if (g_dmactrlflags
& DMA_CTRL_PEN
) {
59 control
= readl(GMAC0_DMA_TX_CTRL_ADDR
);
60 writel(control
| D64_XC_PD
, GMAC0_DMA_TX_CTRL_ADDR
);
61 if (readl(GMAC0_DMA_TX_CTRL_ADDR
) & D64_XC_PD
) {
63 * We *can* disable it, therefore it is supported;
64 * restore control register
66 writel(control
, GMAC0_DMA_TX_CTRL_ADDR
);
68 /* Not supported, don't allow it to be enabled */
69 g_dmactrlflags
&= ~DMA_CTRL_PEN
;
73 return g_dmactrlflags
;
76 static inline void reg32_clear_bits(uint32_t reg
, uint32_t value
)
78 uint32_t v
= readl(reg
);
83 static inline void reg32_set_bits(uint32_t reg
, uint32_t value
)
85 uint32_t v
= readl(reg
);
91 static void dma_tx_dump(struct eth_dma
*dma
)
93 dma64dd_t
*descp
= NULL
;
97 printf("TX DMA Register:\n");
98 printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
99 readl(GMAC0_DMA_TX_CTRL_ADDR
),
100 readl(GMAC0_DMA_TX_PTR_ADDR
),
101 readl(GMAC0_DMA_TX_ADDR_LOW_ADDR
),
102 readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR
),
103 readl(GMAC0_DMA_TX_STATUS0_ADDR
),
104 readl(GMAC0_DMA_TX_STATUS1_ADDR
));
106 printf("TX Descriptors:\n");
107 for (i
= 0; i
< TX_BUF_NUM
; i
++) {
108 descp
= (dma64dd_t
*)(dma
->tx_desc_aligned
) + i
;
109 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
110 descp
->ctrl1
, descp
->ctrl2
,
111 descp
->addrhigh
, descp
->addrlow
);
114 printf("TX Buffers:\n");
115 /* Initialize TX DMA descriptor table */
116 for (i
= 0; i
< TX_BUF_NUM
; i
++) {
117 bufp
= (uint8_t *)(dma
->tx_buf
+ i
* TX_BUF_SIZE
);
118 printf("buf%d:0x%x; ", i
, (uint32_t)bufp
);
123 static void dma_rx_dump(struct eth_dma
*dma
)
125 dma64dd_t
*descp
= NULL
;
129 printf("RX DMA Register:\n");
130 printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
131 readl(GMAC0_DMA_RX_CTRL_ADDR
),
132 readl(GMAC0_DMA_RX_PTR_ADDR
),
133 readl(GMAC0_DMA_RX_ADDR_LOW_ADDR
),
134 readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR
),
135 readl(GMAC0_DMA_RX_STATUS0_ADDR
),
136 readl(GMAC0_DMA_RX_STATUS1_ADDR
));
138 printf("RX Descriptors:\n");
139 for (i
= 0; i
< RX_BUF_NUM
; i
++) {
140 descp
= (dma64dd_t
*)(dma
->rx_desc_aligned
) + i
;
141 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
142 descp
->ctrl1
, descp
->ctrl2
,
143 descp
->addrhigh
, descp
->addrlow
);
146 printf("RX Buffers:\n");
147 for (i
= 0; i
< RX_BUF_NUM
; i
++) {
148 bufp
= dma
->rx_buf
+ i
* RX_BUF_SIZE
;
149 printf("buf%d:0x%x; ", i
, (uint32_t)bufp
);
155 static int dma_tx_init(struct eth_dma
*dma
)
157 dma64dd_t
*descp
= NULL
;
162 debug("%s enter\n", __func__
);
164 /* clear descriptor memory */
165 memset((void *)(dma
->tx_desc_aligned
), 0,
166 TX_BUF_NUM
* sizeof(dma64dd_t
));
167 memset(dma
->tx_buf
, 0, TX_BUF_NUM
* TX_BUF_SIZE
);
169 /* Initialize TX DMA descriptor table */
170 for (i
= 0; i
< TX_BUF_NUM
; i
++) {
171 descp
= (dma64dd_t
*)(dma
->tx_desc_aligned
) + i
;
172 bufp
= dma
->tx_buf
+ i
* TX_BUF_SIZE
;
173 /* clear buffer memory */
174 memset((void *)bufp
, 0, TX_BUF_SIZE
);
177 /* if last descr set endOfTable */
178 if (i
== (TX_BUF_NUM
-1))
179 ctrl
= D64_CTRL1_EOT
;
182 descp
->addrlow
= (uint32_t)bufp
;
186 /* flush descriptor and buffer */
187 descp
= dma
->tx_desc_aligned
;
189 flush_dcache_range((unsigned long)descp
,
190 (unsigned long)(descp
+
191 sizeof(dma64dd_t
) * TX_BUF_NUM
));
192 flush_dcache_range((unsigned long)(bufp
),
193 (unsigned long)(bufp
+ TX_BUF_SIZE
* TX_BUF_NUM
));
195 /* initialize the DMA channel */
196 writel((uint32_t)(dma
->tx_desc_aligned
), GMAC0_DMA_TX_ADDR_LOW_ADDR
);
197 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR
);
199 /* now update the dma last descriptor */
200 writel(((uint32_t)(dma
->tx_desc_aligned
)) & D64_XP_LD_MASK
,
201 GMAC0_DMA_TX_PTR_ADDR
);
206 static int dma_rx_init(struct eth_dma
*dma
)
209 dma64dd_t
*descp
= NULL
;
214 debug("%s enter\n", __func__
);
216 /* clear descriptor memory */
217 memset((void *)(dma
->rx_desc_aligned
), 0,
218 RX_BUF_NUM
* sizeof(dma64dd_t
));
219 /* clear buffer memory */
220 memset(dma
->rx_buf
, 0, RX_BUF_NUM
* RX_BUF_SIZE
);
222 /* Initialize RX DMA descriptor table */
223 for (i
= 0; i
< RX_BUF_NUM
; i
++) {
224 descp
= (dma64dd_t
*)(dma
->rx_desc_aligned
) + i
;
225 bufp
= dma
->rx_buf
+ i
* RX_BUF_SIZE
;
227 /* if last descr set endOfTable */
228 if (i
== (RX_BUF_NUM
- 1))
229 ctrl
= D64_CTRL1_EOT
;
231 descp
->ctrl2
= RX_BUF_SIZE
;
232 descp
->addrlow
= (uint32_t)bufp
;
235 last_desc
= ((uint32_t)(descp
) & D64_XP_LD_MASK
)
239 descp
= dma
->rx_desc_aligned
;
241 /* flush descriptor and buffer */
242 flush_dcache_range((unsigned long)descp
,
243 (unsigned long)(descp
+
244 sizeof(dma64dd_t
) * RX_BUF_NUM
));
245 flush_dcache_range((unsigned long)(bufp
),
246 (unsigned long)(bufp
+ RX_BUF_SIZE
* RX_BUF_NUM
));
248 /* initailize the DMA channel */
249 writel((uint32_t)descp
, GMAC0_DMA_RX_ADDR_LOW_ADDR
);
250 writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR
);
252 /* now update the dma last descriptor */
253 writel(last_desc
, GMAC0_DMA_RX_PTR_ADDR
);
258 static int dma_init(struct eth_dma
*dma
)
260 debug(" %s enter\n", __func__
);
263 * Default flags: For backwards compatibility both
264 * Rx Overflow Continue and Parity are DISABLED.
266 dma_ctrlflags(DMA_CTRL_ROC
| DMA_CTRL_PEN
, 0);
268 debug("rx burst len 0x%x\n",
269 (readl(GMAC0_DMA_RX_CTRL_ADDR
) & D64_RC_BL_MASK
)
271 debug("tx burst len 0x%x\n",
272 (readl(GMAC0_DMA_TX_CTRL_ADDR
) & D64_XC_BL_MASK
)
278 /* From end of chip_init() */
279 /* enable the overflow continue feature and disable parity */
280 dma_ctrlflags(DMA_CTRL_ROC
| DMA_CTRL_PEN
/* mask */,
281 DMA_CTRL_ROC
/* value */);
286 static int dma_deinit(struct eth_dma
*dma
)
288 debug(" %s enter\n", __func__
);
290 gmac_disable_dma(dma
, MAC_DMA_RX
);
291 gmac_disable_dma(dma
, MAC_DMA_TX
);
297 dma
->tx_desc_aligned
= NULL
;
303 dma
->rx_desc_aligned
= NULL
;
308 int gmac_tx_packet(struct eth_dma
*dma
, void *packet
, int length
)
310 uint8_t *bufp
= dma
->tx_buf
+ dma
->cur_tx_index
* TX_BUF_SIZE
;
312 /* kick off the dma */
314 int txout
= dma
->cur_tx_index
;
316 dma64dd_t
*descp
= NULL
;
318 uint32_t last_desc
= (((uint32_t)dma
->tx_desc_aligned
) +
319 sizeof(dma64dd_t
)) & D64_XP_LD_MASK
;
322 debug("%s enter\n", __func__
);
324 /* load the buffer */
325 memcpy(bufp
, packet
, len
);
327 /* Add 4 bytes for Ethernet FCS/CRC */
330 ctrl
= (buflen
& D64_CTRL2_BC_MASK
);
332 /* the transmit will only be one frame or set SOF, EOF */
333 /* also set int on completion */
334 flags
= D64_CTRL1_SOF
| D64_CTRL1_IOC
| D64_CTRL1_EOF
;
336 /* txout points to the descriptor to uset */
337 /* if last descriptor then set EOT */
338 if (txout
== (TX_BUF_NUM
- 1)) {
339 flags
|= D64_CTRL1_EOT
;
340 last_desc
= ((uint32_t)(dma
->tx_desc_aligned
)) & D64_XP_LD_MASK
;
343 /* write the descriptor */
344 descp
= ((dma64dd_t
*)(dma
->tx_desc_aligned
)) + txout
;
345 descp
->addrlow
= (uint32_t)bufp
;
347 descp
->ctrl1
= flags
;
350 /* flush descriptor and buffer */
351 flush_dcache_range((unsigned long)descp
,
352 (unsigned long)(descp
+ sizeof(dma64dd_t
)));
353 flush_dcache_range((unsigned long)bufp
,
354 (unsigned long)(bufp
+ TX_BUF_SIZE
));
356 /* now update the dma last descriptor */
357 writel(last_desc
, GMAC0_DMA_TX_PTR_ADDR
);
359 /* tx dma should be enabled so packet should go out */
362 dma
->cur_tx_index
= (txout
+ 1) & (TX_BUF_NUM
- 1);
367 bool gmac_check_tx_done(struct eth_dma
*dma
)
369 /* wait for tx to complete */
371 bool xfrdone
= false;
373 debug("%s enter\n", __func__
);
375 intstatus
= readl(GMAC0_INT_STATUS_ADDR
);
377 debug("int(0x%x)\n", intstatus
);
378 if (intstatus
& (I_XI0
| I_XI1
| I_XI2
| I_XI3
)) {
380 /* clear the int bits */
381 intstatus
&= ~(I_XI0
| I_XI1
| I_XI2
| I_XI3
);
382 writel(intstatus
, GMAC0_INT_STATUS_ADDR
);
384 debug("Tx int(0x%x)\n", intstatus
);
390 int gmac_check_rx_done(struct eth_dma
*dma
, uint8_t *buf
)
393 size_t rcvlen
= 0, buflen
= 0;
394 uint32_t stat0
= 0, stat1
= 0;
395 uint32_t control
, offset
;
396 uint8_t statbuf
[HWRXOFF
*2];
398 int index
, curr
, active
;
399 dma64dd_t
*descp
= NULL
;
404 * this api will check if a packet has been received.
405 * If so it will return the address of the buffer and current
406 * descriptor index will be incremented to the
407 * next descriptor. Once done with the frame the buffer should be
408 * added back onto the descriptor and the lastdscr should be updated
409 * to this descriptor.
411 index
= dma
->cur_rx_index
;
412 offset
= (uint32_t)(dma
->rx_desc_aligned
);
413 stat0
= readl(GMAC0_DMA_RX_STATUS0_ADDR
) & D64_RS0_CD_MASK
;
414 stat1
= readl(GMAC0_DMA_RX_STATUS1_ADDR
) & D64_RS0_CD_MASK
;
415 curr
= ((stat0
- offset
) & D64_RS0_CD_MASK
) / sizeof(dma64dd_t
);
416 active
= ((stat1
- offset
) & D64_RS0_CD_MASK
) / sizeof(dma64dd_t
);
418 /* check if any frame */
422 debug("received packet\n");
423 debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index
, curr
, active
);
428 /* get the packet pointer that corresponds to the rx descriptor */
429 bufp
= dma
->rx_buf
+ index
* RX_BUF_SIZE
;
431 descp
= (dma64dd_t
*)(dma
->rx_desc_aligned
) + index
;
432 /* flush descriptor and buffer */
433 flush_dcache_range((unsigned long)descp
,
434 (unsigned long)(descp
+ sizeof(dma64dd_t
)));
435 flush_dcache_range((unsigned long)bufp
,
436 (unsigned long)(bufp
+ RX_BUF_SIZE
));
438 buflen
= (descp
->ctrl2
& D64_CTRL2_BC_MASK
);
440 stat0
= readl(GMAC0_DMA_RX_STATUS0_ADDR
);
441 stat1
= readl(GMAC0_DMA_RX_STATUS1_ADDR
);
443 debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
444 (uint32_t)bufp
, index
, buflen
, stat0
, stat1
);
446 dma
->cur_rx_index
= (index
+ 1) & (RX_BUF_NUM
- 1);
448 /* get buffer offset */
449 control
= readl(GMAC0_DMA_RX_CTRL_ADDR
);
450 offset
= (control
& D64_RC_RO_MASK
) >> D64_RC_RO_SHIFT
;
451 rcvlen
= *(uint16_t *)bufp
;
453 debug("Received %d bytes\n", rcvlen
);
454 /* copy status into temp buf then copy data from rx buffer */
455 memcpy(statbuf
, bufp
, offset
);
456 datap
= (void *)((uint32_t)bufp
+ offset
);
457 memcpy(buf
, datap
, rcvlen
);
459 /* update descriptor that is being added back on ring */
460 descp
->ctrl2
= RX_BUF_SIZE
;
461 descp
->addrlow
= (uint32_t)bufp
;
463 /* flush descriptor */
464 flush_dcache_range((unsigned long)descp
,
465 (unsigned long)(descp
+ sizeof(dma64dd_t
)));
467 /* set the lastdscr for the rx ring */
468 writel(((uint32_t)descp
) & D64_XP_LD_MASK
, GMAC0_DMA_RX_PTR_ADDR
);
473 static int gmac_disable_dma(struct eth_dma
*dma
, int dir
)
477 debug("%s enter\n", __func__
);
479 if (dir
== MAC_DMA_TX
) {
480 /* address PR8249/PR7577 issue */
481 /* suspend tx DMA first */
482 writel(D64_XC_SE
, GMAC0_DMA_TX_CTRL_ADDR
);
483 SPINWAIT(((status
= (readl(GMAC0_DMA_TX_STATUS0_ADDR
) &
485 D64_XS0_XS_DISABLED
) &&
486 (status
!= D64_XS0_XS_IDLE
) &&
487 (status
!= D64_XS0_XS_STOPPED
), 10000);
490 * PR2414 WAR: DMA engines are not disabled until
493 writel(0, GMAC0_DMA_TX_CTRL_ADDR
);
494 SPINWAIT(((status
= (readl(GMAC0_DMA_TX_STATUS0_ADDR
) &
496 D64_XS0_XS_DISABLED
), 10000);
498 /* wait for the last transaction to complete */
501 status
= (status
== D64_XS0_XS_DISABLED
);
504 * PR2414 WAR: DMA engines are not disabled until
507 writel(0, GMAC0_DMA_RX_CTRL_ADDR
);
508 SPINWAIT(((status
= (readl(GMAC0_DMA_RX_STATUS0_ADDR
) &
510 D64_RS0_RS_DISABLED
), 10000);
512 status
= (status
== D64_RS0_RS_DISABLED
);
518 static int gmac_enable_dma(struct eth_dma
*dma
, int dir
)
522 debug("%s enter\n", __func__
);
524 if (dir
== MAC_DMA_TX
) {
525 dma
->cur_tx_index
= 0;
528 * These bits 20:18 (burstLen) of control register can be
529 * written but will take effect only if these bits are
530 * valid. So this will not affect previous versions
531 * of the DMA. They will continue to have those bits set to 0.
533 control
= readl(GMAC0_DMA_TX_CTRL_ADDR
);
535 control
|= D64_XC_XE
;
536 if ((g_dmactrlflags
& DMA_CTRL_PEN
) == 0)
537 control
|= D64_XC_PD
;
539 writel(control
, GMAC0_DMA_TX_CTRL_ADDR
);
541 /* initailize the DMA channel */
542 writel((uint32_t)(dma
->tx_desc_aligned
),
543 GMAC0_DMA_TX_ADDR_LOW_ADDR
);
544 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR
);
546 dma
->cur_rx_index
= 0;
548 control
= (readl(GMAC0_DMA_RX_CTRL_ADDR
) &
549 D64_RC_AE
) | D64_RC_RE
;
551 if ((g_dmactrlflags
& DMA_CTRL_PEN
) == 0)
552 control
|= D64_RC_PD
;
554 if (g_dmactrlflags
& DMA_CTRL_ROC
)
555 control
|= D64_RC_OC
;
558 * These bits 20:18 (burstLen) of control register can be
559 * written but will take effect only if these bits are
560 * valid. So this will not affect previous versions
561 * of the DMA. They will continue to have those bits set to 0.
563 control
&= ~D64_RC_BL_MASK
;
564 /* Keep default Rx burstlen */
565 control
|= readl(GMAC0_DMA_RX_CTRL_ADDR
) & D64_RC_BL_MASK
;
566 control
|= HWRXOFF
<< D64_RC_RO_SHIFT
;
568 writel(control
, GMAC0_DMA_RX_CTRL_ADDR
);
571 * the rx descriptor ring should have
572 * the addresses set properly;
573 * set the lastdscr for the rx ring
575 writel(((uint32_t)(dma
->rx_desc_aligned
) +
576 (RX_BUF_NUM
- 1) * RX_BUF_SIZE
) &
577 D64_XP_LD_MASK
, GMAC0_DMA_RX_PTR_ADDR
);
583 bool gmac_mii_busywait(unsigned int timeout
)
587 while (timeout
> 10) {
588 tmp
= readl(GMAC_MII_CTRL_ADDR
);
589 if (tmp
& (1 << GMAC_MII_BUSY_SHIFT
)) {
596 return tmp
& (1 << GMAC_MII_BUSY_SHIFT
);
599 int gmac_miiphy_read(struct mii_dev
*bus
, int phyaddr
, int devad
, int reg
)
604 /* Busy wait timeout is 1ms */
605 if (gmac_mii_busywait(1000)) {
606 error("%s: Prepare MII read: MII/MDIO busy\n", __func__
);
611 tmp
= GMAC_MII_DATA_READ_CMD
;
612 tmp
|= (phyaddr
<< GMAC_MII_PHY_ADDR_SHIFT
) |
613 (reg
<< GMAC_MII_PHY_REG_SHIFT
);
614 debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp
, phyaddr
, reg
);
615 writel(tmp
, GMAC_MII_DATA_ADDR
);
617 if (gmac_mii_busywait(1000)) {
618 error("%s: MII read failure: MII/MDIO busy\n", __func__
);
622 value
= readl(GMAC_MII_DATA_ADDR
) & 0xffff;
623 debug("MII read data 0x%x\n", value
);
627 int gmac_miiphy_write(struct mii_dev
*bus
, int phyaddr
, int devad
, int reg
,
632 /* Busy wait timeout is 1ms */
633 if (gmac_mii_busywait(1000)) {
634 error("%s: Prepare MII write: MII/MDIO busy\n", __func__
);
638 /* Write operation */
639 tmp
= GMAC_MII_DATA_WRITE_CMD
| (value
& 0xffff);
640 tmp
|= ((phyaddr
<< GMAC_MII_PHY_ADDR_SHIFT
) |
641 (reg
<< GMAC_MII_PHY_REG_SHIFT
));
642 debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
643 tmp
, phyaddr
, reg
, value
);
644 writel(tmp
, GMAC_MII_DATA_ADDR
);
646 if (gmac_mii_busywait(1000)) {
647 error("%s: MII write failure: MII/MDIO busy\n", __func__
);
654 void gmac_init_reset(void)
656 debug("%s enter\n", __func__
);
658 /* set command config reg CC_SR */
659 reg32_set_bits(UNIMAC0_CMD_CFG_ADDR
, CC_SR
);
660 udelay(GMAC_RESET_DELAY
);
663 void gmac_clear_reset(void)
665 debug("%s enter\n", __func__
);
667 /* clear command config reg CC_SR */
668 reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR
, CC_SR
);
669 udelay(GMAC_RESET_DELAY
);
672 static void gmac_enable_local(bool en
)
676 debug("%s enter\n", __func__
);
678 /* read command config reg */
679 cmdcfg
= readl(UNIMAC0_CMD_CFG_ADDR
);
681 /* put mac in reset */
686 /* first deassert rx_ena and tx_ena while in reset */
687 cmdcfg
&= ~(CC_RE
| CC_TE
);
688 /* write command config reg */
689 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
691 /* bring mac out of reset */
694 /* if not enable exit now */
698 /* enable the mac transmit and receive paths now */
701 cmdcfg
|= (CC_RE
| CC_TE
);
703 /* assert rx_ena and tx_ena when out of reset to enable the mac */
704 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
709 int gmac_enable(void)
711 gmac_enable_local(1);
713 /* clear interrupts */
714 writel(I_INTMASK
, GMAC0_INT_STATUS_ADDR
);
718 int gmac_disable(void)
720 gmac_enable_local(0);
724 int gmac_set_speed(int speed
, int duplex
)
730 hd_ena
= duplex
? 0 : CC_HD
;
733 } else if (speed
== 100) {
735 } else if (speed
== 10) {
738 error("%s: Invalid GMAC speed(%d)!\n", __func__
, speed
);
742 cmdcfg
= readl(UNIMAC0_CMD_CFG_ADDR
);
743 cmdcfg
&= ~(CC_ES_MASK
| CC_HD
);
744 cmdcfg
|= ((speed_cfg
<< CC_ES_SHIFT
) | hd_ena
);
746 printf("Change GMAC speed to %dMB\n", speed
);
747 debug("GMAC speed cfg 0x%x\n", cmdcfg
);
748 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
753 int gmac_set_mac_addr(unsigned char *mac
)
755 /* set our local address */
756 debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
757 mac
[0], mac
[1], mac
[2], mac
[3], mac
[4], mac
[5]);
758 writel(htonl(*(uint32_t *)mac
), UNIMAC0_MAC_MSB_ADDR
);
759 writew(htons(*(uint32_t *)&mac
[4]), UNIMAC0_MAC_LSB_ADDR
);
764 int gmac_mac_init(struct eth_device
*dev
)
766 struct eth_info
*eth
= (struct eth_info
*)(dev
->priv
);
767 struct eth_dma
*dma
= &(eth
->dma
);
773 debug("%s enter\n", __func__
);
775 /* Always use GMAC0 */
776 printf("Using GMAC%d\n", 0);
778 /* Reset AMAC0 core */
779 writel(0, AMAC0_IDM_RESET_ADDR
);
780 tmp
= readl(AMAC0_IO_CTRL_DIRECT_ADDR
);
782 tmp
&= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT
);
783 tmp
|= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT
);
785 tmp
&= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT
);
786 writel(tmp
, AMAC0_IO_CTRL_DIRECT_ADDR
);
790 * As AMAC is just reset, NO need?
791 * set eth_data into loopback mode to ensure no rx traffic
792 * gmac_loopback(eth_data, TRUE);
793 * ET_TRACE(("%s gmac loopback\n", __func__));
797 cmdcfg
= readl(UNIMAC0_CMD_CFG_ADDR
);
798 cmdcfg
&= ~(CC_TE
| CC_RE
| CC_RPI
| CC_TAI
| CC_HD
| CC_ML
|
799 CC_CFE
| CC_RL
| CC_RED
| CC_PE
| CC_TPI
|
801 cmdcfg
|= (CC_PROM
| CC_NLC
| CC_CFE
);
802 /* put mac in reset */
804 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
807 /* enable clear MIB on read */
808 reg32_set_bits(GMAC0_DEV_CTRL_ADDR
, DC_MROR
);
809 /* PHY: set smi_master to drive mdc_clk */
810 reg32_set_bits(GMAC0_PHY_CTRL_ADDR
, PC_MTE
);
812 /* clear persistent sw intstatus */
813 writel(0, GMAC0_INT_STATUS_ADDR
);
815 if (dma_init(dma
) < 0) {
816 error("%s: GMAC dma_init failed\n", __func__
);
821 printf("%s: Chip ID: 0x%x\n", __func__
, chipid
);
823 /* set switch bypass mode */
824 tmp
= readl(SWITCH_GLOBAL_CONFIG_ADDR
);
825 tmp
|= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT
);
828 /* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */
830 writel(tmp
, SWITCH_GLOBAL_CONFIG_ADDR
);
832 tmp
= readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR
);
833 tmp
&= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT
);
834 writel(tmp
, CRMU_CHIP_IO_PAD_CONTROL_ADDR
);
836 /* Set MDIO to internal GPHY */
837 tmp
= readl(GMAC_MII_CTRL_ADDR
);
838 /* Select internal MDC/MDIO bus*/
839 tmp
&= ~(1 << GMAC_MII_CTRL_BYP_SHIFT
);
840 /* select MDC/MDIO connecting to on-chip internal PHYs */
841 tmp
&= ~(1 << GMAC_MII_CTRL_EXT_SHIFT
);
843 * give bit[6:0](MDCDIV) with required divisor to set
844 * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ
848 writel(tmp
, GMAC_MII_CTRL_ADDR
);
850 if (gmac_mii_busywait(1000)) {
851 error("%s: Configure MDIO: MII/MDIO busy\n", __func__
);
855 /* Configure GMAC0 */
856 /* enable one rx interrupt per received frame */
857 writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT
, GMAC0_INTR_RECV_LAZY_ADDR
);
859 /* read command config reg */
860 cmdcfg
= readl(UNIMAC0_CMD_CFG_ADDR
);
861 /* enable 802.3x tx flow control (honor received PAUSE frames) */
863 /* enable promiscuous mode */
865 /* Disable loopback mode */
868 cmdcfg
&= ~(CC_ES_MASK
| CC_HD
);
869 /* Set to 1Gbps and full duplex by default */
870 cmdcfg
|= (2 << CC_ES_SHIFT
);
872 /* put mac in reset */
875 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
876 /* bring mac out of reset */
879 /* set max frame lengths; account for possible vlan tag */
880 writel(PKTSIZE
+ 32, UNIMAC0_FRM_LENGTH_ADDR
);
889 int gmac_add(struct eth_device
*dev
)
891 struct eth_info
*eth
= (struct eth_info
*)(dev
->priv
);
892 struct eth_dma
*dma
= &(eth
->dma
);
896 * Desc has to be 16-byte aligned ?
897 * If it is 8-byte aligned by malloc, fail Tx
899 tmp
= malloc(sizeof(dma64dd_t
) * TX_BUF_NUM
+ 8);
901 printf("%s: Failed to allocate TX desc Buffer\n", __func__
);
905 dma
->tx_desc
= (void *)tmp
;
906 dma
->tx_desc_aligned
= (void *)(((uint32_t)tmp
) & (~0xf));
907 debug("TX Descriptor Buffer: %p; length: 0x%x\n",
908 dma
->tx_desc_aligned
, sizeof(dma64dd_t
) * TX_BUF_NUM
);
910 tmp
= malloc(TX_BUF_SIZE
* TX_BUF_NUM
);
912 printf("%s: Failed to allocate TX Data Buffer\n", __func__
);
916 dma
->tx_buf
= (uint8_t *)tmp
;
917 debug("TX Data Buffer: %p; length: 0x%x\n",
918 dma
->tx_buf
, TX_BUF_SIZE
* TX_BUF_NUM
);
920 /* Desc has to be 16-byte aligned ? */
921 tmp
= malloc(sizeof(dma64dd_t
) * RX_BUF_NUM
+ 8);
923 printf("%s: Failed to allocate RX Descriptor\n", __func__
);
929 dma
->rx_desc_aligned
= (void *)(((uint32_t)tmp
) & (~0xf));
930 debug("RX Descriptor Buffer: %p, length: 0x%x\n",
931 dma
->rx_desc_aligned
, sizeof(dma64dd_t
) * RX_BUF_NUM
);
933 tmp
= malloc(RX_BUF_SIZE
* RX_BUF_NUM
);
935 printf("%s: Failed to allocate RX Data Buffer\n", __func__
);
942 debug("RX Data Buffer: %p; length: 0x%x\n",
943 dma
->rx_buf
, RX_BUF_SIZE
* RX_BUF_NUM
);
947 eth
->phy_interface
= PHY_INTERFACE_MODE_GMII
;
949 dma
->tx_packet
= gmac_tx_packet
;
950 dma
->check_tx_done
= gmac_check_tx_done
;
952 dma
->check_rx_done
= gmac_check_rx_done
;
954 dma
->enable_dma
= gmac_enable_dma
;
955 dma
->disable_dma
= gmac_disable_dma
;
957 eth
->miiphy_read
= gmac_miiphy_read
;
958 eth
->miiphy_write
= gmac_miiphy_write
;
960 eth
->mac_init
= gmac_mac_init
;
961 eth
->disable_mac
= gmac_disable
;
962 eth
->enable_mac
= gmac_enable
;
963 eth
->set_mac_addr
= gmac_set_mac_addr
;
964 eth
->set_mac_speed
= gmac_set_speed
;