2 * Copyright 2014-2017 Broadcom.
4 * SPDX-License-Identifier: GPL-2.0+
20 #include "bcm-sf2-eth.h"
21 #include "bcm-sf2-eth-gmac.h"
23 #define SPINWAIT(exp, us) { \
24 uint countdown = (us) + 9; \
25 while ((exp) && (countdown >= 10)) {\
31 #define RX_BUF_SIZE_ALIGNED ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
32 #define TX_BUF_SIZE_ALIGNED ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
33 #define DESCP_SIZE_ALIGNED ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)
/*
 * Forward declarations: DMA engine start/stop helpers, defined below.
 * "dir" is MAC_DMA_TX or MAC_DMA_RX (see dma_deinit()/dma_init() callers).
 */
static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);
40 /* misc control bits */
42 /* buffer count and address extension */
/* memory address of the data buffer, bits 31:0 */
/* memory address of the data buffer, bits 63:32 */
/*
 * Cached copy of the DMA control flags (DMA_CTRL_ROC / DMA_CTRL_PEN);
 * maintained by dma_ctrlflags() and consulted by gmac_enable_dma().
 */
uint32_t g_dmactrlflags;
52 static uint32_t dma_ctrlflags(uint32_t mask
, uint32_t flags
)
54 debug("%s enter\n", __func__
);
56 g_dmactrlflags
&= ~mask
;
57 g_dmactrlflags
|= flags
;
59 /* If trying to enable parity, check if parity is actually supported */
60 if (g_dmactrlflags
& DMA_CTRL_PEN
) {
63 control
= readl(GMAC0_DMA_TX_CTRL_ADDR
);
64 writel(control
| D64_XC_PD
, GMAC0_DMA_TX_CTRL_ADDR
);
65 if (readl(GMAC0_DMA_TX_CTRL_ADDR
) & D64_XC_PD
) {
67 * We *can* disable it, therefore it is supported;
68 * restore control register
70 writel(control
, GMAC0_DMA_TX_CTRL_ADDR
);
72 /* Not supported, don't allow it to be enabled */
73 g_dmactrlflags
&= ~DMA_CTRL_PEN
;
77 return g_dmactrlflags
;
/* Read-modify-write helper: clear @value bits in the register at @reg. */
static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v &= ~(value);
	writel(v, reg);
}
/* Read-modify-write helper: set @value bits in the register at @reg. */
static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v |= value;
	writel(v, reg);
}
95 static void dma_tx_dump(struct eth_dma
*dma
)
97 dma64dd_t
*descp
= NULL
;
101 printf("TX DMA Register:\n");
102 printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
103 readl(GMAC0_DMA_TX_CTRL_ADDR
),
104 readl(GMAC0_DMA_TX_PTR_ADDR
),
105 readl(GMAC0_DMA_TX_ADDR_LOW_ADDR
),
106 readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR
),
107 readl(GMAC0_DMA_TX_STATUS0_ADDR
),
108 readl(GMAC0_DMA_TX_STATUS1_ADDR
));
110 printf("TX Descriptors:\n");
111 for (i
= 0; i
< TX_BUF_NUM
; i
++) {
112 descp
= (dma64dd_t
*)(dma
->tx_desc_aligned
) + i
;
113 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
114 descp
->ctrl1
, descp
->ctrl2
,
115 descp
->addrhigh
, descp
->addrlow
);
118 printf("TX Buffers:\n");
119 /* Initialize TX DMA descriptor table */
120 for (i
= 0; i
< TX_BUF_NUM
; i
++) {
121 bufp
= (uint8_t *)(dma
->tx_buf
+ i
* TX_BUF_SIZE_ALIGNED
);
122 printf("buf%d:0x%x; ", i
, (uint32_t)bufp
);
127 static void dma_rx_dump(struct eth_dma
*dma
)
129 dma64dd_t
*descp
= NULL
;
133 printf("RX DMA Register:\n");
134 printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
135 readl(GMAC0_DMA_RX_CTRL_ADDR
),
136 readl(GMAC0_DMA_RX_PTR_ADDR
),
137 readl(GMAC0_DMA_RX_ADDR_LOW_ADDR
),
138 readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR
),
139 readl(GMAC0_DMA_RX_STATUS0_ADDR
),
140 readl(GMAC0_DMA_RX_STATUS1_ADDR
));
142 printf("RX Descriptors:\n");
143 for (i
= 0; i
< RX_BUF_NUM
; i
++) {
144 descp
= (dma64dd_t
*)(dma
->rx_desc_aligned
) + i
;
145 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
146 descp
->ctrl1
, descp
->ctrl2
,
147 descp
->addrhigh
, descp
->addrlow
);
150 printf("RX Buffers:\n");
151 for (i
= 0; i
< RX_BUF_NUM
; i
++) {
152 bufp
= dma
->rx_buf
+ i
* RX_BUF_SIZE_ALIGNED
;
153 printf("buf%d:0x%x; ", i
, (uint32_t)bufp
);
159 static int dma_tx_init(struct eth_dma
*dma
)
161 dma64dd_t
*descp
= NULL
;
166 debug("%s enter\n", __func__
);
168 /* clear descriptor memory */
169 memset((void *)(dma
->tx_desc_aligned
), 0,
170 TX_BUF_NUM
* DESCP_SIZE_ALIGNED
);
171 memset(dma
->tx_buf
, 0, TX_BUF_NUM
* TX_BUF_SIZE_ALIGNED
);
173 /* Initialize TX DMA descriptor table */
174 for (i
= 0; i
< TX_BUF_NUM
; i
++) {
175 descp
= (dma64dd_t
*)(dma
->tx_desc_aligned
) + i
;
176 bufp
= dma
->tx_buf
+ i
* TX_BUF_SIZE_ALIGNED
;
177 /* clear buffer memory */
178 memset((void *)bufp
, 0, TX_BUF_SIZE_ALIGNED
);
181 /* if last descr set endOfTable */
182 if (i
== (TX_BUF_NUM
-1))
183 ctrl
= D64_CTRL1_EOT
;
186 descp
->addrlow
= (uint32_t)bufp
;
190 /* flush descriptor and buffer */
191 descp
= dma
->tx_desc_aligned
;
193 flush_dcache_range((unsigned long)descp
,
194 (unsigned long)descp
+
195 DESCP_SIZE_ALIGNED
* TX_BUF_NUM
);
196 flush_dcache_range((unsigned long)bufp
,
197 (unsigned long)bufp
+
198 TX_BUF_SIZE_ALIGNED
* TX_BUF_NUM
);
200 /* initialize the DMA channel */
201 writel((uint32_t)(dma
->tx_desc_aligned
), GMAC0_DMA_TX_ADDR_LOW_ADDR
);
202 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR
);
204 /* now update the dma last descriptor */
205 writel(((uint32_t)(dma
->tx_desc_aligned
)) & D64_XP_LD_MASK
,
206 GMAC0_DMA_TX_PTR_ADDR
);
211 static int dma_rx_init(struct eth_dma
*dma
)
214 dma64dd_t
*descp
= NULL
;
219 debug("%s enter\n", __func__
);
221 /* clear descriptor memory */
222 memset((void *)(dma
->rx_desc_aligned
), 0,
223 RX_BUF_NUM
* DESCP_SIZE_ALIGNED
);
224 /* clear buffer memory */
225 memset(dma
->rx_buf
, 0, RX_BUF_NUM
* RX_BUF_SIZE_ALIGNED
);
227 /* Initialize RX DMA descriptor table */
228 for (i
= 0; i
< RX_BUF_NUM
; i
++) {
229 descp
= (dma64dd_t
*)(dma
->rx_desc_aligned
) + i
;
230 bufp
= dma
->rx_buf
+ i
* RX_BUF_SIZE_ALIGNED
;
232 /* if last descr set endOfTable */
233 if (i
== (RX_BUF_NUM
- 1))
234 ctrl
= D64_CTRL1_EOT
;
236 descp
->ctrl2
= RX_BUF_SIZE_ALIGNED
;
237 descp
->addrlow
= (uint32_t)bufp
;
240 last_desc
= ((uint32_t)(descp
) & D64_XP_LD_MASK
)
244 descp
= dma
->rx_desc_aligned
;
246 /* flush descriptor and buffer */
247 flush_dcache_range((unsigned long)descp
,
248 (unsigned long)descp
+
249 DESCP_SIZE_ALIGNED
* RX_BUF_NUM
);
250 flush_dcache_range((unsigned long)(bufp
),
251 (unsigned long)bufp
+
252 RX_BUF_SIZE_ALIGNED
* RX_BUF_NUM
);
254 /* initailize the DMA channel */
255 writel((uint32_t)descp
, GMAC0_DMA_RX_ADDR_LOW_ADDR
);
256 writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR
);
258 /* now update the dma last descriptor */
259 writel(last_desc
, GMAC0_DMA_RX_PTR_ADDR
);
264 static int dma_init(struct eth_dma
*dma
)
266 debug(" %s enter\n", __func__
);
269 * Default flags: For backwards compatibility both
270 * Rx Overflow Continue and Parity are DISABLED.
272 dma_ctrlflags(DMA_CTRL_ROC
| DMA_CTRL_PEN
, 0);
274 debug("rx burst len 0x%x\n",
275 (readl(GMAC0_DMA_RX_CTRL_ADDR
) & D64_RC_BL_MASK
)
277 debug("tx burst len 0x%x\n",
278 (readl(GMAC0_DMA_TX_CTRL_ADDR
) & D64_XC_BL_MASK
)
284 /* From end of chip_init() */
285 /* enable the overflow continue feature and disable parity */
286 dma_ctrlflags(DMA_CTRL_ROC
| DMA_CTRL_PEN
/* mask */,
287 DMA_CTRL_ROC
/* value */);
292 static int dma_deinit(struct eth_dma
*dma
)
294 debug(" %s enter\n", __func__
);
296 gmac_disable_dma(dma
, MAC_DMA_RX
);
297 gmac_disable_dma(dma
, MAC_DMA_TX
);
301 free(dma
->tx_desc_aligned
);
302 dma
->tx_desc_aligned
= NULL
;
306 free(dma
->rx_desc_aligned
);
307 dma
->rx_desc_aligned
= NULL
;
312 int gmac_tx_packet(struct eth_dma
*dma
, void *packet
, int length
)
314 uint8_t *bufp
= dma
->tx_buf
+ dma
->cur_tx_index
* TX_BUF_SIZE_ALIGNED
;
316 /* kick off the dma */
318 int txout
= dma
->cur_tx_index
;
320 dma64dd_t
*descp
= NULL
;
322 uint32_t last_desc
= (((uint32_t)dma
->tx_desc_aligned
) +
323 sizeof(dma64dd_t
)) & D64_XP_LD_MASK
;
326 debug("%s enter\n", __func__
);
328 /* load the buffer */
329 memcpy(bufp
, packet
, len
);
331 /* Add 4 bytes for Ethernet FCS/CRC */
334 ctrl
= (buflen
& D64_CTRL2_BC_MASK
);
336 /* the transmit will only be one frame or set SOF, EOF */
337 /* also set int on completion */
338 flags
= D64_CTRL1_SOF
| D64_CTRL1_IOC
| D64_CTRL1_EOF
;
340 /* txout points to the descriptor to uset */
341 /* if last descriptor then set EOT */
342 if (txout
== (TX_BUF_NUM
- 1)) {
343 flags
|= D64_CTRL1_EOT
;
344 last_desc
= ((uint32_t)(dma
->tx_desc_aligned
)) & D64_XP_LD_MASK
;
347 /* write the descriptor */
348 descp
= ((dma64dd_t
*)(dma
->tx_desc_aligned
)) + txout
;
349 descp
->addrlow
= (uint32_t)bufp
;
351 descp
->ctrl1
= flags
;
354 /* flush descriptor and buffer */
355 flush_dcache_range((unsigned long)dma
->tx_desc_aligned
,
356 (unsigned long)dma
->tx_desc_aligned
+
357 DESCP_SIZE_ALIGNED
* TX_BUF_NUM
);
358 flush_dcache_range((unsigned long)bufp
,
359 (unsigned long)bufp
+ TX_BUF_SIZE_ALIGNED
);
361 /* now update the dma last descriptor */
362 writel(last_desc
, GMAC0_DMA_TX_PTR_ADDR
);
364 /* tx dma should be enabled so packet should go out */
367 dma
->cur_tx_index
= (txout
+ 1) & (TX_BUF_NUM
- 1);
372 bool gmac_check_tx_done(struct eth_dma
*dma
)
374 /* wait for tx to complete */
376 bool xfrdone
= false;
378 debug("%s enter\n", __func__
);
380 intstatus
= readl(GMAC0_INT_STATUS_ADDR
);
382 debug("int(0x%x)\n", intstatus
);
383 if (intstatus
& (I_XI0
| I_XI1
| I_XI2
| I_XI3
)) {
385 /* clear the int bits */
386 intstatus
&= ~(I_XI0
| I_XI1
| I_XI2
| I_XI3
);
387 writel(intstatus
, GMAC0_INT_STATUS_ADDR
);
389 debug("Tx int(0x%x)\n", intstatus
);
395 int gmac_check_rx_done(struct eth_dma
*dma
, uint8_t *buf
)
398 size_t rcvlen
= 0, buflen
= 0;
399 uint32_t stat0
= 0, stat1
= 0;
400 uint32_t control
, offset
;
401 uint8_t statbuf
[HWRXOFF
*2];
403 int index
, curr
, active
;
404 dma64dd_t
*descp
= NULL
;
409 * this api will check if a packet has been received.
410 * If so it will return the address of the buffer and current
411 * descriptor index will be incremented to the
412 * next descriptor. Once done with the frame the buffer should be
413 * added back onto the descriptor and the lastdscr should be updated
414 * to this descriptor.
416 index
= dma
->cur_rx_index
;
417 offset
= (uint32_t)(dma
->rx_desc_aligned
);
418 stat0
= readl(GMAC0_DMA_RX_STATUS0_ADDR
) & D64_RS0_CD_MASK
;
419 stat1
= readl(GMAC0_DMA_RX_STATUS1_ADDR
) & D64_RS0_CD_MASK
;
420 curr
= ((stat0
- offset
) & D64_RS0_CD_MASK
) / sizeof(dma64dd_t
);
421 active
= ((stat1
- offset
) & D64_RS0_CD_MASK
) / sizeof(dma64dd_t
);
423 /* check if any frame */
427 debug("received packet\n");
428 debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index
, curr
, active
);
433 /* get the packet pointer that corresponds to the rx descriptor */
434 bufp
= dma
->rx_buf
+ index
* RX_BUF_SIZE_ALIGNED
;
436 descp
= (dma64dd_t
*)(dma
->rx_desc_aligned
) + index
;
437 /* flush descriptor and buffer */
438 flush_dcache_range((unsigned long)dma
->rx_desc_aligned
,
439 (unsigned long)dma
->rx_desc_aligned
+
440 DESCP_SIZE_ALIGNED
* RX_BUF_NUM
);
441 flush_dcache_range((unsigned long)bufp
,
442 (unsigned long)bufp
+ RX_BUF_SIZE_ALIGNED
);
444 buflen
= (descp
->ctrl2
& D64_CTRL2_BC_MASK
);
446 stat0
= readl(GMAC0_DMA_RX_STATUS0_ADDR
);
447 stat1
= readl(GMAC0_DMA_RX_STATUS1_ADDR
);
449 debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
450 (uint32_t)bufp
, index
, buflen
, stat0
, stat1
);
452 dma
->cur_rx_index
= (index
+ 1) & (RX_BUF_NUM
- 1);
454 /* get buffer offset */
455 control
= readl(GMAC0_DMA_RX_CTRL_ADDR
);
456 offset
= (control
& D64_RC_RO_MASK
) >> D64_RC_RO_SHIFT
;
457 rcvlen
= *(uint16_t *)bufp
;
459 debug("Received %d bytes\n", rcvlen
);
460 /* copy status into temp buf then copy data from rx buffer */
461 memcpy(statbuf
, bufp
, offset
);
462 datap
= (void *)((uint32_t)bufp
+ offset
);
463 memcpy(buf
, datap
, rcvlen
);
465 /* update descriptor that is being added back on ring */
466 descp
->ctrl2
= RX_BUF_SIZE_ALIGNED
;
467 descp
->addrlow
= (uint32_t)bufp
;
469 /* flush descriptor */
470 flush_dcache_range((unsigned long)dma
->rx_desc_aligned
,
471 (unsigned long)dma
->rx_desc_aligned
+
472 DESCP_SIZE_ALIGNED
* RX_BUF_NUM
);
474 /* set the lastdscr for the rx ring */
475 writel(((uint32_t)descp
) & D64_XP_LD_MASK
, GMAC0_DMA_RX_PTR_ADDR
);
480 static int gmac_disable_dma(struct eth_dma
*dma
, int dir
)
484 debug("%s enter\n", __func__
);
486 if (dir
== MAC_DMA_TX
) {
487 /* address PR8249/PR7577 issue */
488 /* suspend tx DMA first */
489 writel(D64_XC_SE
, GMAC0_DMA_TX_CTRL_ADDR
);
490 SPINWAIT(((status
= (readl(GMAC0_DMA_TX_STATUS0_ADDR
) &
492 D64_XS0_XS_DISABLED
) &&
493 (status
!= D64_XS0_XS_IDLE
) &&
494 (status
!= D64_XS0_XS_STOPPED
), 10000);
497 * PR2414 WAR: DMA engines are not disabled until
500 writel(0, GMAC0_DMA_TX_CTRL_ADDR
);
501 SPINWAIT(((status
= (readl(GMAC0_DMA_TX_STATUS0_ADDR
) &
503 D64_XS0_XS_DISABLED
), 10000);
505 /* wait for the last transaction to complete */
508 status
= (status
== D64_XS0_XS_DISABLED
);
511 * PR2414 WAR: DMA engines are not disabled until
514 writel(0, GMAC0_DMA_RX_CTRL_ADDR
);
515 SPINWAIT(((status
= (readl(GMAC0_DMA_RX_STATUS0_ADDR
) &
517 D64_RS0_RS_DISABLED
), 10000);
519 status
= (status
== D64_RS0_RS_DISABLED
);
525 static int gmac_enable_dma(struct eth_dma
*dma
, int dir
)
529 debug("%s enter\n", __func__
);
531 if (dir
== MAC_DMA_TX
) {
532 dma
->cur_tx_index
= 0;
535 * These bits 20:18 (burstLen) of control register can be
536 * written but will take effect only if these bits are
537 * valid. So this will not affect previous versions
538 * of the DMA. They will continue to have those bits set to 0.
540 control
= readl(GMAC0_DMA_TX_CTRL_ADDR
);
542 control
|= D64_XC_XE
;
543 if ((g_dmactrlflags
& DMA_CTRL_PEN
) == 0)
544 control
|= D64_XC_PD
;
546 writel(control
, GMAC0_DMA_TX_CTRL_ADDR
);
548 /* initailize the DMA channel */
549 writel((uint32_t)(dma
->tx_desc_aligned
),
550 GMAC0_DMA_TX_ADDR_LOW_ADDR
);
551 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR
);
553 dma
->cur_rx_index
= 0;
555 control
= (readl(GMAC0_DMA_RX_CTRL_ADDR
) &
556 D64_RC_AE
) | D64_RC_RE
;
558 if ((g_dmactrlflags
& DMA_CTRL_PEN
) == 0)
559 control
|= D64_RC_PD
;
561 if (g_dmactrlflags
& DMA_CTRL_ROC
)
562 control
|= D64_RC_OC
;
565 * These bits 20:18 (burstLen) of control register can be
566 * written but will take effect only if these bits are
567 * valid. So this will not affect previous versions
568 * of the DMA. They will continue to have those bits set to 0.
570 control
&= ~D64_RC_BL_MASK
;
571 /* Keep default Rx burstlen */
572 control
|= readl(GMAC0_DMA_RX_CTRL_ADDR
) & D64_RC_BL_MASK
;
573 control
|= HWRXOFF
<< D64_RC_RO_SHIFT
;
575 writel(control
, GMAC0_DMA_RX_CTRL_ADDR
);
578 * the rx descriptor ring should have
579 * the addresses set properly;
580 * set the lastdscr for the rx ring
582 writel(((uint32_t)(dma
->rx_desc_aligned
) +
583 (RX_BUF_NUM
- 1) * RX_BUF_SIZE_ALIGNED
) &
584 D64_XP_LD_MASK
, GMAC0_DMA_RX_PTR_ADDR
);
590 bool gmac_mii_busywait(unsigned int timeout
)
594 while (timeout
> 10) {
595 tmp
= readl(GMAC_MII_CTRL_ADDR
);
596 if (tmp
& (1 << GMAC_MII_BUSY_SHIFT
)) {
603 return tmp
& (1 << GMAC_MII_BUSY_SHIFT
);
606 int gmac_miiphy_read(struct mii_dev
*bus
, int phyaddr
, int devad
, int reg
)
611 /* Busy wait timeout is 1ms */
612 if (gmac_mii_busywait(1000)) {
613 pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__
);
618 tmp
= GMAC_MII_DATA_READ_CMD
;
619 tmp
|= (phyaddr
<< GMAC_MII_PHY_ADDR_SHIFT
) |
620 (reg
<< GMAC_MII_PHY_REG_SHIFT
);
621 debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp
, phyaddr
, reg
);
622 writel(tmp
, GMAC_MII_DATA_ADDR
);
624 if (gmac_mii_busywait(1000)) {
625 pr_err("%s: MII read failure: MII/MDIO busy\n", __func__
);
629 value
= readl(GMAC_MII_DATA_ADDR
) & 0xffff;
630 debug("MII read data 0x%x\n", value
);
634 int gmac_miiphy_write(struct mii_dev
*bus
, int phyaddr
, int devad
, int reg
,
639 /* Busy wait timeout is 1ms */
640 if (gmac_mii_busywait(1000)) {
641 pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__
);
645 /* Write operation */
646 tmp
= GMAC_MII_DATA_WRITE_CMD
| (value
& 0xffff);
647 tmp
|= ((phyaddr
<< GMAC_MII_PHY_ADDR_SHIFT
) |
648 (reg
<< GMAC_MII_PHY_REG_SHIFT
));
649 debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
650 tmp
, phyaddr
, reg
, value
);
651 writel(tmp
, GMAC_MII_DATA_ADDR
);
653 if (gmac_mii_busywait(1000)) {
654 pr_err("%s: MII write failure: MII/MDIO busy\n", __func__
);
661 void gmac_init_reset(void)
663 debug("%s enter\n", __func__
);
665 /* set command config reg CC_SR */
666 reg32_set_bits(UNIMAC0_CMD_CFG_ADDR
, CC_SR
);
667 udelay(GMAC_RESET_DELAY
);
670 void gmac_clear_reset(void)
672 debug("%s enter\n", __func__
);
674 /* clear command config reg CC_SR */
675 reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR
, CC_SR
);
676 udelay(GMAC_RESET_DELAY
);
679 static void gmac_enable_local(bool en
)
683 debug("%s enter\n", __func__
);
685 /* read command config reg */
686 cmdcfg
= readl(UNIMAC0_CMD_CFG_ADDR
);
688 /* put mac in reset */
693 /* first deassert rx_ena and tx_ena while in reset */
694 cmdcfg
&= ~(CC_RE
| CC_TE
);
695 /* write command config reg */
696 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
698 /* bring mac out of reset */
701 /* if not enable exit now */
705 /* enable the mac transmit and receive paths now */
708 cmdcfg
|= (CC_RE
| CC_TE
);
710 /* assert rx_ena and tx_ena when out of reset to enable the mac */
711 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
716 int gmac_enable(void)
718 gmac_enable_local(1);
720 /* clear interrupts */
721 writel(I_INTMASK
, GMAC0_INT_STATUS_ADDR
);
/* Disable the MAC RX/TX paths. Returns 0. */
int gmac_disable(void)
{
	gmac_enable_local(0);

	return 0;
}
731 int gmac_set_speed(int speed
, int duplex
)
737 hd_ena
= duplex
? 0 : CC_HD
;
740 } else if (speed
== 100) {
742 } else if (speed
== 10) {
745 pr_err("%s: Invalid GMAC speed(%d)!\n", __func__
, speed
);
749 cmdcfg
= readl(UNIMAC0_CMD_CFG_ADDR
);
750 cmdcfg
&= ~(CC_ES_MASK
| CC_HD
);
751 cmdcfg
|= ((speed_cfg
<< CC_ES_SHIFT
) | hd_ena
);
753 printf("Change GMAC speed to %dMB\n", speed
);
754 debug("GMAC speed cfg 0x%x\n", cmdcfg
);
755 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
760 int gmac_set_mac_addr(unsigned char *mac
)
762 /* set our local address */
763 debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
764 mac
[0], mac
[1], mac
[2], mac
[3], mac
[4], mac
[5]);
765 writel(htonl(*(uint32_t *)mac
), UNIMAC0_MAC_MSB_ADDR
);
766 writew(htons(*(uint32_t *)&mac
[4]), UNIMAC0_MAC_LSB_ADDR
);
771 int gmac_mac_init(struct eth_device
*dev
)
773 struct eth_info
*eth
= (struct eth_info
*)(dev
->priv
);
774 struct eth_dma
*dma
= &(eth
->dma
);
780 debug("%s enter\n", __func__
);
782 /* Always use GMAC0 */
783 printf("Using GMAC%d\n", 0);
785 /* Reset AMAC0 core */
786 writel(0, AMAC0_IDM_RESET_ADDR
);
787 tmp
= readl(AMAC0_IO_CTRL_DIRECT_ADDR
);
789 tmp
&= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT
);
790 tmp
|= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT
);
792 tmp
&= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT
);
793 writel(tmp
, AMAC0_IO_CTRL_DIRECT_ADDR
);
797 * As AMAC is just reset, NO need?
798 * set eth_data into loopback mode to ensure no rx traffic
799 * gmac_loopback(eth_data, TRUE);
800 * ET_TRACE(("%s gmac loopback\n", __func__));
804 cmdcfg
= readl(UNIMAC0_CMD_CFG_ADDR
);
805 cmdcfg
&= ~(CC_TE
| CC_RE
| CC_RPI
| CC_TAI
| CC_HD
| CC_ML
|
806 CC_CFE
| CC_RL
| CC_RED
| CC_PE
| CC_TPI
|
808 cmdcfg
|= (CC_PROM
| CC_NLC
| CC_CFE
);
809 /* put mac in reset */
811 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
814 /* enable clear MIB on read */
815 reg32_set_bits(GMAC0_DEV_CTRL_ADDR
, DC_MROR
);
816 /* PHY: set smi_master to drive mdc_clk */
817 reg32_set_bits(GMAC0_PHY_CTRL_ADDR
, PC_MTE
);
819 /* clear persistent sw intstatus */
820 writel(0, GMAC0_INT_STATUS_ADDR
);
822 if (dma_init(dma
) < 0) {
823 pr_err("%s: GMAC dma_init failed\n", __func__
);
828 printf("%s: Chip ID: 0x%x\n", __func__
, chipid
);
830 /* set switch bypass mode */
831 tmp
= readl(SWITCH_GLOBAL_CONFIG_ADDR
);
832 tmp
|= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT
);
835 /* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */
837 writel(tmp
, SWITCH_GLOBAL_CONFIG_ADDR
);
839 tmp
= readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR
);
840 tmp
&= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT
);
841 writel(tmp
, CRMU_CHIP_IO_PAD_CONTROL_ADDR
);
843 /* Set MDIO to internal GPHY */
844 tmp
= readl(GMAC_MII_CTRL_ADDR
);
845 /* Select internal MDC/MDIO bus*/
846 tmp
&= ~(1 << GMAC_MII_CTRL_BYP_SHIFT
);
847 /* select MDC/MDIO connecting to on-chip internal PHYs */
848 tmp
&= ~(1 << GMAC_MII_CTRL_EXT_SHIFT
);
850 * give bit[6:0](MDCDIV) with required divisor to set
851 * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ
855 writel(tmp
, GMAC_MII_CTRL_ADDR
);
857 if (gmac_mii_busywait(1000)) {
858 pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__
);
862 /* Configure GMAC0 */
863 /* enable one rx interrupt per received frame */
864 writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT
, GMAC0_INTR_RECV_LAZY_ADDR
);
866 /* read command config reg */
867 cmdcfg
= readl(UNIMAC0_CMD_CFG_ADDR
);
868 /* enable 802.3x tx flow control (honor received PAUSE frames) */
870 /* enable promiscuous mode */
872 /* Disable loopback mode */
875 cmdcfg
&= ~(CC_ES_MASK
| CC_HD
);
876 /* Set to 1Gbps and full duplex by default */
877 cmdcfg
|= (2 << CC_ES_SHIFT
);
879 /* put mac in reset */
882 writel(cmdcfg
, UNIMAC0_CMD_CFG_ADDR
);
883 /* bring mac out of reset */
886 /* set max frame lengths; account for possible vlan tag */
887 writel(PKTSIZE
+ 32, UNIMAC0_FRM_LENGTH_ADDR
);
896 int gmac_add(struct eth_device
*dev
)
898 struct eth_info
*eth
= (struct eth_info
*)(dev
->priv
);
899 struct eth_dma
*dma
= &(eth
->dma
);
903 * Desc has to be 16-byte aligned. But for dcache flush it must be
904 * aligned to ARCH_DMA_MINALIGN.
906 tmp
= memalign(ARCH_DMA_MINALIGN
, DESCP_SIZE_ALIGNED
* TX_BUF_NUM
);
908 printf("%s: Failed to allocate TX desc Buffer\n", __func__
);
912 dma
->tx_desc_aligned
= (void *)tmp
;
913 debug("TX Descriptor Buffer: %p; length: 0x%x\n",
914 dma
->tx_desc_aligned
, DESCP_SIZE_ALIGNED
* TX_BUF_NUM
);
916 tmp
= memalign(ARCH_DMA_MINALIGN
, TX_BUF_SIZE_ALIGNED
* TX_BUF_NUM
);
918 printf("%s: Failed to allocate TX Data Buffer\n", __func__
);
919 free(dma
->tx_desc_aligned
);
922 dma
->tx_buf
= (uint8_t *)tmp
;
923 debug("TX Data Buffer: %p; length: 0x%x\n",
924 dma
->tx_buf
, TX_BUF_SIZE_ALIGNED
* TX_BUF_NUM
);
926 /* Desc has to be 16-byte aligned */
927 tmp
= memalign(ARCH_DMA_MINALIGN
, DESCP_SIZE_ALIGNED
* RX_BUF_NUM
);
929 printf("%s: Failed to allocate RX Descriptor\n", __func__
);
930 free(dma
->tx_desc_aligned
);
934 dma
->rx_desc_aligned
= (void *)tmp
;
935 debug("RX Descriptor Buffer: %p, length: 0x%x\n",
936 dma
->rx_desc_aligned
, DESCP_SIZE_ALIGNED
* RX_BUF_NUM
);
938 tmp
= memalign(ARCH_DMA_MINALIGN
, RX_BUF_SIZE_ALIGNED
* RX_BUF_NUM
);
940 printf("%s: Failed to allocate RX Data Buffer\n", __func__
);
941 free(dma
->tx_desc_aligned
);
943 free(dma
->rx_desc_aligned
);
946 dma
->rx_buf
= (uint8_t *)tmp
;
947 debug("RX Data Buffer: %p; length: 0x%x\n",
948 dma
->rx_buf
, RX_BUF_SIZE_ALIGNED
* RX_BUF_NUM
);
952 eth
->phy_interface
= PHY_INTERFACE_MODE_GMII
;
954 dma
->tx_packet
= gmac_tx_packet
;
955 dma
->check_tx_done
= gmac_check_tx_done
;
957 dma
->check_rx_done
= gmac_check_rx_done
;
959 dma
->enable_dma
= gmac_enable_dma
;
960 dma
->disable_dma
= gmac_disable_dma
;
962 eth
->miiphy_read
= gmac_miiphy_read
;
963 eth
->miiphy_write
= gmac_miiphy_write
;
965 eth
->mac_init
= gmac_mac_init
;
966 eth
->disable_mac
= gmac_disable
;
967 eth
->enable_mac
= gmac_enable
;
968 eth
->set_mac_addr
= gmac_set_mac_addr
;
969 eth
->set_mac_speed
= gmac_set_speed
;