1 /*-----------------------------------------------------------------------------+
3 * This source code has been made available to you by IBM on an AS-IS
4 * basis. Anyone receiving this source is licensed under IBM
5 * copyrights to use it in any way he or she deems fit, including
6 * copying it, modifying it, compiling it, and redistributing it either
7 * with or without modifications. No license under IBM patents or
8 * patent applications is to be implied by the copyright license.
10 * Any user of this software should understand that IBM cannot provide
11 * technical support for this software and will not be responsible for
12 * any consequences resulting from the use of this software.
14 * Any person who transfers this source code or any derivative work
15 * must include the IBM copyright notice, this paragraph, and the
16 * preceding two paragraphs in the transferred software.
18 * COPYRIGHT I B M CORPORATION 1995
19 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
20 *-----------------------------------------------------------------------------*/
21 /*-----------------------------------------------------------------------------+
23 * File Name: enetemac.c
25 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
31 * Date Description of Change BY
32 * --------- --------------------- ---
33 * 05-May-99 Created MKW
34 * 27-Jun-99 Clean up JWB
35 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
36 * 29-Jul-99 Added Full duplex support MKW
37 * 06-Aug-99 Changed names for Mal CR reg MKW
38 * 23-Aug-99 Turned off SYE when running at 10Mbs MKW
39 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
40 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
41 * to avoid chaining maximum sized packets. Push starting
42 * RX descriptor address up to the next cache line boundary.
43 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
44 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
45 * EMAC_RXM register. JWB
46 * 12-Mar-01 anne-sophie.harnois@nextream.fr
47 * - Variables are compatible with those already defined in
49 * - Receive buffer descriptor ring is used to send buffers
51 * - Info print about send/received/handled packet number if
52 * INFO_405_ENET is set
53 * 17-Apr-01 stefan.roese@esd-electronics.com
54 * - MAL reset in "eth_halt" included
55 * - Enet speed and duplex output now in one line
56 * 08-May-01 stefan.roese@esd-electronics.com
57 * - MAL error handling added (eth_init called again)
58 * 13-Nov-01 stefan.roese@esd-electronics.com
59 * - Set IST bit in EMAC_M1 reg upon 100MBit or full duplex
60 * 04-Jan-02 stefan.roese@esd-electronics.com
61 * - Wait for PHY auto negotiation to complete added
62 * 06-Feb-02 stefan.roese@esd-electronics.com
63 * - Bug fixed in waiting for auto negotiation to complete
64 * 26-Feb-02 stefan.roese@esd-electronics.com
65 * - rx and tx buffer descriptors now allocated (no fixed address
67 * 17-Jun-02 stefan.roese@esd-electronics.com
68 * - MAL error debug printf 'M' removed (rx de interrupt may
69 * occur upon many incoming packets with only 4 rx buffers).
70 *-----------------------------------------------------------------------------*
71 * 17-Nov-03 travis.sawyer@sandburst.com
72 * - ported from 405gp_enet.c to utilize up to 4 EMAC ports
73 * in the 440GX. This port should work with the 440GP
75 *-----------------------------------------------------------------------------*/
78 #if defined(CONFIG_440) && defined(CONFIG_NET_MULTI)
82 #include <asm/processor.h>
85 #include <440gx_enet.h>
92 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
93 #define PHY_AUTONEGOTIATE_TIMEOUT 4000 /* 4000 ms autonegotiate timeout */
96 /* Ethernet Transmit and Receive Buffers */
98 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
99 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
101 #define ENET_MAX_MTU PKTSIZE
102 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
105 /* define the number of channels implemented */
106 #define EMAC_RXCHL EMAC_NUM_DEV
107 #define EMAC_TXCHL EMAC_NUM_DEV
109 /*-----------------------------------------------------------------------------+
110 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
111 * Interrupt Controller).
112 *-----------------------------------------------------------------------------*/
113 #define MAL_UIC_ERR ( UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
114 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
115 #define EMAC_UIC_DEF UIC_ENET
119 /*-----------------------------------------------------------------------------+
120 * Global variables. TX and RX descriptors and buffers.
121 *-----------------------------------------------------------------------------*/
/* Cached MAL interrupt-enable mask; built in ppc_440x_eth_initialize() from the
 * MAL_IER_* error bits and written to the malier DCR there. Shared by all EMAC
 * ports since there is a single MAL. */
123 static uint32_t mal_ier
;
125 /*-----------------------------------------------------------------------------+
126 * Prototypes and externals.
127 *-----------------------------------------------------------------------------*/
/* Forward declarations: enet_rcv() drains received frames into the ready ring,
 * enetInt() is the shared MAL/EMAC interrupt dispatcher (installed as IRQ
 * handler and also callable from polling code, hence not static), and
 * mal_err()/emac_err() log and clear the respective error interrupts. */
128 static void enet_rcv (struct eth_device
*dev
, unsigned long malisr
);
130 int enetInt (struct eth_device
*dev
);
131 static void mal_err (struct eth_device
*dev
, unsigned long isr
,
132 unsigned long uic
, unsigned long maldef
,
133 unsigned long mal_errr
);
134 static void emac_err (struct eth_device
*dev
, unsigned long isr
);
136 /*-----------------------------------------------------------------------------+
138 | Disable MAL channel, and EMACn
141 +-----------------------------------------------------------------------------*/
/* Quiesce one EMAC port: mask its EMAC interrupts, cancel the per-device MAL
 * TX/RX channels (maltxcarr/malrxcarr, device selected by shifting the mask
 * right by devnum), wait for the cancel to take effect, soft-reset the EMAC
 * core, and re-arm the speed banner for the next init.
 * NOTE(review): this extraction has dropped lines (braces, the failsafe
 * decrement/check in the wait loop) — compare against the pristine U-Boot
 * 440gx_enet.c before editing logic. */
142 static void ppc_440x_eth_halt (struct eth_device
*dev
)
144 EMAC_440GX_HW_PST hw_p
= dev
->priv
;
/* NOTE(review): failsafe presumably bounds the cancel-wait loop below;
 * the decrement and timeout handling are missing from this view — confirm. */
145 uint32_t failsafe
= 10000;
147 out32 (EMAC_IER
+ hw_p
->hw_addr
, 0x00000000); /* disable emac interrupts */
149 /* 1st reset MAL channel */
150 /* Note: writing a 0 to a channel has no effect */
151 mtdcr (maltxcarr
, (MAL_CR_MMSR
>> hw_p
->devnum
));
152 mtdcr (malrxcarr
, (MAL_CR_MMSR
>> hw_p
->devnum
));
/* Spin until the TX channel-active bit for this device clears. */
155 while (mfdcr (maltxcasr
) & (MAL_CR_MMSR
>> hw_p
->devnum
)) {
156 udelay (1000); /* Delay 1 MS so as not to hammer the register */
/* Soft-reset the EMAC core so it comes up clean on the next init. */
164 out32 (EMAC_M0
+ hw_p
->hw_addr
, EMAC_M0_SRST
);
166 hw_p
->print_speed
= 1; /* print speed message again next time */
/* PHY helpers provided elsewhere in the tree (miiphy layer):
 * phy_setup_aneg() starts/restarts autonegotiation on the given PHY address,
 * miiphy_reset() soft-resets the PHY. */
171 extern int phy_setup_aneg (unsigned char addr
);
172 extern int miiphy_reset (unsigned char addr
);
/* Bring up one EMAC port end to end: verify a MAC address exists, reset the
 * per-device MAL channels and the EMAC core, configure ZMII/RGMII bridges,
 * run PHY autonegotiation and program speed/duplex, allocate cache-aligned
 * MAL descriptor rings, program the station address and MAL channel pointers,
 * enable TX/RX, and (first time only) install the EMAC interrupt handlers.
 * NOTE(review): this extraction is missing many lines (braces, else-arms,
 * assignments such as the targets of the malloc/mal_cr expressions) — the
 * annotations below describe intent only; verify against pristine source. */
174 static int ppc_440x_eth_init (struct eth_device
*dev
, bd_t
* bis
)
180 unsigned long duplex
;
181 unsigned long failsafe
;
183 unsigned short devnum
;
184 unsigned short reg_short
;
187 EMAC_440GX_HW_PST hw_p
= dev
->priv
;
189 /* before doing anything, figure out if we have a MAC address */
191 if (memcmp (dev
->enetaddr
, "\0\0\0\0\0\0", 6) == 0)
194 /* Need to get the OPB frequency so we can access the PHY */
195 get_sys_info (&sysinfo
);
199 mtmsr (msr
& ~(MSR_EE
)); /* disable interrupts */
201 devnum
= hw_p
->devnum
;
/* Report counters from the previous transfer before zeroing them. */
206 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
207 * In the most cases hw_p->stats.pkts_handled = hw_p->stats.pkts_rx, but it
208 * is possible that new packets (without relationship with
209 * current transfer) have got the time to arrived before
210 * netloop calls eth_halt
212 printf ("About preceeding transfer (eth%d):\n"
213 "- Sent packet number %d\n"
214 "- Received packet number %d\n"
215 "- Handled packet number %d\n",
218 hw_p
->stats
.pkts_rx
, hw_p
->stats
.pkts_handled
);
220 hw_p
->stats
.pkts_tx
= 0;
221 hw_p
->stats
.pkts_rx
= 0;
222 hw_p
->stats
.pkts_handled
= 0;
225 /* MAL Channel RESET */
226 /* 1st reset MAL channel */
227 /* Note: writing a 0 to a channel has no effect */
/* NOTE(review): the cancel mask here is MAL_TXRX_CASR while eth_halt uses
 * MAL_CR_MMSR for the same registers — confirm which is intended. */
228 mtdcr (maltxcarr
, (MAL_TXRX_CASR
>> hw_p
->devnum
));
229 mtdcr (malrxcarr
, (MAL_TXRX_CASR
>> hw_p
->devnum
));
232 /* TBS: should have udelay and failsafe here */
235 while (mfdcr (maltxcasr
) & (MAL_CR_MMSR
>> hw_p
->devnum
)) {
236 udelay (1000); /* Delay 1 MS so as not to hammer the register */
/* Reset the software ring-bookkeeping indices. */
243 hw_p
->tx_err_index
= 0; /* Transmit Error Index for tx_err_log */
244 hw_p
->rx_err_index
= 0; /* Receive Error Index for rx_err_log */
246 hw_p
->rx_slot
= 0; /* MAL Receive Slot */
247 hw_p
->rx_i_index
= 0; /* Receive Interrupt Queue Index */
248 hw_p
->rx_u_index
= 0; /* Receive User Queue Index */
250 hw_p
->tx_slot
= 0; /* MAL Transmit Slot */
251 hw_p
->tx_i_index
= 0; /* Transmit Interrupt Queue Index */
252 hw_p
->tx_u_index
= 0; /* Transmit User Queue Index */
255 /* NOTE: 440GX spec states that mode is mutually exclusive */
256 /* NOTE: Therefore, disable all other EMACS, since we handle */
257 /* NOTE: only one emac at a time */
/* Route the ZMII bridge MDI to this device and force a known speed state. */
261 out32 (ZMII_FER
, ZMII_FER_MDI
<< ZMII_FER_V (devnum
));
262 out32 (ZMII_SSR
, 0x11110000);
263 /* reset emac so we have access to the phy */
264 __asm__
volatile ("eieio");
266 out32 (EMAC_M0
+ hw_p
->hw_addr
, EMAC_M0_SRST
);
267 __asm__
volatile ("eieio");
/* EMAC 2/3 sit behind the RGMII bridge on 440GX. */
269 if ((devnum
== 2) || (devnum
== 3))
270 out32 (RGMII_FER
, ((RGMII_FER_RGMII
<< RGMII_FER_V (2)) |
271 (RGMII_FER_RGMII
<< RGMII_FER_V (3))));
272 __asm__
volatile ("eieio");
/* Wait for the soft reset to self-clear, bounded by failsafe. */
275 while ((in32 (EMAC_M0
+ hw_p
->hw_addr
) & (EMAC_M0_SRST
)) && failsafe
) {
280 /* Whack the M1 register */
/* Select the OPB-clock-indication field in M1 from the measured OPB freq. */
282 mode_reg
&= ~0x00000038;
/* NOTE(review): the empty statement below is intentional in the original —
 * for OPB <= 50 MHz the OBCI field stays cleared. */
283 if (sysinfo
.freqOPB
<= 50000000);
284 else if (sysinfo
.freqOPB
<= 66666667)
285 mode_reg
|= EMAC_M1_OBCI_66
;
286 else if (sysinfo
.freqOPB
<= 83333333)
287 mode_reg
|= EMAC_M1_OBCI_83
;
288 else if (sysinfo
.freqOPB
<= 100000000)
289 mode_reg
|= EMAC_M1_OBCI_100
;
291 mode_reg
|= EMAC_M1_OBCI_GT100
;
293 out32 (EMAC_M1
+ hw_p
->hw_addr
, mode_reg
);
296 /* wait for PHY to complete auto negotiation */
298 #ifndef CONFIG_CS8952_PHY
/* Pick the PHY address matching devnum (per-board CONFIG_PHYn_ADDR). */
301 reg
= CONFIG_PHY_ADDR
;
304 reg
= CONFIG_PHY1_ADDR
;
306 #if defined (CONFIG_440_GX)
308 reg
= CONFIG_PHY2_ADDR
;
311 reg
= CONFIG_PHY3_ADDR
;
315 reg
= CONFIG_PHY_ADDR
;
322 /* Start/Restart autonegotiation */
323 /* miiphy_write(reg, PHY_BMCR, 0x9340); */
324 phy_setup_aneg (reg
);
/* NOTE(review): "®_short" below is extraction damage for "&reg_short". */
327 miiphy_read (reg
, PHY_BMSR
, ®_short
);
330 * Wait if PHY is able of autonegotiation and autonegotiation is not complete
332 if ((reg_short
& PHY_BMSR_AUTN_ABLE
)
333 && !(reg_short
& PHY_BMSR_AUTN_COMP
)) {
334 puts ("Waiting for PHY auto negotiation to complete");
336 while (!(reg_short
& PHY_BMSR_AUTN_COMP
)) {
/* i counts milliseconds; bail out after PHY_AUTONEGOTIATE_TIMEOUT ms. */
340 if (i
> PHY_AUTONEGOTIATE_TIMEOUT
) {
341 puts (" TIMEOUT !\n");
345 if ((i
++ % 1000) == 0) {
348 udelay (1000); /* 1 ms */
349 miiphy_read (reg
, PHY_BMSR
, ®_short
);
353 udelay (500000); /* another 500 ms (results in faster booting) */
/* Read negotiated link parameters back from the PHY. */
356 speed
= miiphy_speed (reg
);
357 duplex
= miiphy_duplex (reg
);
359 if (hw_p
->print_speed
) {
360 hw_p
->print_speed
= 0;
361 printf ("ENET Speed is %d Mbps - %s duplex connection\n",
362 (int) speed
, (duplex
== HALF
) ? "HALF" : "FULL");
365 /* Set ZMII/RGMII speed according to the phy link speed */
366 reg
= in32 (ZMII_SSR
);
368 out32 (ZMII_SSR
, reg
| (ZMII_SSR_SP
<< ZMII_SSR_V (devnum
)));
371 reg
& (~(ZMII_SSR_SP
<< ZMII_SSR_V (devnum
))));
373 if ((devnum
== 2) || (devnum
== 3)) {
375 reg
= (RGMII_SSR_SP_1000MBPS
<< RGMII_SSR_V (devnum
));
376 else if (speed
== 100)
377 reg
= (RGMII_SSR_SP_100MBPS
<< RGMII_SSR_V (devnum
));
379 reg
= (RGMII_SSR_SP_10MBPS
<< RGMII_SSR_V (devnum
));
381 out32 (RGMII_SSR
, reg
);
384 /* set the Mal configuration reg */
385 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
386 if (get_pvr () == PVR_440GP_RB
)
388 MAL_CR_OPBBL
| MAL_CR_LEA
| MAL_CR_PLBLT_DEFAULT
);
391 MAL_CR_PLBB
| MAL_CR_OPBBL
| MAL_CR_LEA
|
392 MAL_CR_PLBLT_DEFAULT
| MAL_CR_EOPIE
| 0x00330000);
394 /* Free "old" buffers */
395 if (hw_p
->alloc_tx_buf
)
396 free (hw_p
->alloc_tx_buf
);
397 if (hw_p
->alloc_rx_buf
)
398 free (hw_p
->alloc_rx_buf
);
401 * Malloc MAL buffer descriptors, make sure they are
402 * aligned on cache line boundary size
403 * (401/403/IOP480 = 16, 405 = 32)
404 * and doesn't cross cache block boundaries.
/* Over-allocate by up to two cache lines, then round up to alignment. */
407 (mal_desc_t
*) malloc ((sizeof (mal_desc_t
) * NUM_TX_BUFF
) +
408 ((2 * CFG_CACHELINE_SIZE
) - 2));
409 if (((int) hw_p
->alloc_tx_buf
& CACHELINE_MASK
) != 0) {
411 (mal_desc_t
*) ((int) hw_p
->alloc_tx_buf
+
414 alloc_tx_buf
& CACHELINE_MASK
));
416 hw_p
->tx
= hw_p
->alloc_tx_buf
;
420 (mal_desc_t
*) malloc ((sizeof (mal_desc_t
) * NUM_RX_BUFF
) +
421 ((2 * CFG_CACHELINE_SIZE
) - 2));
422 if (((int) hw_p
->alloc_rx_buf
& CACHELINE_MASK
) != 0) {
424 (mal_desc_t
*) ((int) hw_p
->alloc_rx_buf
+
427 alloc_rx_buf
& CACHELINE_MASK
));
429 hw_p
->rx
= hw_p
->alloc_rx_buf
;
/* Initialize the TX descriptor ring; last descriptor gets the WRAP bit. */
432 for (i
= 0; i
< NUM_TX_BUFF
; i
++) {
433 hw_p
->tx
[i
].ctrl
= 0;
434 hw_p
->tx
[i
].data_len
= 0;
435 if (hw_p
->first_init
== 0)
437 (char *) malloc (ENET_MAX_MTU_ALIGNED
);
438 hw_p
->tx
[i
].data_ptr
= hw_p
->txbuf_ptr
;
439 if ((NUM_TX_BUFF
- 1) == i
)
440 hw_p
->tx
[i
].ctrl
|= MAL_TX_CTRL_WRAP
;
441 hw_p
->tx_run
[i
] = -1;
443 printf ("TX_BUFF %d @ 0x%08lx\n", i
,
444 (ulong
) hw_p
->tx
[i
].data_ptr
);
/* Initialize the RX ring: buffers come from the shared NetRxPackets pool,
 * each marked EMPTY (owned by MAL) with interrupt-on-receive. */
448 for (i
= 0; i
< NUM_RX_BUFF
; i
++) {
449 hw_p
->rx
[i
].ctrl
= 0;
450 hw_p
->rx
[i
].data_len
= 0;
451 /* rx[i].data_ptr = (char *) &rx_buff[i]; */
452 hw_p
->rx
[i
].data_ptr
= (char *) NetRxPackets
[i
];
453 if ((NUM_RX_BUFF
- 1) == i
)
454 hw_p
->rx
[i
].ctrl
|= MAL_RX_CTRL_WRAP
;
455 hw_p
->rx
[i
].ctrl
|= MAL_RX_CTRL_EMPTY
| MAL_RX_CTRL_INTR
;
456 hw_p
->rx_ready
[i
] = -1;
458 printf ("RX_BUFF %d @ 0x%08lx\n", i
, (ulong
) rx
[i
].data_ptr
);
/* Program the 48-bit station address into IAH (high 16) / IAL (low 32). */
464 reg
|= dev
->enetaddr
[0]; /* set high address */
466 reg
|= dev
->enetaddr
[1];
468 out32 (EMAC_IAH
+ hw_p
->hw_addr
, reg
);
471 reg
|= dev
->enetaddr
[2]; /* set low address */
473 reg
|= dev
->enetaddr
[3];
475 reg
|= dev
->enetaddr
[4];
477 reg
|= dev
->enetaddr
[5];
479 out32 (EMAC_IAL
+ hw_p
->hw_addr
, reg
);
/* Per-device MAL channel setup (channel number == devnum). */
483 /* setup MAL tx & rx channel pointers */
484 mtdcr (maltxbattr
, 0x0);
485 mtdcr (maltxctp1r
, hw_p
->tx
);
486 mtdcr (malrxbattr
, 0x0);
487 mtdcr (malrxctp1r
, hw_p
->rx
);
488 /* set RX buffer size */
489 mtdcr (malrcbs1
, ENET_MAX_MTU_ALIGNED
/ 16);
491 #if defined (CONFIG_440_GX)
493 /* setup MAL tx & rx channel pointers */
494 mtdcr (maltxbattr
, 0x0);
495 mtdcr (maltxctp2r
, hw_p
->tx
);
496 mtdcr (malrxbattr
, 0x0);
497 mtdcr (malrxctp2r
, hw_p
->rx
);
498 /* set RX buffer size */
499 mtdcr (malrcbs2
, ENET_MAX_MTU_ALIGNED
/ 16);
502 /* setup MAL tx & rx channel pointers */
503 mtdcr (maltxbattr
, 0x0);
504 mtdcr (maltxctp3r
, hw_p
->tx
);
505 mtdcr (malrxbattr
, 0x0);
506 mtdcr (malrxctp3r
, hw_p
->rx
);
507 /* set RX buffer size */
508 mtdcr (malrcbs3
, ENET_MAX_MTU_ALIGNED
/ 16);
510 #endif /*CONFIG_440_GX */
513 /* setup MAL tx & rx channel pointers */
514 mtdcr (maltxbattr
, 0x0);
515 mtdcr (maltxctp0r
, hw_p
->tx
);
516 mtdcr (malrxbattr
, 0x0);
517 mtdcr (malrxctp0r
, hw_p
->rx
);
518 /* set RX buffer size */
519 mtdcr (malrcbs0
, ENET_MAX_MTU_ALIGNED
/ 16);
523 /* Enable MAL transmit and receive channels */
524 mtdcr (maltxcasr
, (MAL_TXRX_CASR
>> hw_p
->devnum
));
525 mtdcr (malrxcasr
, (MAL_TXRX_CASR
>> hw_p
->devnum
));
527 /* set transmit enable & receive enable */
528 out32 (EMAC_M0
+ hw_p
->hw_addr
, EMAC_M0_TXE
| EMAC_M0_RXE
);
530 /* set receive fifo to 4k and tx fifo to 2k */
531 mode_reg
= in32 (EMAC_M1
+ hw_p
->hw_addr
)
;
532 mode_reg
|= EMAC_M1_RFS_4K
| EMAC_M1_TX_FIFO_2K
;
/* Media/speed bits in M1; IST (ignore SQE test) set for 100M/full duplex. */
536 if (speed
== _100BASET
)
537 mode_reg
= mode_reg
| EMAC_M1_MF_100MBPS
| EMAC_M1_IST
;
539 mode_reg
= mode_reg
& ~0x00C00000; /* 10 MBPS */
541 mode_reg
= mode_reg
| 0x80000000 | EMAC_M1_IST
;
543 out32 (EMAC_M1
+ hw_p
->hw_addr
, mode_reg
);
545 /* Enable broadcast and individual address */
546 /* TBS: enabling runts as some misbehaved nics will send runts */
547 out32 (EMAC_RXM
+ hw_p
->hw_addr
, EMAC_RMR_BAE
| EMAC_RMR_IAE
);
549 /* we probably need to set the tx mode1 reg? maybe at tx time */
551 /* set transmit request threshold register */
552 out32 (EMAC_TRTR
+ hw_p
->hw_addr
, 0x18000000); /* 256 byte threshold */
554 /* set receive low/high water mark register */
555 /* 440GP has a 64 byte burst length */
556 out32 (EMAC_RX_HI_LO_WMARK
+ hw_p
->hw_addr
, 0x80009000);
557 out32 (EMAC_TXM1
+ hw_p
->hw_addr
, 0xf8640000);
559 /* Set fifo limit entry in tx mode 0 */
560 out32 (EMAC_TXM0
+ hw_p
->hw_addr
, 0x00000003);
562 out32 (EMAC_I_FRAME_GAP_REG
+ hw_p
->hw_addr
, 0x00000008);
/* Build the EMAC error-interrupt mask.
 * NOTE(review): EMAC_ISR_PTLE appears twice in this expression (harmless
 * OR-duplication, but one term was likely meant to be another bit) — check
 * against the pristine source. */
565 hw_p
->emac_ier
= EMAC_ISR_PTLE
| EMAC_ISR_BFCS
|
566 EMAC_ISR_PTLE
| EMAC_ISR_ORE
| EMAC_ISR_IRE
;
567 if (speed
== _100BASET
)
568 hw_p
->emac_ier
= hw_p
->emac_ier
| EMAC_ISR_SYE
;
570 out32 (EMAC_ISR
+ hw_p
->hw_addr
, 0xffffffff); /* clear pending interrupts */
571 out32 (EMAC_IER
+ hw_p
->hw_addr
, hw_p
->emac_ier
);
/* First init only: hook wake-up and EMAC interrupt vectors for this port. */
573 if (hw_p
->first_init
== 0) {
575 * Connect interrupt service routines
577 irq_install_handler (VECNUM_EWU0
+ (hw_p
->devnum
* 2),
578 (interrupt_handler_t
*) enetInt
, dev
);
579 irq_install_handler (VECNUM_ETH0
+ (hw_p
->devnum
* 2),
580 (interrupt_handler_t
*) enetInt
, dev
);
582 #if 0 /* done by irq_install_handler */
583 /* set up interrupt handler */
584 /* setup interrupt controller to take interrupts from the MAL &
586 mtdcr (uicsr
, 0xffffffff); /* clear pending interrupts */
587 mtdcr (uicer
, mfdcr (uicer
) | MAL_UIC_DEF
| EMAC_UIC_DEF
);
590 mtmsr (msr
); /* enable interrupts again */
593 hw_p
->first_init
= 1;
/* Transmit one frame: stamp our source MAC into the frame, copy it to the
 * driver's TX buffer, hand the current TX descriptor to the MAL (READY bit),
 * kick the EMAC (GNP0), then poll up to ~3 s for completion.
 * NOTE(review): lines are missing in this extraction (the too-long/too-short
 * length clamp, the polling loop body, the return value) — verify against
 * the pristine source before changing logic. */
599 static int ppc_440x_eth_send (struct eth_device
*dev
, volatile void *ptr
,
602 struct enet_frame
*ef_ptr
;
603 ulong time_start
, time_now
;
604 unsigned long temp_txm0
;
605 EMAC_440GX_HW_PST hw_p
= dev
->priv
;
607 ef_ptr
= (struct enet_frame
*) ptr
;
609 /*-----------------------------------------------------------------------+
610 * Copy in our address into the frame.
611 *-----------------------------------------------------------------------*/
612 (void) memcpy (ef_ptr
->source_addr
, dev
->enetaddr
, ENET_ADDR_LENGTH
);
614 /*-----------------------------------------------------------------------+
615 * If frame is too long or too short, modify length.
616 *-----------------------------------------------------------------------*/
617 /* TBS: where does the fragment go???? */
618 if (len
> ENET_MAX_MTU
)
621 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
622 memcpy ((void *) hw_p
->txbuf_ptr
, (const void *) ptr
, len
);
624 /*-----------------------------------------------------------------------+
625 * set TX Buffer busy, and send it
626 *-----------------------------------------------------------------------*/
/* Last-buffer + generate-FCS/pad; explicitly clear the source-address
 * insert/replace bits since we stamped the MAC ourselves above. */
627 hw_p
->tx
[hw_p
->tx_slot
].ctrl
= (MAL_TX_CTRL_LAST
|
628 EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
) &
629 ~(EMAC_TX_CTRL_ISA
| EMAC_TX_CTRL_RSA
);
630 if ((NUM_TX_BUFF
- 1) == hw_p
->tx_slot
)
631 hw_p
->tx
[hw_p
->tx_slot
].ctrl
|= MAL_TX_CTRL_WRAP
;
633 hw_p
->tx
[hw_p
->tx_slot
].data_len
= (short) len
;
/* Setting READY transfers descriptor ownership to the MAL; the eieio below
 * orders the descriptor writes before the doorbell register write. */
634 hw_p
->tx
[hw_p
->tx_slot
].ctrl
|= MAL_TX_CTRL_READY
;
636 __asm__
volatile ("eieio");
638 out32 (EMAC_TXM0
+ hw_p
->hw_addr
,
639 in32 (EMAC_TXM0
+ hw_p
->hw_addr
) | EMAC_TXM0_GNP0
);
641 hw_p
->stats
.pkts_tx
++;
644 /*-----------------------------------------------------------------------+
645 * poll until the packet is sent and then make sure it is OK
646 *-----------------------------------------------------------------------*/
647 time_start
= get_timer (0);
649 temp_txm0
= in32 (EMAC_TXM0
+ hw_p
->hw_addr
);
650 /* loop until either TINT turns on or 3 seconds elapse */
651 if ((temp_txm0
& EMAC_TXM0_GNP0
) != 0) {
652 /* transmit is done, so now check for errors
653 * If there is an error, an interrupt should
654 * happen when we return
656 time_now
= get_timer (0);
657 if ((time_now
- time_start
) > 3000) {
/* Shared MAL/EMAC interrupt dispatcher. Reads the UIC masked-status
 * registers, routes MAL errors to mal_err(), per-port EMAC errors to
 * emac_err(), acknowledges TX end-of-buffer, and hands RX end-of-buffer
 * to enet_rcv() for the owning channel, clearing UIC status bits as it goes.
 * Not static: also installed directly as the UIC handler.
 * NOTE(review): loop framing and several closing braces are missing in this
 * extraction; the four devnum arms are near-identical and structurally
 * repetitive — a candidate for factoring in the pristine source. */
667 int enetInt (struct eth_device
*dev
)
670 int rc
= -1; /* default to not us */
671 unsigned long mal_isr
;
672 unsigned long emac_isr
= 0;
673 unsigned long mal_rx_eob
;
674 unsigned long my_uic0msr
, my_uic1msr
;
676 #if defined(CONFIG_440_GX)
677 unsigned long my_uic2msr
;
679 EMAC_440GX_HW_PST hw_p
;
682 * Because the mal is generic, we need to get the current
685 dev
= eth_get_dev ();
690 /* enter loop that stays in interrupt code until nothing to service */
694 my_uic0msr
= mfdcr (uic0msr
);
695 my_uic1msr
= mfdcr (uic1msr
);
696 #if defined(CONFIG_440_GX)
697 my_uic2msr
= mfdcr (uic2msr
);
/* Nothing of ours pending on UIC0/UIC1 (and UIC2 on 440GX): bail out. */
699 if (!(my_uic0msr
& (UIC_MRE
| UIC_MTE
))
701 (UIC_ETH0
| UIC_ETH1
| UIC_MS
| UIC_MTDE
|
706 #if defined (CONFIG_440_GX)
707 if (!(my_uic0msr
& (UIC_MRE
| UIC_MTE
))
708 && !(my_uic2msr
& (UIC_ETH2
| UIC_ETH3
))) {
713 /* get and clear controller status interrupts */
714 /* look at Mal and EMAC interrupts */
715 if ((my_uic0msr
& (UIC_MRE
| UIC_MTE
))
716 || (my_uic1msr
& (UIC_MS
| UIC_MTDE
| UIC_MRDE
))) {
717 /* we have a MAL interrupt */
718 mal_isr
= mfdcr (malesr
);
719 /* look for mal error */
720 if (my_uic1msr
& (UIC_MS
| UIC_MTDE
| UIC_MRDE
)) {
721 mal_err (dev
, mal_isr
, my_uic0msr
,
722 MAL_UIC_DEF
, MAL_UIC_ERR
);
728 /* port by port dispatch of emac interrupts */
729 if (hw_p
->devnum
== 0) {
730 if (UIC_ETH0
& my_uic1msr
) { /* look for EMAC errors */
731 emac_isr
= in32 (EMAC_ISR
+ hw_p
->hw_addr
);
732 if ((hw_p
->emac_ier
& emac_isr
) != 0) {
733 emac_err (dev
, emac_isr
);
738 if ((hw_p
->emac_ier
& emac_isr
)
739 || (my_uic1msr
& (UIC_MS
| UIC_MTDE
| UIC_MRDE
))) {
740 mtdcr (uic0sr
, UIC_MRE
| UIC_MTE
); /* Clear */
741 mtdcr (uic1sr
, UIC_ETH0
| UIC_MS
| UIC_MTDE
| UIC_MRDE
); /* Clear */
742 return (rc
); /* we had errors so get out */
746 if (hw_p
->devnum
== 1) {
747 if (UIC_ETH1
& my_uic1msr
) { /* look for EMAC errors */
748 emac_isr
= in32 (EMAC_ISR
+ hw_p
->hw_addr
);
749 if ((hw_p
->emac_ier
& emac_isr
) != 0) {
750 emac_err (dev
, emac_isr
);
755 if ((hw_p
->emac_ier
& emac_isr
)
756 || (my_uic1msr
& (UIC_MS
| UIC_MTDE
| UIC_MRDE
))) {
757 mtdcr (uic0sr
, UIC_MRE
| UIC_MTE
); /* Clear */
758 mtdcr (uic1sr
, UIC_ETH1
| UIC_MS
| UIC_MTDE
| UIC_MRDE
); /* Clear */
759 return (rc
); /* we had errors so get out */
762 #if defined (CONFIG_440_GX)
763 if (hw_p
->devnum
== 2) {
764 if (UIC_ETH2
& my_uic2msr
) { /* look for EMAC errors */
765 emac_isr
= in32 (EMAC_ISR
+ hw_p
->hw_addr
);
766 if ((hw_p
->emac_ier
& emac_isr
) != 0) {
767 emac_err (dev
, emac_isr
);
772 if ((hw_p
->emac_ier
& emac_isr
)
773 || (my_uic1msr
& (UIC_MS
| UIC_MTDE
| UIC_MRDE
))) {
774 mtdcr (uic0sr
, UIC_MRE
| UIC_MTE
); /* Clear */
775 mtdcr (uic1sr
, UIC_MS
| UIC_MTDE
| UIC_MRDE
); /* Clear */
776 mtdcr (uic2sr
, UIC_ETH2
);
777 return (rc
); /* we had errors so get out */
781 if (hw_p
->devnum
== 3) {
782 if (UIC_ETH3
& my_uic2msr
) { /* look for EMAC errors */
783 emac_isr
= in32 (EMAC_ISR
+ hw_p
->hw_addr
);
784 if ((hw_p
->emac_ier
& emac_isr
) != 0) {
785 emac_err (dev
, emac_isr
);
790 if ((hw_p
->emac_ier
& emac_isr
)
791 || (my_uic1msr
& (UIC_MS
| UIC_MTDE
| UIC_MRDE
))) {
792 mtdcr (uic0sr
, UIC_MRE
| UIC_MTE
); /* Clear */
793 mtdcr (uic1sr
, UIC_MS
| UIC_MTDE
| UIC_MRDE
); /* Clear */
794 mtdcr (uic2sr
, UIC_ETH3
);
795 return (rc
); /* we had errors so get out */
798 #endif /* CONFIG_440_GX */
799 /* handle MAL TX EOB interrupt from a tx */
800 if (my_uic0msr
& UIC_MTE
) {
801 mal_rx_eob
= mfdcr (maltxeobisr
);
802 mtdcr (maltxeobisr
, mal_rx_eob
);
803 mtdcr (uic0sr
, UIC_MTE
);
805 /* handle MAL RX EOB interrupt from a receive */
806 /* check for EOB on valid channels */
807 if (my_uic0msr
& UIC_MRE
) {
808 mal_rx_eob
= mfdcr (malrxeobisr
);
/* Channel bit for this device is MSB >> devnum. */
809 if ((mal_rx_eob
& (0x80000000 >> hw_p
->devnum
)) != 0) { /* call emac routine for channel x */
811 mtdcr(malrxeobisr, mal_rx_eob); */
812 enet_rcv (dev
, emac_isr
);
813 /* indicate that we serviced an interrupt */
818 mtdcr (uic0sr
, UIC_MRE
); /* Clear */
819 mtdcr (uic1sr
, UIC_MS
| UIC_MTDE
| UIC_MRDE
); /* Clear */
820 switch (hw_p
->devnum
) {
822 mtdcr (uic1sr
, UIC_ETH0
);
825 mtdcr (uic1sr
, UIC_ETH1
);
827 #if defined (CONFIG_440_GX)
829 mtdcr (uic2sr
, UIC_ETH2
);
832 mtdcr (uic2sr
, UIC_ETH3
);
834 #endif /* CONFIG_440_GX */
843 /*-----------------------------------------------------------------------------+
845 *-----------------------------------------------------------------------------*/
/* Handle a MAL error interrupt: acknowledge the MAL error status, clear the
 * TX/RX descriptor-error interrupts, log the state, and re-run eth_init()
 * to restart the interface from scratch. */
846 static void mal_err (struct eth_device
*dev
, unsigned long isr
,
847 unsigned long uic
, unsigned long maldef
,
848 unsigned long mal_errr
)
850 EMAC_440GX_HW_PST hw_p
= dev
->priv
;
852 mtdcr (malesr
, isr
); /* clear interrupt */
854 /* clear DE interrupt */
855 mtdcr (maltxdeir
, 0xC0000000);
856 mtdcr (malrxdeir
, 0x80000000);
/* NOTE(review): format string contains a stray "= =" after UIC (cosmetic;
 * changing it alters console output, so left untouched here). */
859 printf ("\nMAL error occured.... ISR = %lx UIC = = %lx MAL_DEF = %lx MAL_ERR= %lx \n", isr
, uic
, maldef
, mal_errr
);
862 eth_init (hw_p
->bis
); /* start again... */
865 /*-----------------------------------------------------------------------------+
867 *-----------------------------------------------------------------------------*/
/* Handle an EMAC error interrupt for one port: log which device and which
 * ISR bits fired, then write the bits back to EMAC_ISR to acknowledge them
 * (the EMAC interrupt status register is write-one-to-clear). */
868 static void emac_err (struct eth_device
*dev
, unsigned long isr
)
870 EMAC_440GX_HW_PST hw_p
= dev
->priv
;
872 printf ("EMAC%d error occured.... ISR = %lx\n", hw_p
->devnum
, isr
);
873 out32 (EMAC_ISR
+ hw_p
->hw_addr
, isr
);
876 /*-----------------------------------------------------------------------------+
877 * enet_rcv() handles the ethernet receive data
878 *-----------------------------------------------------------------------------*/
/* Drain completed RX descriptors for this device: acknowledge the RX EOB
 * status, then walk the ring handing each filled descriptor's index to the
 * rx_ready[] queue for ppc_440x_eth_rx() to consume. Errored or zero-length
 * descriptors are logged and returned to the MAL (EMPTY) immediately; good
 * buffers stay owned by software until eth_rx frees them.
 * NOTE(review): several lines (index advance, error-log field names, the
 * break on full ready-queue) are missing from this extraction — verify
 * against the pristine source. */
879 static void enet_rcv (struct eth_device
*dev
, unsigned long malisr
)
881 struct enet_frame
*ef_ptr
;
882 unsigned long data_len
;
883 unsigned long rx_eob_isr
;
884 EMAC_440GX_HW_PST hw_p
= dev
->priv
;
890 rx_eob_isr
= mfdcr (malrxeobisr
);
/* Only proceed if the EOB bit for our channel (MSB >> devnum) is set. */
891 if ((0x80000000 >> hw_p
->devnum
) & rx_eob_isr
) {
893 mtdcr (malrxeobisr
, rx_eob_isr
);
896 while (1) { /* do all */
/* Stop at the first descriptor still owned by the MAL, or after one
 * full pass over the ring. */
899 if ((MAL_RX_CTRL_EMPTY
& hw_p
->rx
[i
].ctrl
)
900 || (loop_count
>= NUM_RX_BUFF
))
904 if (NUM_RX_BUFF
== hw_p
->rx_slot
)
907 data_len
= (unsigned long) hw_p
->rx
[i
].data_len
; /* Get len */
909 if (data_len
> ENET_MAX_MTU
) /* Check len */
912 if (EMAC_RX_ERRORS
& hw_p
->rx
[i
].ctrl
) { /* Check Errors */
914 hw_p
->stats
.rx_err_log
[hw_p
->
917 hw_p
->rx_err_index
++;
918 if (hw_p
->rx_err_index
==
923 } /* data_len < max mtu */
925 if (!data_len
) { /* no data */
926 hw_p
->rx
[i
].ctrl
|= MAL_RX_CTRL_EMPTY
; /* Free Recv Buffer */
928 hw_p
->stats
.data_len_err
++; /* Error at Rx */
933 /* Check if user has already eaten buffer */
934 /* if not => ERROR */
935 else if (hw_p
->rx_ready
[hw_p
->rx_i_index
] != -1) {
936 if (hw_p
->is_receiving
)
937 printf ("ERROR : Receive buffers are full!\n");
940 hw_p
->stats
.rx_frames
++;
941 hw_p
->stats
.rx
+= data_len
;
942 ef_ptr
= (struct enet_frame
*) hw_p
->rx
[i
].
945 hw_p
->stats
.pkts_rx
++;
/* Publish this descriptor index to the ready queue for eth_rx. */
950 hw_p
->rx_ready
[hw_p
->rx_i_index
] = i
;
952 if (NUM_RX_BUFF
== hw_p
->rx_i_index
)
953 hw_p
->rx_i_index
= 0;
955 /* printf("X"); /|* test-only *|/ */
958 * free receive buffer only when
959 * buffer has been handled (eth_rx)
960 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
964 } /* if EMACK_RXCHL */
/* U-Boot recv hook: consume descriptor indices queued by enet_rcv() in
 * rx_ready[], pass each frame to NetReceive() (length minus 4-byte FCS),
 * return the buffer to the MAL (EMPTY) and free the queue slot. Interrupts
 * are masked around the per-packet bookkeeping; is_receiving flags re-entry
 * for enet_rcv's "buffers full" diagnostics.
 * NOTE(review): the enclosing for() loop header and the wrap increment of
 * rx_u_index are missing from this extraction — verify against pristine
 * source. */
968 static int ppc_440x_eth_rx (struct eth_device
*dev
)
973 EMAC_440GX_HW_PST hw_p
= dev
->priv
;
975 hw_p
->is_receiving
= 1; /* tell driver */
979 * use ring buffer and
980 * get index from rx buffer descriptor queue
982 user_index
= hw_p
->rx_ready
[hw_p
->rx_u_index
];
983 if (user_index
== -1) {
985 break; /* nothing received - leave for() loop */
989 mtmsr (msr
& ~(MSR_EE
));
991 length
= hw_p
->rx
[user_index
].data_len
;
993 /* Pass the packet up to the protocol layers. */
994 /* NetReceive(NetRxPackets[rxIdx], length - 4); */
995 /* NetReceive(NetRxPackets[i], length); */
996 NetReceive (NetRxPackets
[user_index
], length
- 4);
997 /* Free Recv Buffer */
998 hw_p
->rx
[user_index
].ctrl
|= MAL_RX_CTRL_EMPTY
;
999 /* Free rx buffer descriptor queue */
1000 hw_p
->rx_ready
[hw_p
->rx_u_index
] = -1;
1002 if (NUM_RX_BUFF
== hw_p
->rx_u_index
)
1003 hw_p
->rx_u_index
= 0;
1005 #ifdef INFO_440_ENET
1006 hw_p
->stats
.pkts_handled
++;
1009 mtmsr (msr
); /* Enable IRQ's */
1012 hw_p
->is_receiving
= 0; /* tell driver */
/* Register all EMAC ports with U-Boot's multi-NIC framework: configure the
 * SDR pin-function bits, then for each of EMAC_NUM_DEV ports with a non-zero
 * MAC address in the board info, allocate an eth_device plus private state,
 * select the per-port register offset (hw_addr), wire up init/halt/send/recv,
 * and (once, guarded by `virgin`) program the shared MAL interrupt enables
 * and install the MAL interrupt handlers.
 * NOTE(review): this extraction drops the eth_register() call, the switch
 * framing around the per-port memcpy/hw_addr pairs, and the handler-arg
 * lines of irq_install_handler — verify against pristine source. */
1017 int ppc_440x_eth_initialize (bd_t
* bis
)
/* `virgin` guards one-time MAL setup across multiple calls. */
1019 static int virgin
= 0;
1021 struct eth_device
*dev
;
1024 EMAC_440GX_HW_PST hw
= NULL
;
/* Clear the ethernet-related pin-function select bits in SDR0_PFC1. */
1026 mfsdr (sdr_pfc1
, pfc1
);
1027 pfc1
&= ~(0x01e00000);
1029 mtsdr (sdr_pfc1
, pfc1
);
1031 for (eth_num
= 0; eth_num
< EMAC_NUM_DEV
; eth_num
++) {
1033 /* See if we can actually bring up the interface, otherwise, skip it */
1036 if (memcmp (bis
->bi_enetaddr
, "\0\0\0\0\0\0", 6) == 0)
1040 if (memcmp (bis
->bi_enet1addr
, "\0\0\0\0\0\0", 6) ==
1045 if (memcmp (bis
->bi_enet2addr
, "\0\0\0\0\0\0", 6) ==
1050 if (memcmp (bis
->bi_enet3addr
, "\0\0\0\0\0\0", 6) ==
1055 if (memcmp (bis
->bi_enetaddr
, "\0\0\0\0\0\0", 6) == 0)
1060 /* Allocate device structure */
1061 dev
= (struct eth_device
*) malloc (sizeof (*dev
));
1063 printf (__FUNCTION__
1064 ": Cannot allocate eth_device %d\n", eth_num
);
1068 /* Allocate our private use data */
1069 hw
= (EMAC_440GX_HW_PST
) malloc (sizeof (*hw
));
1071 printf (__FUNCTION__
1072 ": Cannot allocate private hw data for eth_device %d",
/* Per-port MAC address and EMAC register offset (0x0/0x100/0x400/0x600). */
1081 memcpy (dev
->enetaddr
, bis
->bi_enetaddr
, 6);
1084 hw
->hw_addr
= 0x100;
1085 memcpy (dev
->enetaddr
, bis
->bi_enet1addr
, 6);
1088 hw
->hw_addr
= 0x400;
1089 memcpy (dev
->enetaddr
, bis
->bi_enet2addr
, 6);
1092 hw
->hw_addr
= 0x600;
1093 memcpy (dev
->enetaddr
, bis
->bi_enet3addr
, 6);
1097 memcpy (dev
->enetaddr
, bis
->bi_enetaddr
, 6);
1101 hw
->devnum
= eth_num
;
1103 sprintf (dev
->name
, "ppc_440x_eth%d", eth_num
);
1104 dev
->priv
= (void *) hw
;
1105 dev
->init
= ppc_440x_eth_init
;
1106 dev
->halt
= ppc_440x_eth_halt
;
1107 dev
->send
= ppc_440x_eth_send
;
1108 dev
->recv
= ppc_440x_eth_rx
;
1111 /* set the MAL IER ??? names may change with new spec ??? */
1113 MAL_IER_DE
| MAL_IER_NE
| MAL_IER_TE
|
1114 MAL_IER_OPBE
| MAL_IER_PLBE
;
1115 mtdcr (malesr
, 0xffffffff); /* clear pending interrupts */
1116 mtdcr (maltxdeir
, 0xffffffff); /* clear pending interrupts */
1117 mtdcr (malrxdeir
, 0xffffffff); /* clear pending interrupts */
1118 mtdcr (malier
, mal_ier
);
1120 /* install MAL interrupt handler */
1121 irq_install_handler (VECNUM_MS
,
1122 (interrupt_handler_t
*) enetInt
,
1124 irq_install_handler (VECNUM_MTE
,
1125 (interrupt_handler_t
*) enetInt
,
1127 irq_install_handler (VECNUM_MRE
,
1128 (interrupt_handler_t
*) enetInt
,
1130 irq_install_handler (VECNUM_TXDE
,
1131 (interrupt_handler_t
*) enetInt
,
1133 irq_install_handler (VECNUM_RXDE
,
1134 (interrupt_handler_t
*) enetInt
,
1141 } /* end for each supported device */
1144 #endif /* CONFIG_440 && CONFIG_NET_MULTI */