/*-----------------------------------------------------------------------------+
 *
 * This source code has been made available to you by IBM on an AS-IS
 * basis. Anyone receiving this source is licensed under IBM
 * copyrights to use it in any way he or she deems fit, including
 * copying it, modifying it, compiling it, and redistributing it either
 * with or without modifications. No license under IBM patents or
 * patent applications is to be implied by the copyright license.
 *
 * Any user of this software should understand that IBM cannot provide
 * technical support for this software and will not be responsible for
 * any consequences resulting from the use of this software.
 *
 * Any person who transfers this source code or any derivative work
 * must include the IBM copyright notice, this paragraph, and the
 * preceding two paragraphs in the transferred software.
 *
 * COPYRIGHT I B M CORPORATION 1995
 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
 *-----------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------+
 *
 * File Name: enetemac.c
 *
 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
 *
 * Author: Mark Wisner
 *
 * Change Activity-
 *
 * Date      Description of Change                                       BY
 * --------- ---------------------                                       ---
 * 05-May-99 Created                                                     MKW
 * 27-Jun-99 Clean up                                                    JWB
 * 16-Jul-99 Added MAL error recovery and better IP packet handling      MKW
 * 29-Jul-99 Added Full duplex support                                   MKW
 * 06-Aug-99 Changed names for Mal CR reg                                MKW
 * 23-Aug-99 Turned off SYE when running at 10Mbps                       MKW
 * 24-Aug-99 Marked descriptor empty after call_xlc                      MKW
 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16     MCG
 *           to avoid chaining maximum sized packets. Push starting
 *           RX descriptor address up to the next cache line boundary.
 * 16-Jan-00 Added support for booting with IP of 0x0                    MKW
 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
 *           EMAC_RXM register.                                          JWB
 * 12-Mar-01 anne-sophie.harnois@nextream.fr
 *           - Variables are compatible with those already defined in
 *             include/net.h
 *           - Receive buffer descriptor ring is used to send buffers
 *             to the user
 *           - Info print about send/received/handled packet number if
 *             INFO_405_ENET is set
 * 17-Apr-01 stefan.roese@esd-electronics.com
 *           - MAL reset in "eth_halt" included
 *           - Enet speed and duplex output now in one line
 * 08-May-01 stefan.roese@esd-electronics.com
 *           - MAL error handling added (eth_init called again)
 * 13-Nov-01 stefan.roese@esd-electronics.com
 *           - Set IST bit in EMAC_M1 reg upon 100MBit or full duplex
 * 04-Jan-02 stefan.roese@esd-electronics.com
 *           - Wait for PHY auto negotiation to complete added
 * 06-Feb-02 stefan.roese@esd-electronics.com
 *           - Bug fixed in waiting for auto negotiation to complete
 * 26-Feb-02 stefan.roese@esd-electronics.com
 *           - rx and tx buffer descriptors now allocated (no fixed address
 *             used anymore)
 * 17-Jun-02 stefan.roese@esd-electronics.com
 *           - MAL error debug printf 'M' removed (rx de interrupt may
 *             occur upon many incoming packets with only 4 rx buffers).
 *-----------------------------------------------------------------------------*
 * 17-Nov-03 travis.sawyer@sandburst.com
 *           - ported from 405gp_enet.c to utilize up to 4 EMAC ports
 *             in the 440GX. This port should work with the 440GP
 *             (2 EMACs) also
 *-----------------------------------------------------------------------------*/

#include <config.h>
#if defined(CONFIG_440) && defined(CONFIG_NET_MULTI)

#include <common.h>
#include <net.h>
#include <asm/processor.h>
#include <ppc440.h>
#include <commproc.h>
#include <440gx_enet.h>
#include <405_mal.h>
#include <miiphy.h>
#include <malloc.h>
#include "vecnum.h"


#define EMAC_RESET_TIMEOUT 1000		/* 1000 ms reset timeout */
#define PHY_AUTONEGOTIATE_TIMEOUT 4000	/* 4000 ms autonegotiate timeout */


/* Ethernet Transmit and Receive Buffers */
/* AS.HARNOIS
 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
 */
#define ENET_MAX_MTU		PKTSIZE
#define ENET_MAX_MTU_ALIGNED	PKTSIZE_ALIGN


/* define the number of channels implemented */
#define EMAC_RXCHL	EMAC_NUM_DEV
#define EMAC_TXCHL	EMAC_NUM_DEV

/*-----------------------------------------------------------------------------+
 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
 * Interrupt Controller).
 *-----------------------------------------------------------------------------*/
#define MAL_UIC_ERR	(UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
#define MAL_UIC_DEF	(UIC_MAL_RXEOB | MAL_UIC_ERR)
#define EMAC_UIC_DEF	UIC_ENET

#undef INFO_440_ENET

#define BI_PHYMODE_NONE	 0
#define BI_PHYMODE_ZMII	 1
#define BI_PHYMODE_RGMII 2

/*-----------------------------------------------------------------------------+
 * Global variables. TX and RX descriptors and buffers.
 *-----------------------------------------------------------------------------*/
/* IER globals */
static uint32_t mal_ier;

/*-----------------------------------------------------------------------------+
 * Prototypes and externals.
 *-----------------------------------------------------------------------------*/
static void enet_rcv (struct eth_device *dev, unsigned long malisr);

int enetInt (struct eth_device *dev);
static void mal_err (struct eth_device *dev, unsigned long isr,
		     unsigned long uic, unsigned long maldef,
		     unsigned long mal_errr);
static void emac_err (struct eth_device *dev, unsigned long isr);

/*-----------------------------------------------------------------------------+
| ppc_440x_eth_halt
| Disable MAL channel, and EMACn
|
+-----------------------------------------------------------------------------*/
static void ppc_440x_eth_halt (struct eth_device *dev)
{
	EMAC_440GX_HW_PST hw_p = dev->priv;
	uint32_t failsafe = 10000;

	out32 (EMAC_IER + hw_p->hw_addr, 0x00000000);	/* disable emac interrupts */

	/* 1st reset MAL channel */
	/* Note: writing a 0 to a channel has no effect */
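	/* The channel-active-reset registers carry one bit per MAL channel,
	 * with channel 0 in the most significant bit.  Assuming the usual
	 * 405_mal.h encoding where MAL_CR_MMSR is the MSB (0x80000000),
	 * shifting it right by devnum selects this EMAC's channel bit. */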
	mtdcr (maltxcarr, (MAL_CR_MMSR >> hw_p->devnum));
	mtdcr (malrxcarr, (MAL_CR_MMSR >> hw_p->devnum));

	/* wait for reset */
	while (mfdcr (maltxcasr) & (MAL_CR_MMSR >> hw_p->devnum)) {
		udelay (1000);	/* Delay 1 MS so as not to hammer the register */
		failsafe--;
		if (failsafe == 0)
			break;
	}

	/* EMAC RESET */
	out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);

	hw_p->print_speed = 1;	/* print speed message again next time */

	return;
}


extern int phy_setup_aneg (unsigned char addr);
extern int miiphy_reset (unsigned char addr);

static int ppc_440x_eth_init (struct eth_device *dev, bd_t * bis)
{
	int i;
	unsigned long reg;
	unsigned long msr;
	unsigned long speed;
	unsigned long duplex;
	unsigned long failsafe;
	unsigned mode_reg;
	unsigned short devnum;
	unsigned short reg_short;
	sys_info_t sysinfo;

	EMAC_440GX_HW_PST hw_p = dev->priv;

	/* before doing anything, figure out if we have a MAC address */
	/* if not, bail */
	if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0)
		return -1;

	/* Need to get the OPB frequency so we can access the PHY */
	get_sys_info (&sysinfo);


	msr = mfmsr ();
	mtmsr (msr & ~(MSR_EE));	/* disable interrupts */

	devnum = hw_p->devnum;

#ifdef INFO_440_ENET
	/* AS.HARNOIS
	 * We should have:
	 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
	 * In most cases hw_p->stats.pkts_handled = hw_p->stats.pkts_rx, but it
	 * is possible that new packets (unrelated to the current transfer)
	 * have had time to arrive before netloop calls eth_halt
	 */
	printf ("About preceding transfer (eth%d):\n"
		"- Sent packet number %d\n"
		"- Received packet number %d\n"
		"- Handled packet number %d\n",
		hw_p->devnum,
		hw_p->stats.pkts_tx,
		hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);

	hw_p->stats.pkts_tx = 0;
	hw_p->stats.pkts_rx = 0;
	hw_p->stats.pkts_handled = 0;
#endif

	/* MAL Channel RESET */
	/* 1st reset MAL channel */
	/* Note: writing a 0 to a channel has no effect */
	mtdcr (maltxcarr, (MAL_TXRX_CASR >> hw_p->devnum));
	mtdcr (malrxcarr, (MAL_TXRX_CASR >> hw_p->devnum));

	/* wait for reset */
	failsafe = 10000;
	while (mfdcr (maltxcasr) & (MAL_CR_MMSR >> hw_p->devnum)) {
		udelay (1000);	/* Delay 1 MS so as not to hammer the register */
		failsafe--;
		if (failsafe == 0)
			break;
	}

	hw_p->tx_err_index = 0;	/* Transmit Error Index for tx_err_log */
	hw_p->rx_err_index = 0;	/* Receive Error Index for rx_err_log */

	hw_p->rx_slot = 0;	/* MAL Receive Slot */
	hw_p->rx_i_index = 0;	/* Receive Interrupt Queue Index */
	hw_p->rx_u_index = 0;	/* Receive User Queue Index */

	hw_p->tx_slot = 0;	/* MAL Transmit Slot */
	hw_p->tx_i_index = 0;	/* Transmit Interrupt Queue Index */
	hw_p->tx_u_index = 0;	/* Transmit User Queue Index */

	/* enable MDI on the ZMII bridge so we can reach the PHY */
	/* NOTE: 440GX spec states that mode is mutually exclusive */
	/* NOTE: Therefore, disable all other EMACS, since we handle */
	/* NOTE: only one emac at a time */
	reg = 0;
	out32 (ZMII_FER, 0);
	udelay (100);
	out32 (ZMII_FER, ZMII_FER_MDI << ZMII_FER_V (devnum));
	out32 (ZMII_SSR, 0x11110000);
	/* reset emac so we have access to the phy */
	__asm__ volatile ("eieio");

	out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
	__asm__ volatile ("eieio");

	if ((devnum == 2) || (devnum == 3))
		out32 (RGMII_FER, ((RGMII_FER_RGMII << RGMII_FER_V (2)) |
				   (RGMII_FER_RGMII << RGMII_FER_V (3))));
	__asm__ volatile ("eieio");

	failsafe = 1000;
	while ((in32 (EMAC_M0 + hw_p->hw_addr) & (EMAC_M0_SRST)) && failsafe) {
		udelay (1000);
		failsafe--;
	}

	/* Whack the M1 register */
	mode_reg = 0x0;
	mode_reg &= ~0x00000038;
	if (sysinfo.freqOPB <= 50000000)
		;		/* <= 50 MHz: leave the OBCI field at zero */
	else if (sysinfo.freqOPB <= 66666667)
		mode_reg |= EMAC_M1_OBCI_66;
	else if (sysinfo.freqOPB <= 83333333)
		mode_reg |= EMAC_M1_OBCI_83;
	else if (sysinfo.freqOPB <= 100000000)
		mode_reg |= EMAC_M1_OBCI_100;
	else
		mode_reg |= EMAC_M1_OBCI_GT100;

	out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);

	/* wait for PHY to complete auto negotiation */
	reg_short = 0;
#ifndef CONFIG_CS8952_PHY
	switch (devnum) {
	case 0:
		reg = CONFIG_PHY_ADDR;
		break;
	case 1:
		reg = CONFIG_PHY1_ADDR;
		break;
#if defined (CONFIG_440_GX)
	case 2:
		reg = CONFIG_PHY2_ADDR;
		break;
	case 3:
		reg = CONFIG_PHY3_ADDR;
		break;
#endif
	default:
		reg = CONFIG_PHY_ADDR;
		break;
	}

	bis->bi_phynum[devnum] = reg;

	/* Reset the phy */
	miiphy_reset (reg);

	/* Start/Restart autonegotiation */
	phy_setup_aneg (reg);
	udelay (1000);

	miiphy_read (reg, PHY_BMSR, &reg_short);

	/*
	 * Wait if the PHY is capable of autonegotiation and autonegotiation
	 * has not yet completed
	 */
	if ((reg_short & PHY_BMSR_AUTN_ABLE)
	    && !(reg_short & PHY_BMSR_AUTN_COMP)) {
		puts ("Waiting for PHY auto negotiation to complete");
		i = 0;
		while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
			/*
			 * Timeout reached?
			 */
			if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
				puts (" TIMEOUT !\n");
				break;
			}

			if ((i++ % 1000) == 0) {
				putc ('.');
			}
			udelay (1000);	/* 1 ms */
			miiphy_read (reg, PHY_BMSR, &reg_short);
		}
		puts (" done\n");
		udelay (500000);	/* another 500 ms (results in faster booting) */
	}
#endif
	speed = miiphy_speed (reg);
	duplex = miiphy_duplex (reg);

	if (hw_p->print_speed) {
		hw_p->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	/* Set ZMII/RGMII speed according to the phy link speed */
	reg = in32 (ZMII_SSR);
	if (speed == 100)
		out32 (ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
	else
		out32 (ZMII_SSR,
		       reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));

	if ((devnum == 2) || (devnum == 3)) {
		if (speed == 1000)
			reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
		else if (speed == 100)
			reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
		else
			reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));

		out32 (RGMII_SSR, reg);
	}

	/* set the MAL configuration reg */
	/* Errata 1.12: MAL_1 -- Disable MAL bursting */
	if (get_pvr () == PVR_440GP_RB)
		mtdcr (malmcr,
		       MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
	else
		mtdcr (malmcr,
		       MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
		       MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);

	/* Free "old" buffers */
	if (hw_p->alloc_tx_buf)
		free (hw_p->alloc_tx_buf);
	if (hw_p->alloc_rx_buf)
		free (hw_p->alloc_rx_buf);

	/*
	 * Malloc MAL buffer descriptors, make sure they are
	 * aligned on a cache line boundary
	 * (401/403/IOP480 = 16, 405 = 32)
	 * and don't cross cache block boundaries.
	 */
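	/*
	 * Alignment sketch (assuming, for illustration, CFG_CACHELINE_SIZE = 32
	 * as is typical for 440 boards): malloc() below returns the descriptor
	 * size plus 2*32 - 2 bytes of slack, so there is always room to advance
	 * the pointer by (32 - (ptr & CACHELINE_MASK)) bytes and still hold all
	 * descriptors; the rounded-up pointer then starts on a cache line.
	 */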
	hw_p->alloc_tx_buf =
		(mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_TX_BUFF) +
				       ((2 * CFG_CACHELINE_SIZE) - 2));
	if (((int) hw_p->alloc_tx_buf & CACHELINE_MASK) != 0) {
		hw_p->tx =
			(mal_desc_t *) ((int) hw_p->alloc_tx_buf +
					CFG_CACHELINE_SIZE -
					((int) hw_p->
					 alloc_tx_buf & CACHELINE_MASK));
	} else {
		hw_p->tx = hw_p->alloc_tx_buf;
	}

	hw_p->alloc_rx_buf =
		(mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_RX_BUFF) +
				       ((2 * CFG_CACHELINE_SIZE) - 2));
	if (((int) hw_p->alloc_rx_buf & CACHELINE_MASK) != 0) {
		hw_p->rx =
			(mal_desc_t *) ((int) hw_p->alloc_rx_buf +
					CFG_CACHELINE_SIZE -
					((int) hw_p->
					 alloc_rx_buf & CACHELINE_MASK));
	} else {
		hw_p->rx = hw_p->alloc_rx_buf;
	}

	for (i = 0; i < NUM_TX_BUFF; i++) {
		hw_p->tx[i].ctrl = 0;
		hw_p->tx[i].data_len = 0;
		if (hw_p->first_init == 0)
			hw_p->txbuf_ptr =
				(char *) malloc (ENET_MAX_MTU_ALIGNED);
		hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
		if ((NUM_TX_BUFF - 1) == i)
			hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
		hw_p->tx_run[i] = -1;
#if 0
		printf ("TX_BUFF %d @ 0x%08lx\n", i,
			(ulong) hw_p->tx[i].data_ptr);
#endif
	}

	for (i = 0; i < NUM_RX_BUFF; i++) {
		hw_p->rx[i].ctrl = 0;
		hw_p->rx[i].data_len = 0;
		/* rx[i].data_ptr = (char *) &rx_buff[i]; */
		hw_p->rx[i].data_ptr = (char *) NetRxPackets[i];
		if ((NUM_RX_BUFF - 1) == i)
			hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
		hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
		hw_p->rx_ready[i] = -1;
#if 0
		printf ("RX_BUFF %d @ 0x%08lx\n", i, (ulong) rx[i].data_ptr);
#endif
	}

	reg = 0x00000000;

	reg |= dev->enetaddr[0];	/* set high address */
	reg = reg << 8;
	reg |= dev->enetaddr[1];

	out32 (EMAC_IAH + hw_p->hw_addr, reg);

	reg = 0x00000000;
	reg |= dev->enetaddr[2];	/* set low address */
	reg = reg << 8;
	reg |= dev->enetaddr[3];
	reg = reg << 8;
	reg |= dev->enetaddr[4];
	reg = reg << 8;
	reg |= dev->enetaddr[5];

	out32 (EMAC_IAL + hw_p->hw_addr, reg);
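	/*
	 * The individual address is split across two registers: the first two
	 * MAC bytes end up in the low 16 bits of EMAC_IAH, the remaining four
	 * bytes fill EMAC_IAL.  For 00:11:22:33:44:55 the writes above store
	 * 0x00000011 and 0x22334455 respectively.
	 */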

	switch (devnum) {
	case 1:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp1r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
		mtdcr (malrxctp1r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs1, ENET_MAX_MTU_ALIGNED / 16);
		break;
#if defined (CONFIG_440_GX)
	case 2:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp2r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
		mtdcr (malrxctp2r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs2, ENET_MAX_MTU_ALIGNED / 16);
		break;
	case 3:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp3r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
		mtdcr (malrxctp3r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs3, ENET_MAX_MTU_ALIGNED / 16);
		break;
#endif /* CONFIG_440_GX */
	case 0:
	default:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp0r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
		mtdcr (malrxctp0r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs0, ENET_MAX_MTU_ALIGNED / 16);
		break;
	}

	/* Enable MAL transmit and receive channels */
	mtdcr (maltxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
	mtdcr (malrxcasr, (MAL_TXRX_CASR >> hw_p->devnum));

	/* set transmit enable & receive enable */
	out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_TXE | EMAC_M0_RXE);

	/* set receive fifo to 4k and tx fifo to 2k */
	mode_reg = in32 (EMAC_M1 + hw_p->hw_addr);
	mode_reg |= EMAC_M1_RFS_4K | EMAC_M1_TX_FIFO_2K;

	/* set speed */
	/* TBS: do 1GbE */
	if (speed == _100BASET)
		mode_reg = mode_reg | EMAC_M1_MF_100MBPS | EMAC_M1_IST;
	else
		mode_reg = mode_reg & ~0x00C00000;	/* 10 MBPS */
	if (duplex == FULL)
		mode_reg = mode_reg | 0x80000000 | EMAC_M1_IST;

	out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);

	/* Enable broadcast and individual address */
	/* TBS: enabling runts as some misbehaved nics will send runts */
	out32 (EMAC_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);

	/* we probably need to set the tx mode1 reg? maybe at tx time */

	/* set transmit request threshold register */
	out32 (EMAC_TRTR + hw_p->hw_addr, 0x18000000);	/* 256 byte threshold */

	/* set receive low/high water mark register */
	/* 440GP has a 64 byte burst length */
	out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
	out32 (EMAC_TXM1 + hw_p->hw_addr, 0xf8640000);

	/* Set fifo limit entry in tx mode 0 */
	out32 (EMAC_TXM0 + hw_p->hw_addr, 0x00000003);
	/* Frame gap set */
	out32 (EMAC_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);

	/* Set EMAC IER */
	hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS |
		EMAC_ISR_ORE | EMAC_ISR_IRE;
	if (speed == _100BASET)
		hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;

	out32 (EMAC_ISR + hw_p->hw_addr, 0xffffffff);	/* clear pending interrupts */
	out32 (EMAC_IER + hw_p->hw_addr, hw_p->emac_ier);

	if (hw_p->first_init == 0) {
		/*
		 * Connect interrupt service routines
		 */
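		/*
		 * The per-port wake-up and status vectors appear to be laid
		 * out pairwise in vecnum.h (EWU0, ETH0, EWU1, ETH1, ...),
		 * which is why a stride of devnum * 2 is added to the base
		 * vector numbers below.
		 */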
		irq_install_handler (VECNUM_EWU0 + (hw_p->devnum * 2),
				     (interrupt_handler_t *) enetInt, dev);
		irq_install_handler (VECNUM_ETH0 + (hw_p->devnum * 2),
				     (interrupt_handler_t *) enetInt, dev);
	}

	mtmsr (msr);		/* enable interrupts again */

	hw_p->bis = bis;
	hw_p->first_init = 1;

	return (1);
}


static int ppc_440x_eth_send (struct eth_device *dev, volatile void *ptr,
			      int len)
{
	struct enet_frame *ef_ptr;
	ulong time_start, time_now;
	unsigned long temp_txm0;
	EMAC_440GX_HW_PST hw_p = dev->priv;

	ef_ptr = (struct enet_frame *) ptr;

	/*-----------------------------------------------------------------------+
	 * Copy our address into the frame.
	 *-----------------------------------------------------------------------*/
	(void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);

	/*-----------------------------------------------------------------------+
	 * If the frame is too long, truncate it.
	 *-----------------------------------------------------------------------*/
	/* TBS: where does the fragment go???? */
	if (len > ENET_MAX_MTU)
		len = ENET_MAX_MTU;

	/* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
	memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);

	/*-----------------------------------------------------------------------+
	 * set TX Buffer busy, and send it
	 *-----------------------------------------------------------------------*/
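	/*
	 * Note on ordering: the descriptor is marked READY first, then the
	 * eieio barrier below is relied on to make that update visible before
	 * the GNP0 ("get new packet") bit is set in TXM0, so the MAL never
	 * sees the transmit request ahead of a ready descriptor.
	 */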
	hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
					EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
		~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
	if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
		hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;

	hw_p->tx[hw_p->tx_slot].data_len = (short) len;
	hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;

	__asm__ volatile ("eieio");

	out32 (EMAC_TXM0 + hw_p->hw_addr,
	       in32 (EMAC_TXM0 + hw_p->hw_addr) | EMAC_TXM0_GNP0);
#ifdef INFO_440_ENET
	hw_p->stats.pkts_tx++;
#endif

	/*-----------------------------------------------------------------------+
	 * poll until the packet is sent and then make sure it is OK
	 *-----------------------------------------------------------------------*/
	time_start = get_timer (0);
	while (1) {
		temp_txm0 = in32 (EMAC_TXM0 + hw_p->hw_addr);
		/* loop until either GNP0 clears or 3 seconds elapse */
		if ((temp_txm0 & EMAC_TXM0_GNP0) != 0) {
			/* GNP0 is still set, so the EMAC has not taken the
			 * packet yet; give up after 3 seconds.  If there was
			 * an error, an interrupt should fire when we return.
			 */
			time_now = get_timer (0);
			if ((time_now - time_start) > 3000) {
				return (-1);
			}
		} else {
			return (len);
		}
	}
}


int enetInt (struct eth_device *dev)
{
	int serviced;
	int rc = -1;		/* default to not us */
	unsigned long mal_isr;
	unsigned long emac_isr = 0;
	unsigned long mal_rx_eob;
	unsigned long my_uic0msr, my_uic1msr;

#if defined(CONFIG_440_GX)
	unsigned long my_uic2msr;
#endif
	EMAC_440GX_HW_PST hw_p;

	/*
	 * Because the MAL is shared by all EMACs, we need to get the
	 * current eth device
	 */
	dev = eth_get_dev ();

	hw_p = dev->priv;

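	/*
	 * Interrupt routing as this handler reads it: the MAL TX/RX
	 * end-of-buffer bits are reported through UIC0, the MAL error bits
	 * (SERR/TXDE/RXDE) and the EMAC0/EMAC1 status lines through UIC1,
	 * and on the 440GX the EMAC2/EMAC3 status lines through UIC2.
	 */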
	/* enter loop that stays in interrupt code until nothing to service */
	do {
		serviced = 0;

		my_uic0msr = mfdcr (uic0msr);
		my_uic1msr = mfdcr (uic1msr);
#if defined(CONFIG_440_GX)
		my_uic2msr = mfdcr (uic2msr);
#endif
		if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
		    && !(my_uic1msr &
			 (UIC_ETH0 | UIC_ETH1 | UIC_MS | UIC_MTDE |
			  UIC_MRDE))) {
			/* not for us */
			return (rc);
		}
#if defined (CONFIG_440_GX)
		if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
		    && !(my_uic2msr & (UIC_ETH2 | UIC_ETH3))) {
			/* not for us */
			return (rc);
		}
#endif
		/* get and clear controller status interrupts */
		/* look at MAL and EMAC interrupts */
		if ((my_uic0msr & (UIC_MRE | UIC_MTE))
		    || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
			/* we have a MAL interrupt */
			mal_isr = mfdcr (malesr);
			/* look for mal error */
			if (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE)) {
				mal_err (dev, mal_isr, my_uic0msr,
					 MAL_UIC_DEF, MAL_UIC_ERR);
				serviced = 1;
				rc = 0;
			}
		}

		/* port by port dispatch of emac interrupts */
		if (hw_p->devnum == 0) {
			if (UIC_ETH0 & my_uic1msr) {	/* look for EMAC errors */
				emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
				if ((hw_p->emac_ier & emac_isr) != 0) {
					emac_err (dev, emac_isr);
					serviced = 1;
					rc = 0;
				}
			}
			if ((hw_p->emac_ier & emac_isr)
			    || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
				mtdcr (uic0sr, UIC_MRE | UIC_MTE);	/* Clear */
				mtdcr (uic1sr, UIC_ETH0 | UIC_MS | UIC_MTDE | UIC_MRDE);	/* Clear */
				return (rc);	/* we had errors so get out */
			}
		}

		if (hw_p->devnum == 1) {
			if (UIC_ETH1 & my_uic1msr) {	/* look for EMAC errors */
				emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
				if ((hw_p->emac_ier & emac_isr) != 0) {
					emac_err (dev, emac_isr);
					serviced = 1;
					rc = 0;
				}
			}
			if ((hw_p->emac_ier & emac_isr)
			    || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
				mtdcr (uic0sr, UIC_MRE | UIC_MTE);	/* Clear */
				mtdcr (uic1sr, UIC_ETH1 | UIC_MS | UIC_MTDE | UIC_MRDE);	/* Clear */
				return (rc);	/* we had errors so get out */
			}
		}
#if defined (CONFIG_440_GX)
		if (hw_p->devnum == 2) {
			if (UIC_ETH2 & my_uic2msr) {	/* look for EMAC errors */
				emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
				if ((hw_p->emac_ier & emac_isr) != 0) {
					emac_err (dev, emac_isr);
					serviced = 1;
					rc = 0;
				}
			}
			if ((hw_p->emac_ier & emac_isr)
			    || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
				mtdcr (uic0sr, UIC_MRE | UIC_MTE);	/* Clear */
				mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE);	/* Clear */
				mtdcr (uic2sr, UIC_ETH2);
				return (rc);	/* we had errors so get out */
			}
		}

		if (hw_p->devnum == 3) {
			if (UIC_ETH3 & my_uic2msr) {	/* look for EMAC errors */
				emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
				if ((hw_p->emac_ier & emac_isr) != 0) {
					emac_err (dev, emac_isr);
					serviced = 1;
					rc = 0;
				}
			}
			if ((hw_p->emac_ier & emac_isr)
			    || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
				mtdcr (uic0sr, UIC_MRE | UIC_MTE);	/* Clear */
				mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE);	/* Clear */
				mtdcr (uic2sr, UIC_ETH3);
				return (rc);	/* we had errors so get out */
			}
		}
#endif /* CONFIG_440_GX */
		/* handle MAL TX EOB interrupt from a tx */
		if (my_uic0msr & UIC_MTE) {
			mal_rx_eob = mfdcr (maltxeobisr);
			mtdcr (maltxeobisr, mal_rx_eob);
			mtdcr (uic0sr, UIC_MTE);
		}
		/* handle MAL RX EOB interrupt from a receive */
		/* check for EOB on valid channels */
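		/*
		 * malrxeobisr carries one end-of-buffer bit per MAL channel,
		 * channel 0 in the most significant bit (the same convention
		 * this driver uses elsewhere), so (0x80000000 >> devnum) is
		 * the bit belonging to this EMAC's receive channel.
		 */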
		if (my_uic0msr & UIC_MRE) {
			mal_rx_eob = mfdcr (malrxeobisr);
			if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) {	/* call emac routine for channel x */
				/* clear EOB
				   mtdcr(malrxeobisr, mal_rx_eob); */
				enet_rcv (dev, emac_isr);
				/* indicate that we serviced an interrupt */
				serviced = 1;
				rc = 0;
			}
		}
		mtdcr (uic0sr, UIC_MRE);	/* Clear */
		mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE);	/* Clear */
		switch (hw_p->devnum) {
		case 0:
			mtdcr (uic1sr, UIC_ETH0);
			break;
		case 1:
			mtdcr (uic1sr, UIC_ETH1);
			break;
#if defined (CONFIG_440_GX)
		case 2:
			mtdcr (uic2sr, UIC_ETH2);
			break;
		case 3:
			mtdcr (uic2sr, UIC_ETH3);
			break;
#endif /* CONFIG_440_GX */
		default:
			break;
		}
	} while (serviced);

	return (rc);
}

/*-----------------------------------------------------------------------------+
 * MAL Error Routine
 *-----------------------------------------------------------------------------*/
static void mal_err (struct eth_device *dev, unsigned long isr,
		     unsigned long uic, unsigned long maldef,
		     unsigned long mal_errr)
{
	EMAC_440GX_HW_PST hw_p = dev->priv;

	mtdcr (malesr, isr);	/* clear interrupt */

	/* clear DE interrupt */
	mtdcr (maltxdeir, 0xC0000000);
	mtdcr (malrxdeir, 0x80000000);

#ifdef INFO_440_ENET
	printf ("\nMAL error occurred.... ISR = %lx UIC = %lx MAL_DEF = %lx MAL_ERR = %lx\n", isr, uic, maldef, mal_errr);
#endif

	eth_init (hw_p->bis);	/* start again... */
}

/*-----------------------------------------------------------------------------+
 * EMAC Error Routine
 *-----------------------------------------------------------------------------*/
static void emac_err (struct eth_device *dev, unsigned long isr)
{
	EMAC_440GX_HW_PST hw_p = dev->priv;

	printf ("EMAC%d error occurred.... ISR = %lx\n", hw_p->devnum, isr);
	out32 (EMAC_ISR + hw_p->hw_addr, isr);
}

/*-----------------------------------------------------------------------------+
 * enet_rcv() handles the ethernet receive data
 *-----------------------------------------------------------------------------*/
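/*
 * Handoff between interrupt and polling context, as this driver structures
 * it: enet_rcv() runs from enetInt(), walks the MAL descriptor ring starting
 * at rx_slot and records each filled slot index in rx_ready[] at rx_i_index;
 * ppc_440x_eth_rx() later consumes those indices at rx_u_index, passes the
 * frame to NetReceive() and only then marks the descriptor EMPTY again.
 */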
static void enet_rcv (struct eth_device *dev, unsigned long malisr)
{
	struct enet_frame *ef_ptr;
	unsigned long data_len;
	unsigned long rx_eob_isr;
	EMAC_440GX_HW_PST hw_p = dev->priv;

	int handled = 0;
	int i;
	int loop_count = 0;

	rx_eob_isr = mfdcr (malrxeobisr);
	if ((0x80000000 >> hw_p->devnum) & rx_eob_isr) {
		/* clear EOB */
		mtdcr (malrxeobisr, rx_eob_isr);

		/* EMAC RX done */
		while (1) {	/* do all */
			i = hw_p->rx_slot;

			if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
			    || (loop_count >= NUM_RX_BUFF))
				break;
			loop_count++;
			hw_p->rx_slot++;
			if (NUM_RX_BUFF == hw_p->rx_slot)
				hw_p->rx_slot = 0;
			handled++;
			data_len = (unsigned long) hw_p->rx[i].data_len;	/* Get len */
			if (data_len) {
				if (data_len > ENET_MAX_MTU)	/* Check len */
					data_len = 0;
				else {
					if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) {	/* Check Errors */
						data_len = 0;
						hw_p->stats.rx_err_log[hw_p->rx_err_index]
							= hw_p->rx[i].ctrl;
						hw_p->rx_err_index++;
						if (hw_p->rx_err_index == MAX_ERR_LOG)
							hw_p->rx_err_index = 0;
					}	/* emac errors */
				}	/* data_len < max mtu */
			}	/* if data_len */
			if (!data_len) {	/* no data */
				hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY;	/* Free Recv Buffer */

				hw_p->stats.data_len_err++;	/* Error at Rx */
			}

			/* !data_len */
			/* AS.HARNOIS */
			/* Check if user has already eaten buffer */
			/* if not => ERROR */
			else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
				if (hw_p->is_receiving)
					printf ("ERROR : Receive buffers are full!\n");
				break;
			} else {
				hw_p->stats.rx_frames++;
				hw_p->stats.rx += data_len;
				ef_ptr = (struct enet_frame *) hw_p->rx[i].data_ptr;
#ifdef INFO_440_ENET
				hw_p->stats.pkts_rx++;
#endif
				/* AS.HARNOIS
				 * use ring buffer
				 */
				hw_p->rx_ready[hw_p->rx_i_index] = i;
				hw_p->rx_i_index++;
				if (NUM_RX_BUFF == hw_p->rx_i_index)
					hw_p->rx_i_index = 0;

				/* printf("X"); /|* test-only *|/ */

				/* AS.HARNOIS
				 * free receive buffer only when
				 * buffer has been handled (eth_rx)
				 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
				 */
			}	/* if data_len */
		}	/* while */
	}	/* if this channel's RX EOB bit was set */
}


static int ppc_440x_eth_rx (struct eth_device *dev)
{
	int length;
	int user_index;
	unsigned long msr;
	EMAC_440GX_HW_PST hw_p = dev->priv;

	hw_p->is_receiving = 1;	/* tell driver */

	for (;;) {
		/* AS.HARNOIS
		 * use ring buffer and
		 * get index from rx buffer descriptor queue
		 */
		user_index = hw_p->rx_ready[hw_p->rx_u_index];
		if (user_index == -1) {
			length = -1;
			break;	/* nothing received - leave for() loop */
		}

		msr = mfmsr ();
		mtmsr (msr & ~(MSR_EE));

		length = hw_p->rx[user_index].data_len;

		/* Pass the packet up to the protocol layers. */
		/* NetReceive(NetRxPackets[rxIdx], length - 4); */
		/* NetReceive(NetRxPackets[i], length); */
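		/* The trailing 4 bytes are dropped here on the assumption
		 * that the EMAC stores the received frame's FCS/CRC along
		 * with the data; only the payload is handed up. */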
		NetReceive (NetRxPackets[user_index], length - 4);
		/* Free Recv Buffer */
		hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
		/* Free rx buffer descriptor queue */
		hw_p->rx_ready[hw_p->rx_u_index] = -1;
		hw_p->rx_u_index++;
		if (NUM_RX_BUFF == hw_p->rx_u_index)
			hw_p->rx_u_index = 0;

#ifdef INFO_440_ENET
		hw_p->stats.pkts_handled++;
#endif

		mtmsr (msr);	/* Enable IRQ's */
	}

	hw_p->is_receiving = 0;	/* tell driver */

	return length;
}

int ppc_440x_eth_initialize (bd_t * bis)
{
	static int virgin = 0;
	unsigned long pfc1;
	struct eth_device *dev;
	int eth_num = 0;

	EMAC_440GX_HW_PST hw = NULL;

	mfsdr (sdr_pfc1, pfc1);
	pfc1 &= ~(0x01e00000);
	pfc1 |= 0x01200000;
	mtsdr (sdr_pfc1, pfc1);
	/* set phy num and mode */
	bis->bi_phynum[0] = CONFIG_PHY_ADDR;
	bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
	bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
	bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
	bis->bi_phymode[0] = 0;
	bis->bi_phymode[1] = 0;
	bis->bi_phymode[2] = 2;
	bis->bi_phymode[3] = 2;
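	/* The literals above line up with the BI_PHYMODE_* values defined at
	 * the top of this file: 0 == BI_PHYMODE_NONE, 2 == BI_PHYMODE_RGMII
	 * for the two RGMII-capable ports (EMAC2/3). */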

	for (eth_num = 0; eth_num < EMAC_NUM_DEV; eth_num++) {

		/* See if we can actually bring up the interface, otherwise, skip it */
		switch (eth_num) {
		case 0:
			if (memcmp (bis->bi_enetaddr, "\0\0\0\0\0\0", 6) == 0) {
				bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
				continue;
			}
			break;
		case 1:
			if (memcmp (bis->bi_enet1addr, "\0\0\0\0\0\0", 6) == 0) {
				bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
				continue;
			}
			break;
		case 2:
			if (memcmp (bis->bi_enet2addr, "\0\0\0\0\0\0", 6) == 0) {
				bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
				continue;
			}
			break;
		case 3:
			if (memcmp (bis->bi_enet3addr, "\0\0\0\0\0\0", 6) == 0) {
				bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
				continue;
			}
			break;
		default:
			if (memcmp (bis->bi_enetaddr, "\0\0\0\0\0\0", 6) == 0) {
				bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
				continue;
			}
			break;
		}

		/* Allocate device structure */
		dev = (struct eth_device *) malloc (sizeof (*dev));
		if (dev == NULL) {
			printf ("ppc_440x_eth_initialize: "
				"Cannot allocate eth_device %d\n", eth_num);
			return (-1);
		}

		/* Allocate our private use data */
		hw = (EMAC_440GX_HW_PST) malloc (sizeof (*hw));
		if (hw == NULL) {
			printf ("ppc_440x_eth_initialize: "
				"Cannot allocate private hw data for eth_device %d",
				eth_num);
			free (dev);
			return (-1);
		}

		switch (eth_num) {
		case 0:
			hw->hw_addr = 0;
			memcpy (dev->enetaddr, bis->bi_enetaddr, 6);
			break;
		case 1:
			hw->hw_addr = 0x100;
			memcpy (dev->enetaddr, bis->bi_enet1addr, 6);
			break;
		case 2:
			hw->hw_addr = 0x400;
			memcpy (dev->enetaddr, bis->bi_enet2addr, 6);
			break;
		case 3:
			hw->hw_addr = 0x600;
			memcpy (dev->enetaddr, bis->bi_enet3addr, 6);
			break;
		default:
			hw->hw_addr = 0;
			memcpy (dev->enetaddr, bis->bi_enetaddr, 6);
			break;
		}

		hw->devnum = eth_num;

		sprintf (dev->name, "ppc_440x_eth%d", eth_num);
		dev->priv = (void *) hw;
		dev->init = ppc_440x_eth_init;
		dev->halt = ppc_440x_eth_halt;
		dev->send = ppc_440x_eth_send;
		dev->recv = ppc_440x_eth_rx;

		if (0 == virgin) {
			/* set the MAL IER ??? names may change with new spec ??? */
			mal_ier =
				MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
				MAL_IER_OPBE | MAL_IER_PLBE;
			mtdcr (malesr, 0xffffffff);	/* clear pending interrupts */
			mtdcr (maltxdeir, 0xffffffff);	/* clear pending interrupts */
			mtdcr (malrxdeir, 0xffffffff);	/* clear pending interrupts */
			mtdcr (malier, mal_ier);

			/* install MAL interrupt handler */
			irq_install_handler (VECNUM_MS,
					     (interrupt_handler_t *) enetInt,
					     dev);
			irq_install_handler (VECNUM_MTE,
					     (interrupt_handler_t *) enetInt,
					     dev);
			irq_install_handler (VECNUM_MRE,
					     (interrupt_handler_t *) enetInt,
					     dev);
			irq_install_handler (VECNUM_TXDE,
					     (interrupt_handler_t *) enetInt,
					     dev);
			irq_install_handler (VECNUM_RXDE,
					     (interrupt_handler_t *) enetInt,
					     dev);
			virgin = 1;
		}

		eth_register (dev);

	}			/* end for each supported device */
	return (1);
}
#endif /* CONFIG_440 && CONFIG_NET_MULTI */