1 /*-----------------------------------------------------------------------------+
2 *
3 * This source code has been made available to you by IBM on an AS-IS
4 * basis. Anyone receiving this source is licensed under IBM
5 * copyrights to use it in any way he or she deems fit, including
6 * copying it, modifying it, compiling it, and redistributing it either
7 * with or without modifications. No license under IBM patents or
8 * patent applications is to be implied by the copyright license.
9 *
10 * Any user of this software should understand that IBM cannot provide
11 * technical support for this software and will not be responsible for
12 * any consequences resulting from the use of this software.
13 *
14 * Any person who transfers this source code or any derivative work
15 * must include the IBM copyright notice, this paragraph, and the
16 * preceding two paragraphs in the transferred software.
17 *
18 * COPYRIGHT I B M CORPORATION 1995
19 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
20 *-----------------------------------------------------------------------------*/
21 /*-----------------------------------------------------------------------------+
22 *
23 * File Name: enetemac.c
24 *
25 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
26 *
27 * Author: Mark Wisner
28 *
29 * Change Activity-
30 *
31 * Date Description of Change BY
32 * --------- --------------------- ---
33 * 05-May-99 Created MKW
34 * 27-Jun-99 Clean up JWB
35 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
36 * 29-Jul-99 Added Full duplex support MKW
37 * 06-Aug-99 Changed names for Mal CR reg MKW
38 * 23-Aug-99 Turned off SYE when running at 10Mbs MKW
39 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
40 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
41 * to avoid chaining maximum sized packets. Push starting
42 * RX descriptor address up to the next cache line boundary.
43 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
44 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
45 * EMAC_RXM register. JWB
46 * 12-Mar-01 anne-sophie.harnois@nextream.fr
47 * - Variables are compatible with those already defined in
48 * include/net.h
49 * - Receive buffer descriptor ring is used to send buffers
50 * to the user
51 * - Info print about send/received/handled packet number if
52 * INFO_405_ENET is set
53 * 17-Apr-01 stefan.roese@esd-electronics.com
54 * - MAL reset in "eth_halt" included
55 * - Enet speed and duplex output now in one line
56 * 08-May-01 stefan.roese@esd-electronics.com
57 * - MAL error handling added (eth_init called again)
58 * 13-Nov-01 stefan.roese@esd-electronics.com
59 * - Set IST bit in EMAC_M1 reg upon 100MBit or full duplex
60 * 04-Jan-02 stefan.roese@esd-electronics.com
61 * - Wait for PHY auto negotiation to complete added
62 * 06-Feb-02 stefan.roese@esd-electronics.com
63 * - Bug fixed in waiting for auto negotiation to complete
64 * 26-Feb-02 stefan.roese@esd-electronics.com
65 * - rx and tx buffer descriptors now allocated (no fixed address
66 * used anymore)
67 * 17-Jun-02 stefan.roese@esd-electronics.com
68 * - MAL error debug printf 'M' removed (rx de interrupt may
69 * occur upon many incoming packets with only 4 rx buffers).
70 *-----------------------------------------------------------------------------*
71 * 17-Nov-03 travis.sawyer@sandburst.com
 72 *             - ported from 405gp_enet.c to utilize up to 4 EMAC ports
73 * in the 440GX. This port should work with the 440GP
74 * (2 EMACs) also
75 * 15-Aug-05 sr@denx.de
76 * - merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
 77 *               now handling all 4xx CPUs.
78 *-----------------------------------------------------------------------------*/
79
80 #include <config.h>
81 #include <common.h>
82 #include <net.h>
83 #include <asm/processor.h>
84 #include <commproc.h>
85 #include <ppc4xx.h>
86 #include <ppc4xx_enet.h>
87 #include <405_mal.h>
88 #include <miiphy.h>
89 #include <malloc.h>
90 #include "vecnum.h"
91
92 /*
93 * Only compile for platform with AMCC EMAC ethernet controller and
94 * network support enabled.
95 * Remark: CONFIG_405 describes Xilinx PPC405 FPGA without EMAC controller!
96 */
97 #if (CONFIG_COMMANDS & CFG_CMD_NET) && !defined(CONFIG_405) && !defined(CONFIG_IOP480)
98
99 #if !(defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII))
100 #error "CONFIG_MII has to be defined!"
101 #endif
102
103 #if defined(CONFIG_NETCONSOLE) && !defined(CONFIG_NET_MULTI)
104 #error "CONFIG_NET_MULTI has to be defined for NetConsole"
105 #endif
106
107 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
108 #define PHY_AUTONEGOTIATE_TIMEOUT 4000 /* 4000 ms autonegotiate timeout */
109
110 /* Ethernet Transmit and Receive Buffers */
111 /* AS.HARNOIS
112 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
113 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
114 */
115 #define ENET_MAX_MTU PKTSIZE
116 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
117
118 /*-----------------------------------------------------------------------------+
119 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
120 * Interrupt Controller).
121 *-----------------------------------------------------------------------------*/
122 #define MAL_UIC_ERR ( UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
123 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
124 #define EMAC_UIC_DEF UIC_ENET
125 #define EMAC_UIC_DEF1 UIC_ENET1
126 #define SEL_UIC_DEF(p) (p ? UIC_ENET1 : UIC_ENET )
127
128 #undef INFO_4XX_ENET
129
130 #define BI_PHYMODE_NONE 0
131 #define BI_PHYMODE_ZMII 1
132 #define BI_PHYMODE_RGMII 2
133
134
135 /*-----------------------------------------------------------------------------+
136 * Global variables. TX and RX descriptors and buffers.
137 *-----------------------------------------------------------------------------*/
138 /* IER globals */
139 static uint32_t mal_ier;
140
141 #if !defined(CONFIG_NET_MULTI)
142 struct eth_device *emac0_dev = NULL;
143 #endif
144
145 /*
146 * Get count of EMAC devices (doesn't have to be the max. possible number
147 * supported by the cpu)
148 */
149 #if defined(CONFIG_HAS_ETH3)
150 #define LAST_EMAC_NUM 4
151 #elif defined(CONFIG_HAS_ETH2)
152 #define LAST_EMAC_NUM 3
153 #elif defined(CONFIG_HAS_ETH1)
154 #define LAST_EMAC_NUM 2
155 #else
156 #define LAST_EMAC_NUM 1
157 #endif
158
159 /*-----------------------------------------------------------------------------+
160 * Prototypes and externals.
161 *-----------------------------------------------------------------------------*/
162 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
163
164 int enetInt (struct eth_device *dev);
165 static void mal_err (struct eth_device *dev, unsigned long isr,
166 unsigned long uic, unsigned long maldef,
167 unsigned long mal_errr);
168 static void emac_err (struct eth_device *dev, unsigned long isr);
169
170 extern int phy_setup_aneg (char *devname, unsigned char addr);
171 extern int emac4xx_miiphy_read (char *devname, unsigned char addr,
172 unsigned char reg, unsigned short *value);
173 extern int emac4xx_miiphy_write (char *devname, unsigned char addr,
174 unsigned char reg, unsigned short value);
175
176 /*-----------------------------------------------------------------------------+
177 | ppc_4xx_eth_halt
178 | Disable MAL channel, and EMACn
179 +-----------------------------------------------------------------------------*/
180 static void ppc_4xx_eth_halt (struct eth_device *dev)
181 {
182 EMAC_4XX_HW_PST hw_p = dev->priv;
183 uint32_t failsafe = 10000;
184
185 out32 (EMAC_IER + hw_p->hw_addr, 0x00000000); /* disable emac interrupts */
186
187 /* 1st reset MAL channel */
188 /* Note: writing a 0 to a channel has no effect */
189 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
190 mtdcr (maltxcarr, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
191 #else
192 mtdcr (maltxcarr, (MAL_CR_MMSR >> hw_p->devnum));
193 #endif
194 mtdcr (malrxcarr, (MAL_CR_MMSR >> hw_p->devnum));
195
196 /* wait for reset */
197 while (mfdcr (malrxcasr) & (MAL_CR_MMSR >> hw_p->devnum)) {
198 udelay (1000); /* Delay 1 MS so as not to hammer the register */
199 failsafe--;
200 if (failsafe == 0)
201 break;
202 }
203
204 /* EMAC RESET */
205 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
206
207 #ifndef CONFIG_NETCONSOLE
208 hw_p->print_speed = 1; /* print speed message again next time */
209 #endif
210
211 return;
212 }
213
214 #if defined (CONFIG_440GX)
215 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
216 {
217 unsigned long pfc1;
218 unsigned long zmiifer;
219 unsigned long rmiifer;
220
221 mfsdr(sdr_pfc1, pfc1);
222 pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
223
224 zmiifer = 0;
225 rmiifer = 0;
226
227 switch (pfc1) {
228 case 1:
229 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
230 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
231 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
232 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
233 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
234 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
235 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
236 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
237 break;
238 case 2:
239 zmiifer = ZMII_FER_SMII << ZMII_FER_V(0);
240 zmiifer = ZMII_FER_SMII << ZMII_FER_V(1);
241 zmiifer = ZMII_FER_SMII << ZMII_FER_V(2);
242 zmiifer = ZMII_FER_SMII << ZMII_FER_V(3);
243 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
244 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
245 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
246 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
247 break;
248 case 3:
249 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
250 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
251 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
252 bis->bi_phymode[1] = BI_PHYMODE_NONE;
253 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
254 bis->bi_phymode[3] = BI_PHYMODE_NONE;
255 break;
256 case 4:
257 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
258 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
259 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
260 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
261 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
262 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
263 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
264 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
265 break;
266 case 5:
267 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
268 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
269 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
270 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
271 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
272 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
273 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
274 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
275 break;
276 case 6:
277 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
278 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
279 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
280 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
281 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
282 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
283 break;
284 case 0:
285 default:
286 zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
287 rmiifer = 0x0;
288 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
289 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
290 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
291 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
292 break;
293 }
294
295 /* Ensure we setup mdio for this devnum and ONLY this devnum */
296 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
297
298 out32 (ZMII_FER, zmiifer);
299 out32 (RGMII_FER, rmiifer);
300
301 return ((int)pfc1);
302
303 }
304 #endif
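/*
 * Example (a sketch): when the SDR0_PFC1 Ethernet Pin Select decodes to 4,
 * the bridge setup above routes EMAC0/1 through the ZMII macro in SMII mode
 * and EMAC2/3 through the RGMII macro, and enables MDIO only for 'devnum'.
 */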
305
306 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
307 {
308 int i, j;
309 unsigned long reg = 0;
310 unsigned long msr;
311 unsigned long speed;
312 unsigned long duplex;
313 unsigned long failsafe;
314 unsigned mode_reg;
315 unsigned short devnum;
316 unsigned short reg_short;
317 #if defined(CONFIG_440GX) || defined(CONFIG_440SP)
318 sys_info_t sysinfo;
319 #if defined(CONFIG_440GX)
320 int ethgroup = -1;
321 #endif
322 #endif
323
324 EMAC_4XX_HW_PST hw_p = dev->priv;
325
326 /* before doing anything, figure out if we have a MAC address */
327 /* if not, bail */
328 if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
329 printf("ERROR: ethaddr not set!\n");
330 return -1;
331 }
332
333 #if defined(CONFIG_440GX) || defined(CONFIG_440SP)
334 /* Need to get the OPB frequency so we can access the PHY */
335 get_sys_info (&sysinfo);
336 #endif
337
338 msr = mfmsr ();
339 mtmsr (msr & ~(MSR_EE)); /* disable interrupts */
340
341 devnum = hw_p->devnum;
342
343 #ifdef INFO_4XX_ENET
344 /* AS.HARNOIS
345 * We should have :
346 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
347 	 * In most cases hw_p->stats.pkts_handled = hw_p->stats.pkts_rx, but it
348 	 * is possible that new packets (unrelated to the current
349 	 * transfer) have had time to arrive before
350 	 * NetLoop calls eth_halt
351 */
352 	printf ("About preceding transfer (eth%d):\n"
353 "- Sent packet number %d\n"
354 "- Received packet number %d\n"
355 "- Handled packet number %d\n",
356 hw_p->devnum,
357 hw_p->stats.pkts_tx,
358 hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
359
360 hw_p->stats.pkts_tx = 0;
361 hw_p->stats.pkts_rx = 0;
362 hw_p->stats.pkts_handled = 0;
363 #endif
364
365 hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
366 hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
367
368 hw_p->rx_slot = 0; /* MAL Receive Slot */
369 hw_p->rx_i_index = 0; /* Receive Interrupt Queue Index */
370 hw_p->rx_u_index = 0; /* Receive User Queue Index */
371
372 hw_p->tx_slot = 0; /* MAL Transmit Slot */
373 hw_p->tx_i_index = 0; /* Transmit Interrupt Queue Index */
374 hw_p->tx_u_index = 0; /* Transmit User Queue Index */
375
376 #if defined(CONFIG_440) && !defined(CONFIG_440SP)
377 /* set RMII mode */
378 /* NOTE: 440GX spec states that mode is mutually exclusive */
379 /* NOTE: Therefore, disable all other EMACS, since we handle */
380 /* NOTE: only one emac at a time */
381 reg = 0;
382 out32 (ZMII_FER, 0);
383 udelay (100);
384
385 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
386 out32 (ZMII_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
387 #elif defined(CONFIG_440GX)
388 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
389 #elif defined(CONFIG_440GP)
390 /* set RMII mode */
391 out32 (ZMII_FER, ZMII_RMII | ZMII_MDI0);
392 #else
393 if ((devnum == 0) || (devnum == 1)) {
394 out32 (ZMII_FER, (ZMII_FER_SMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
395 }
396 else { /* ((devnum == 2) || (devnum == 3)) */
397 out32 (ZMII_FER, ZMII_FER_MDI << ZMII_FER_V (devnum));
398 out32 (RGMII_FER, ((RGMII_FER_RGMII << RGMII_FER_V (2)) |
399 (RGMII_FER_RGMII << RGMII_FER_V (3))));
400 }
401 #endif
402
403 out32 (ZMII_SSR, ZMII_SSR_SP << ZMII_SSR_V(devnum));
404 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
405
406 __asm__ volatile ("eieio");
407
408 /* reset emac so we have access to the phy */
409
410 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
411 __asm__ volatile ("eieio");
412
413 failsafe = 1000;
414 while ((in32 (EMAC_M0 + hw_p->hw_addr) & (EMAC_M0_SRST)) && failsafe) {
415 udelay (1000);
416 failsafe--;
417 }
418
419 #if defined(CONFIG_440GX) || defined(CONFIG_440SP)
420 /* Whack the M1 register */
421 mode_reg = 0x0;
422 mode_reg &= ~0x00000038;
423 	if (sysinfo.freqOPB <= 50000000);	/* intentionally empty: <= 50 MHz sets no OBCI bits */
424 else if (sysinfo.freqOPB <= 66666667)
425 mode_reg |= EMAC_M1_OBCI_66;
426 else if (sysinfo.freqOPB <= 83333333)
427 mode_reg |= EMAC_M1_OBCI_83;
428 else if (sysinfo.freqOPB <= 100000000)
429 mode_reg |= EMAC_M1_OBCI_100;
430 else
431 mode_reg |= EMAC_M1_OBCI_GT100;
432
433 out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
434 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
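	/*
	 * Example (a sketch): an OPB clock of 83.3 MHz selects EMAC_M1_OBCI_83
	 * above, while clocks of 50 MHz or below leave the OBCI field cleared.
	 */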
435
436 /* wait for PHY to complete auto negotiation */
437 reg_short = 0;
438 #ifndef CONFIG_CS8952_PHY
439 switch (devnum) {
440 case 0:
441 reg = CONFIG_PHY_ADDR;
442 break;
443 #if defined (CONFIG_PHY1_ADDR)
444 case 1:
445 reg = CONFIG_PHY1_ADDR;
446 break;
447 #endif
448 #if defined (CONFIG_440GX)
449 case 2:
450 reg = CONFIG_PHY2_ADDR;
451 break;
452 case 3:
453 reg = CONFIG_PHY3_ADDR;
454 break;
455 #endif
456 default:
457 reg = CONFIG_PHY_ADDR;
458 break;
459 }
460
461 bis->bi_phynum[devnum] = reg;
462
463 #if defined(CONFIG_PHY_RESET)
464 /*
465 	 * Reset the phy, only if it's the first time through
466 * otherwise, just check the speeds & feeds
467 */
468 if (hw_p->first_init == 0) {
469 miiphy_reset (dev->name, reg);
470
471 #if defined(CONFIG_440GX) || defined(CONFIG_440SP)
472 #if defined(CONFIG_CIS8201_PHY)
473 /*
474 * Cicada 8201 PHY needs to have an extended register whacked
475 * for RGMII mode.
476 */
477 if ( ((devnum == 2) || (devnum ==3)) && (4 == ethgroup) ) {
478 #if defined(CONFIG_CIS8201_SHORT_ETCH)
479 miiphy_write (dev->name, reg, 23, 0x1300);
480 #else
481 miiphy_write (dev->name, reg, 23, 0x1000);
482 #endif
483 /*
484 * Vitesse VSC8201/Cicada CIS8201 errata:
485 * Interoperability problem with Intel 82547EI phys
486 * This work around (provided by Vitesse) changes
487 * the default timer convergence from 8ms to 12ms
488 */
489 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
490 miiphy_write (dev->name, reg, 0x08, 0x0200);
491 miiphy_write (dev->name, reg, 0x1f, 0x52b5);
492 miiphy_write (dev->name, reg, 0x02, 0x0004);
493 miiphy_write (dev->name, reg, 0x01, 0x0671);
494 miiphy_write (dev->name, reg, 0x00, 0x8fae);
495 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
496 miiphy_write (dev->name, reg, 0x08, 0x0000);
497 miiphy_write (dev->name, reg, 0x1f, 0x0000);
498 /* end Vitesse/Cicada errata */
499 }
500 #endif
501 #endif
502 /* Start/Restart autonegotiation */
503 phy_setup_aneg (dev->name, reg);
504 udelay (1000);
505 }
506 #endif /* defined(CONFIG_PHY_RESET) */
507
508 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
509
510 /*
511 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
512 */
513 if ((reg_short & PHY_BMSR_AUTN_ABLE)
514 && !(reg_short & PHY_BMSR_AUTN_COMP)) {
515 puts ("Waiting for PHY auto negotiation to complete");
516 i = 0;
517 while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
518 /*
519 * Timeout reached ?
520 */
521 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
522 puts (" TIMEOUT !\n");
523 break;
524 }
525
526 if ((i++ % 1000) == 0) {
527 putc ('.');
528 }
529 udelay (1000); /* 1 ms */
530 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
531
532 }
533 puts (" done\n");
534 udelay (500000); /* another 500 ms (results in faster booting) */
535 }
536 #endif /* #ifndef CONFIG_CS8952_PHY */
537
538 speed = miiphy_speed (dev->name, reg);
539 duplex = miiphy_duplex (dev->name, reg);
540
541 if (hw_p->print_speed) {
542 hw_p->print_speed = 0;
543 printf ("ENET Speed is %d Mbps - %s duplex connection\n",
544 (int) speed, (duplex == HALF) ? "HALF" : "FULL");
545 }
546
547 #if defined(CONFIG_440) && !defined(CONFIG_440SP)
548 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
549 mfsdr(sdr_mfr, reg);
550 if (speed == 100) {
551 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
552 } else {
553 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
554 }
555 mtsdr(sdr_mfr, reg);
556 #endif
557
558 /* Set ZMII/RGMII speed according to the phy link speed */
559 reg = in32 (ZMII_SSR);
560 if ( (speed == 100) || (speed == 1000) )
561 out32 (ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
562 else
563 out32 (ZMII_SSR, reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));
564
565 if ((devnum == 2) || (devnum == 3)) {
566 if (speed == 1000)
567 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
568 else if (speed == 100)
569 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
570 else
571 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
572
573 out32 (RGMII_SSR, reg);
574 }
575 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
576
577 /* set the Mal configuration reg */
578 #if defined(CONFIG_440GX) || defined(CONFIG_440SP)
579 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
580 MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
581 #else
582 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
583 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
584 if (get_pvr() == PVR_440GP_RB) {
585 mtdcr (malmcr, mfdcr(malmcr) & ~MAL_CR_PLBB);
586 }
587 #endif
588
589 /* Free "old" buffers */
590 if (hw_p->alloc_tx_buf)
591 free (hw_p->alloc_tx_buf);
592 if (hw_p->alloc_rx_buf)
593 free (hw_p->alloc_rx_buf);
594
595 /*
596 	 * Malloc MAL buffer descriptors, make sure they are
597 	 * aligned on the cache line boundary size
598 	 * (401/403/IOP480 = 16, 405 = 32)
599 	 * and don't cross cache block boundaries.
600 */
601 hw_p->alloc_tx_buf =
602 (mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_TX_BUFF) +
603 ((2 * CFG_CACHELINE_SIZE) - 2));
604 if (NULL == hw_p->alloc_tx_buf)
605 return -1;
606 if (((int) hw_p->alloc_tx_buf & CACHELINE_MASK) != 0) {
607 hw_p->tx =
608 (mal_desc_t *) ((int) hw_p->alloc_tx_buf +
609 CFG_CACHELINE_SIZE -
610 ((int) hw_p->
611 alloc_tx_buf & CACHELINE_MASK));
612 } else {
613 hw_p->tx = hw_p->alloc_tx_buf;
614 }
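	/*
	 * Alignment example (a sketch, assuming CFG_CACHELINE_SIZE == 32):
	 * if malloc() returned 0x00041014, hw_p->tx is pushed up to
	 * 0x00041020, the next 32-byte boundary, so the descriptors start
	 * on a fresh cache line.
	 */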
615
616 hw_p->alloc_rx_buf =
617 (mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_RX_BUFF) +
618 ((2 * CFG_CACHELINE_SIZE) - 2));
619 if (NULL == hw_p->alloc_rx_buf) {
620 free(hw_p->alloc_tx_buf);
621 hw_p->alloc_tx_buf = NULL;
622 return -1;
623 }
624
625 if (((int) hw_p->alloc_rx_buf & CACHELINE_MASK) != 0) {
626 hw_p->rx =
627 (mal_desc_t *) ((int) hw_p->alloc_rx_buf +
628 CFG_CACHELINE_SIZE -
629 ((int) hw_p->
630 alloc_rx_buf & CACHELINE_MASK));
631 } else {
632 hw_p->rx = hw_p->alloc_rx_buf;
633 }
634
635 for (i = 0; i < NUM_TX_BUFF; i++) {
636 hw_p->tx[i].ctrl = 0;
637 hw_p->tx[i].data_len = 0;
638 if (hw_p->first_init == 0) {
639 hw_p->txbuf_ptr =
640 (char *) malloc (ENET_MAX_MTU_ALIGNED);
641 if (NULL == hw_p->txbuf_ptr) {
642 free(hw_p->alloc_rx_buf);
643 free(hw_p->alloc_tx_buf);
644 hw_p->alloc_rx_buf = NULL;
645 hw_p->alloc_tx_buf = NULL;
646 				for(j = 0; j < i; j++) {
647 					free(hw_p->tx[j].data_ptr);
648 					hw_p->tx[j].data_ptr = NULL;
649 				}
				return -1;	/* txbuf_ptr allocation failed */
650 			}
651 }
652 hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
653 if ((NUM_TX_BUFF - 1) == i)
654 hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
655 hw_p->tx_run[i] = -1;
656 #if 0
657 printf ("TX_BUFF %d @ 0x%08lx\n", i,
658 (ulong) hw_p->tx[i].data_ptr);
659 #endif
660 }
661
662 for (i = 0; i < NUM_RX_BUFF; i++) {
663 hw_p->rx[i].ctrl = 0;
664 hw_p->rx[i].data_len = 0;
665 /* rx[i].data_ptr = (char *) &rx_buff[i]; */
666 hw_p->rx[i].data_ptr = (char *) NetRxPackets[i];
667 if ((NUM_RX_BUFF - 1) == i)
668 hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
669 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
670 hw_p->rx_ready[i] = -1;
671 #if 0
672 printf ("RX_BUFF %d @ 0x%08lx\n", i, (ulong) rx[i].data_ptr);
673 #endif
674 }
675
676 reg = 0x00000000;
677
678 reg |= dev->enetaddr[0]; /* set high address */
679 reg = reg << 8;
680 reg |= dev->enetaddr[1];
681
682 out32 (EMAC_IAH + hw_p->hw_addr, reg);
683
684 reg = 0x00000000;
685 reg |= dev->enetaddr[2]; /* set low address */
686 reg = reg << 8;
687 reg |= dev->enetaddr[3];
688 reg = reg << 8;
689 reg |= dev->enetaddr[4];
690 reg = reg << 8;
691 reg |= dev->enetaddr[5];
692
693 out32 (EMAC_IAL + hw_p->hw_addr, reg);
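	/*
	 * Packing example (a sketch): for enetaddr 00:04:AC:E3:14:7F the code
	 * above writes EMAC_IAH = 0x00000004 and EMAC_IAL = 0xACE3147F.
	 */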
694
695 switch (devnum) {
696 case 1:
697 /* setup MAL tx & rx channel pointers */
698 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
699 mtdcr (maltxctp2r, hw_p->tx);
700 #else
701 mtdcr (maltxctp1r, hw_p->tx);
702 #endif
703 #if defined(CONFIG_440)
704 mtdcr (maltxbattr, 0x0);
705 mtdcr (malrxbattr, 0x0);
706 #endif
707 mtdcr (malrxctp1r, hw_p->rx);
708 /* set RX buffer size */
709 mtdcr (malrcbs1, ENET_MAX_MTU_ALIGNED / 16);
710 break;
711 #if defined (CONFIG_440GX)
712 case 2:
713 /* setup MAL tx & rx channel pointers */
714 mtdcr (maltxbattr, 0x0);
715 mtdcr (malrxbattr, 0x0);
716 mtdcr (maltxctp2r, hw_p->tx);
717 mtdcr (malrxctp2r, hw_p->rx);
718 /* set RX buffer size */
719 mtdcr (malrcbs2, ENET_MAX_MTU_ALIGNED / 16);
720 break;
721 case 3:
722 /* setup MAL tx & rx channel pointers */
723 mtdcr (maltxbattr, 0x0);
724 mtdcr (maltxctp3r, hw_p->tx);
725 mtdcr (malrxbattr, 0x0);
726 mtdcr (malrxctp3r, hw_p->rx);
727 /* set RX buffer size */
728 mtdcr (malrcbs3, ENET_MAX_MTU_ALIGNED / 16);
729 break;
730 #endif /* CONFIG_440GX */
731 case 0:
732 default:
733 /* setup MAL tx & rx channel pointers */
734 #if defined(CONFIG_440)
735 mtdcr (maltxbattr, 0x0);
736 mtdcr (malrxbattr, 0x0);
737 #endif
738 mtdcr (maltxctp0r, hw_p->tx);
739 mtdcr (malrxctp0r, hw_p->rx);
740 /* set RX buffer size */
741 mtdcr (malrcbs0, ENET_MAX_MTU_ALIGNED / 16);
742 break;
743 }
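	/*
	 * Note (assumption): the MAL receive channel buffer size registers
	 * (malrcbs0..3) are programmed in units of 16 bytes, which is why
	 * ENET_MAX_MTU_ALIGNED is divided by 16 above.
	 */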
744
745 /* Enable MAL transmit and receive channels */
746 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
747 mtdcr (maltxcasr, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
748 #else
749 mtdcr (maltxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
750 #endif
751 mtdcr (malrxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
752
753 /* set transmit enable & receive enable */
754 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_TXE | EMAC_M0_RXE);
755
756 /* set receive fifo to 4k and tx fifo to 2k */
757 mode_reg = in32 (EMAC_M1 + hw_p->hw_addr);
758 mode_reg |= EMAC_M1_RFS_4K | EMAC_M1_TX_FIFO_2K;
759
760 /* set speed */
761 if (speed == _1000BASET) {
762 #if defined(CONFIG_440SP)
763 #define SDR0_PFC1_EM_1000 0x00200000
764 unsigned long pfc1;
765 mfsdr (sdr_pfc1, pfc1);
766 pfc1 |= SDR0_PFC1_EM_1000;
767 mtsdr (sdr_pfc1, pfc1);
768 #endif
769 mode_reg = mode_reg | EMAC_M1_MF_1000MBPS | EMAC_M1_IST;
770 } else if (speed == _100BASET)
771 mode_reg = mode_reg | EMAC_M1_MF_100MBPS | EMAC_M1_IST;
772 else
773 mode_reg = mode_reg & ~0x00C00000; /* 10 MBPS */
774 if (duplex == FULL)
775 mode_reg = mode_reg | 0x80000000 | EMAC_M1_IST;
776
777 out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
778
779 	/* Enable broadcast and individual address */
780 /* TBS: enabling runts as some misbehaved nics will send runts */
781 out32 (EMAC_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
782
783 /* we probably need to set the tx mode1 reg? maybe at tx time */
784
785 /* set transmit request threshold register */
786 out32 (EMAC_TRTR + hw_p->hw_addr, 0x18000000); /* 256 byte threshold */
787
788 /* set receive low/high water mark register */
789 #if defined(CONFIG_440)
790 /* 440GP has a 64 byte burst length */
791 out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
792 #else
793 /* 405s have a 16 byte burst length */
794 out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
795 #endif /* defined(CONFIG_440) */
796 out32 (EMAC_TXM1 + hw_p->hw_addr, 0xf8640000);
797
798 /* Set fifo limit entry in tx mode 0 */
799 out32 (EMAC_TXM0 + hw_p->hw_addr, 0x00000003);
800 /* Frame gap set */
801 out32 (EMAC_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
802
803 /* Set EMAC IER */
804 hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
805 if (speed == _100BASET)
806 hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
807
808 out32 (EMAC_ISR + hw_p->hw_addr, 0xffffffff); /* clear pending interrupts */
809 out32 (EMAC_IER + hw_p->hw_addr, hw_p->emac_ier);
810
811 if (hw_p->first_init == 0) {
812 /*
813 * Connect interrupt service routines
814 */
815 irq_install_handler (VECNUM_ETH0 + (hw_p->devnum * 2),
816 (interrupt_handler_t *) enetInt, dev);
817 }
818
819 mtmsr (msr); /* enable interrupts again */
820
821 hw_p->bis = bis;
822 hw_p->first_init = 1;
823
824 return (1);
825 }
826
827
828 static int ppc_4xx_eth_send (struct eth_device *dev, volatile void *ptr,
829 int len)
830 {
831 struct enet_frame *ef_ptr;
832 ulong time_start, time_now;
833 unsigned long temp_txm0;
834 EMAC_4XX_HW_PST hw_p = dev->priv;
835
836 ef_ptr = (struct enet_frame *) ptr;
837
838 /*-----------------------------------------------------------------------+
839 * Copy in our address into the frame.
840 *-----------------------------------------------------------------------*/
841 (void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
842
843 /*-----------------------------------------------------------------------+
844 * If frame is too long or too short, modify length.
845 *-----------------------------------------------------------------------*/
846 /* TBS: where does the fragment go???? */
847 if (len > ENET_MAX_MTU)
848 len = ENET_MAX_MTU;
849
850 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
851 memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
852
853 /*-----------------------------------------------------------------------+
854 * set TX Buffer busy, and send it
855 *-----------------------------------------------------------------------*/
856 hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
857 EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
858 ~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
859 if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
860 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
861
862 hw_p->tx[hw_p->tx_slot].data_len = (short) len;
863 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
864
865 __asm__ volatile ("eieio");
866
867 out32 (EMAC_TXM0 + hw_p->hw_addr,
868 in32 (EMAC_TXM0 + hw_p->hw_addr) | EMAC_TXM0_GNP0);
869 #ifdef INFO_4XX_ENET
870 hw_p->stats.pkts_tx++;
871 #endif
872
873 /*-----------------------------------------------------------------------+
874 	 * poll until the packet is sent and then make sure it is OK
875 *-----------------------------------------------------------------------*/
876 time_start = get_timer (0);
877 while (1) {
878 temp_txm0 = in32 (EMAC_TXM0 + hw_p->hw_addr);
879 		/* loop until GNP0 clears (packet sent) or 3 seconds elapse */
880 		if ((temp_txm0 & EMAC_TXM0_GNP0) != 0) {
881 			/* GNP0 still set: transmit not complete yet.
882 			 * Keep polling, but give up after 3 seconds;
883 			 * errors are reported via interrupt after we return.
884 			 */
885 time_now = get_timer (0);
886 if ((time_now - time_start) > 3000) {
887 return (-1);
888 }
889 } else {
890 return (len);
891 }
892 }
893 }
894
895
896 #if defined (CONFIG_440)
897
898 #if defined(CONFIG_440SP)
899 /*
900 * Hack: On 440SP all enet irq sources are located on UIC1
901 * Needs some cleanup. --sr
902 */
903 #define UIC0MSR uic1msr
904 #define UIC0SR uic1sr
905 #else
906 #define UIC0MSR uic0msr
907 #define UIC0SR uic0sr
908 #endif
909
910 int enetInt (struct eth_device *dev)
911 {
912 int serviced;
913 int rc = -1; /* default to not us */
914 unsigned long mal_isr;
915 unsigned long emac_isr = 0;
916 unsigned long mal_rx_eob;
917 unsigned long my_uic0msr, my_uic1msr;
918
919 #if defined(CONFIG_440GX)
920 unsigned long my_uic2msr;
921 #endif
922 EMAC_4XX_HW_PST hw_p;
923
924 /*
925 * Because the mal is generic, we need to get the current
926 * eth device
927 */
928 #if defined(CONFIG_NET_MULTI)
929 dev = eth_get_dev();
930 #else
931 dev = emac0_dev;
932 #endif
933
934 hw_p = dev->priv;
935
936 /* enter loop that stays in interrupt code until nothing to service */
937 do {
938 serviced = 0;
939
940 my_uic0msr = mfdcr (UIC0MSR);
941 my_uic1msr = mfdcr (uic1msr);
942 #if defined(CONFIG_440GX)
943 my_uic2msr = mfdcr (uic2msr);
944 #endif
945 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
946 && !(my_uic1msr & (UIC_ETH0 | UIC_ETH1 | UIC_MS | UIC_MTDE | UIC_MRDE))) {
947 /* not for us */
948 return (rc);
949 }
950 #if defined (CONFIG_440GX)
951 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
952 && !(my_uic2msr & (UIC_ETH2 | UIC_ETH3))) {
953 /* not for us */
954 return (rc);
955 }
956 #endif
957 /* get and clear controller status interrupts */
958 /* look at Mal and EMAC interrupts */
959 if ((my_uic0msr & (UIC_MRE | UIC_MTE))
960 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
961 /* we have a MAL interrupt */
962 mal_isr = mfdcr (malesr);
963 /* look for mal error */
964 if (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE)) {
965 mal_err (dev, mal_isr, my_uic0msr,
966 MAL_UIC_DEF, MAL_UIC_ERR);
967 serviced = 1;
968 rc = 0;
969 }
970 }
971
972 /* port by port dispatch of emac interrupts */
973 if (hw_p->devnum == 0) {
974 if (UIC_ETH0 & my_uic1msr) { /* look for EMAC errors */
975 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
976 if ((hw_p->emac_ier & emac_isr) != 0) {
977 emac_err (dev, emac_isr);
978 serviced = 1;
979 rc = 0;
980 }
981 }
982 if ((hw_p->emac_ier & emac_isr)
983 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
984 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
985 mtdcr (uic1sr, UIC_ETH0 | UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
986 return (rc); /* we had errors so get out */
987 }
988 }
989
990 #if !defined(CONFIG_440SP)
991 if (hw_p->devnum == 1) {
992 if (UIC_ETH1 & my_uic1msr) { /* look for EMAC errors */
993 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
994 if ((hw_p->emac_ier & emac_isr) != 0) {
995 emac_err (dev, emac_isr);
996 serviced = 1;
997 rc = 0;
998 }
999 }
1000 if ((hw_p->emac_ier & emac_isr)
1001 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1002 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1003 mtdcr (uic1sr, UIC_ETH1 | UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1004 return (rc); /* we had errors so get out */
1005 }
1006 }
1007 #if defined (CONFIG_440GX)
1008 if (hw_p->devnum == 2) {
1009 if (UIC_ETH2 & my_uic2msr) { /* look for EMAC errors */
1010 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1011 if ((hw_p->emac_ier & emac_isr) != 0) {
1012 emac_err (dev, emac_isr);
1013 serviced = 1;
1014 rc = 0;
1015 }
1016 }
1017 if ((hw_p->emac_ier & emac_isr)
1018 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1019 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1020 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1021 mtdcr (uic2sr, UIC_ETH2);
1022 return (rc); /* we had errors so get out */
1023 }
1024 }
1025
1026 if (hw_p->devnum == 3) {
1027 if (UIC_ETH3 & my_uic2msr) { /* look for EMAC errors */
1028 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1029 if ((hw_p->emac_ier & emac_isr) != 0) {
1030 emac_err (dev, emac_isr);
1031 serviced = 1;
1032 rc = 0;
1033 }
1034 }
1035 if ((hw_p->emac_ier & emac_isr)
1036 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1037 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1038 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1039 mtdcr (uic2sr, UIC_ETH3);
1040 return (rc); /* we had errors so get out */
1041 }
1042 }
1043 #endif /* CONFIG_440GX */
1044 #endif /* !CONFIG_440SP */
1045
1046 		/* handle MAL TX EOB interrupt from a tx */
1047 if (my_uic0msr & UIC_MTE) {
1048 mal_rx_eob = mfdcr (maltxeobisr);
1049 mtdcr (maltxeobisr, mal_rx_eob);
1050 mtdcr (UIC0SR, UIC_MTE);
1051 }
1052 		/* handle MAL RX EOB interrupt from a receive */
1053 /* check for EOB on valid channels */
1054 if (my_uic0msr & UIC_MRE) {
1055 mal_rx_eob = mfdcr (malrxeobisr);
1056 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1057 /* clear EOB
1058 mtdcr(malrxeobisr, mal_rx_eob); */
1059 enet_rcv (dev, emac_isr);
1060 /* indicate that we serviced an interrupt */
1061 serviced = 1;
1062 rc = 0;
1063 }
1064 }
1065
1066 mtdcr (UIC0SR, UIC_MRE); /* Clear */
1067 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1068 switch (hw_p->devnum) {
1069 case 0:
1070 mtdcr (uic1sr, UIC_ETH0);
1071 break;
1072 case 1:
1073 mtdcr (uic1sr, UIC_ETH1);
1074 break;
1075 #if defined (CONFIG_440GX)
1076 case 2:
1077 mtdcr (uic2sr, UIC_ETH2);
1078 break;
1079 case 3:
1080 mtdcr (uic2sr, UIC_ETH3);
1081 break;
1082 #endif /* CONFIG_440GX */
1083 default:
1084 break;
1085 }
1086 } while (serviced);
1087
1088 return (rc);
1089 }
1090
1091 #else /* CONFIG_440 */
1092
1093 int enetInt (struct eth_device *dev)
1094 {
1095 int serviced;
1096 int rc = -1; /* default to not us */
1097 unsigned long mal_isr;
1098 unsigned long emac_isr = 0;
1099 unsigned long mal_rx_eob;
1100 unsigned long my_uicmsr;
1101
1102 EMAC_4XX_HW_PST hw_p;
1103
1104 /*
1105 * Because the mal is generic, we need to get the current
1106 * eth device
1107 */
1108 #if defined(CONFIG_NET_MULTI)
1109 dev = eth_get_dev();
1110 #else
1111 dev = emac0_dev;
1112 #endif
1113
1114 hw_p = dev->priv;
1115
1116 /* enter loop that stays in interrupt code until nothing to service */
1117 do {
1118 serviced = 0;
1119
1120 my_uicmsr = mfdcr (uicmsr);
1121
1122 if ((my_uicmsr & (MAL_UIC_DEF | EMAC_UIC_DEF)) == 0) { /* not for us */
1123 return (rc);
1124 }
1125 /* get and clear controller status interrupts */
1126 /* look at Mal and EMAC interrupts */
1127 if ((MAL_UIC_DEF & my_uicmsr) != 0) { /* we have a MAL interrupt */
1128 mal_isr = mfdcr (malesr);
1129 /* look for mal error */
1130 if ((my_uicmsr & MAL_UIC_ERR) != 0) {
1131 mal_err (dev, mal_isr, my_uicmsr, MAL_UIC_DEF, MAL_UIC_ERR);
1132 serviced = 1;
1133 rc = 0;
1134 }
1135 }
1136
1137 /* port by port dispatch of emac interrupts */
1138
1139 if ((SEL_UIC_DEF(hw_p->devnum) & my_uicmsr) != 0) { /* look for EMAC errors */
1140 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1141 if ((hw_p->emac_ier & emac_isr) != 0) {
1142 emac_err (dev, emac_isr);
1143 serviced = 1;
1144 rc = 0;
1145 }
1146 }
1147 if (((hw_p->emac_ier & emac_isr) != 0) || ((MAL_UIC_ERR & my_uicmsr) != 0)) {
1148 mtdcr (uicsr, MAL_UIC_DEF | SEL_UIC_DEF(hw_p->devnum)); /* Clear */
1149 return (rc); /* we had errors so get out */
1150 }
1151
1152 		/* handle MAL TX EOB interrupt from a tx */
1153 if (my_uicmsr & UIC_MAL_TXEOB) {
1154 mal_rx_eob = mfdcr (maltxeobisr);
1155 mtdcr (maltxeobisr, mal_rx_eob);
1156 mtdcr (uicsr, UIC_MAL_TXEOB);
1157 }
1158 		/* handle MAL RX EOB interrupt from a receive */
1159 /* check for EOB on valid channels */
1160 if (my_uicmsr & UIC_MAL_RXEOB)
1161 {
1162 mal_rx_eob = mfdcr (malrxeobisr);
1163 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1164 /* clear EOB
1165 mtdcr(malrxeobisr, mal_rx_eob); */
1166 enet_rcv (dev, emac_isr);
1167 /* indicate that we serviced an interrupt */
1168 serviced = 1;
1169 rc = 0;
1170 }
1171 }
1172 mtdcr (uicsr, MAL_UIC_DEF|EMAC_UIC_DEF|EMAC_UIC_DEF1); /* Clear */
1173 }
1174 while (serviced);
1175
1176 return (rc);
1177 }
1178
1179 #endif /* CONFIG_440 */
1180
1181 /*-----------------------------------------------------------------------------+
1182 * MAL Error Routine
1183 *-----------------------------------------------------------------------------*/
1184 static void mal_err (struct eth_device *dev, unsigned long isr,
1185 unsigned long uic, unsigned long maldef,
1186 unsigned long mal_errr)
1187 {
1188 EMAC_4XX_HW_PST hw_p = dev->priv;
1189
1190 mtdcr (malesr, isr); /* clear interrupt */
1191
1192 /* clear DE interrupt */
1193 mtdcr (maltxdeir, 0xC0000000);
1194 mtdcr (malrxdeir, 0x80000000);
1195
1196 #ifdef INFO_4XX_ENET
1197 	printf ("\nMAL error occurred.... ISR = %lx UIC = %lx MAL_DEF = %lx MAL_ERR = %lx\n", isr, uic, maldef, mal_errr);
1198 #endif
1199
1200 eth_init (hw_p->bis); /* start again... */
1201 }
1202
1203 /*-----------------------------------------------------------------------------+
1204 * EMAC Error Routine
1205 *-----------------------------------------------------------------------------*/
1206 static void emac_err (struct eth_device *dev, unsigned long isr)
1207 {
1208 EMAC_4XX_HW_PST hw_p = dev->priv;
1209
1210 	printf ("EMAC%d error occurred.... ISR = %lx\n", hw_p->devnum, isr);
1211 out32 (EMAC_ISR + hw_p->hw_addr, isr);
1212 }
1213
1214 /*-----------------------------------------------------------------------------+
1215 * enet_rcv() handles the ethernet receive data
1216 *-----------------------------------------------------------------------------*/
1217 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1218 {
1219 struct enet_frame *ef_ptr;
1220 unsigned long data_len;
1221 unsigned long rx_eob_isr;
1222 EMAC_4XX_HW_PST hw_p = dev->priv;
1223
1224 int handled = 0;
1225 int i;
1226 int loop_count = 0;
1227
1228 rx_eob_isr = mfdcr (malrxeobisr);
1229 if ((0x80000000 >> hw_p->devnum) & rx_eob_isr) {
1230 /* clear EOB */
1231 mtdcr (malrxeobisr, rx_eob_isr);
1232
1233 /* EMAC RX done */
1234 while (1) { /* do all */
1235 i = hw_p->rx_slot;
1236
1237 if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1238 || (loop_count >= NUM_RX_BUFF))
1239 break;
1240 loop_count++;
1241 hw_p->rx_slot++;
1242 if (NUM_RX_BUFF == hw_p->rx_slot)
1243 hw_p->rx_slot = 0;
1244 handled++;
1245 data_len = (unsigned long) hw_p->rx[i].data_len; /* Get len */
1246 if (data_len) {
1247 if (data_len > ENET_MAX_MTU) /* Check len */
1248 data_len = 0;
1249 else {
1250 if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) { /* Check Errors */
1251 data_len = 0;
1252 hw_p->stats.rx_err_log[hw_p->
1253 rx_err_index]
1254 = hw_p->rx[i].ctrl;
1255 hw_p->rx_err_index++;
1256 if (hw_p->rx_err_index ==
1257 MAX_ERR_LOG)
1258 hw_p->rx_err_index =
1259 0;
1260 				} /* emac_errors */
1261 } /* data_len < max mtu */
1262 } /* if data_len */
1263 if (!data_len) { /* no data */
1264 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY; /* Free Recv Buffer */
1265
1266 hw_p->stats.data_len_err++; /* Error at Rx */
1267 }
1268
1269 /* !data_len */
1270 /* AS.HARNOIS */
1271 /* Check if user has already eaten buffer */
1272 /* if not => ERROR */
1273 else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1274 if (hw_p->is_receiving)
1275 printf ("ERROR : Receive buffers are full!\n");
1276 break;
1277 } else {
1278 hw_p->stats.rx_frames++;
1279 hw_p->stats.rx += data_len;
1280 ef_ptr = (struct enet_frame *) hw_p->rx[i].
1281 data_ptr;
1282 #ifdef INFO_4XX_ENET
1283 hw_p->stats.pkts_rx++;
1284 #endif
1285 /* AS.HARNOIS
1286 * use ring buffer
1287 */
1288 hw_p->rx_ready[hw_p->rx_i_index] = i;
1289 hw_p->rx_i_index++;
1290 if (NUM_RX_BUFF == hw_p->rx_i_index)
1291 hw_p->rx_i_index = 0;
1292
1293 /* AS.HARNOIS
1294 * free receive buffer only when
1295 * buffer has been handled (eth_rx)
1296 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1297 */
1298 } /* if data_len */
1299 } /* while */
1300 	} /* if EMAC RX channel EOB set */
1301 }
1302
1303
1304 static int ppc_4xx_eth_rx (struct eth_device *dev)
1305 {
1306 int length;
1307 int user_index;
1308 unsigned long msr;
1309 EMAC_4XX_HW_PST hw_p = dev->priv;
1310
1311 hw_p->is_receiving = 1; /* tell driver */
1312
1313 for (;;) {
1314 /* AS.HARNOIS
1315 * use ring buffer and
1316 		 * get index from rx buffer descriptor queue
1317 */
1318 user_index = hw_p->rx_ready[hw_p->rx_u_index];
1319 if (user_index == -1) {
1320 length = -1;
1321 break; /* nothing received - leave for() loop */
1322 }
1323
1324 msr = mfmsr ();
1325 mtmsr (msr & ~(MSR_EE));
1326
1327 length = hw_p->rx[user_index].data_len;
1328
1329 /* Pass the packet up to the protocol layers. */
1330 /* NetReceive(NetRxPackets[rxIdx], length - 4); */
1331 /* NetReceive(NetRxPackets[i], length); */
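		/* Note (assumption): the trailing 4 bytes are the Ethernet FCS
		 * that the EMAC leaves in the receive buffer, hence "length - 4". */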
1332 NetReceive (NetRxPackets[user_index], length - 4);
1333 /* Free Recv Buffer */
1334 hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1335 /* Free rx buffer descriptor queue */
1336 hw_p->rx_ready[hw_p->rx_u_index] = -1;
1337 hw_p->rx_u_index++;
1338 if (NUM_RX_BUFF == hw_p->rx_u_index)
1339 hw_p->rx_u_index = 0;
1340
1341 #ifdef INFO_4XX_ENET
1342 hw_p->stats.pkts_handled++;
1343 #endif
1344
1345 mtmsr (msr); /* Enable IRQ's */
1346 }
1347
1348 hw_p->is_receiving = 0; /* tell driver */
1349
1350 return length;
1351 }
1352
1353 int ppc_4xx_eth_initialize (bd_t * bis)
1354 {
1355 static int virgin = 0;
1356 struct eth_device *dev;
1357 int eth_num = 0;
1358 EMAC_4XX_HW_PST hw = NULL;
1359
1360 #if defined(CONFIG_440GX)
1361 unsigned long pfc1;
1362
1363 mfsdr (sdr_pfc1, pfc1);
1364 pfc1 &= ~(0x01e00000);
1365 pfc1 |= 0x01200000;
1366 mtsdr (sdr_pfc1, pfc1);
1367 #endif
1368 /* set phy num and mode */
1369 bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1370 #if defined(CONFIG_PHY1_ADDR)
1371 bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1372 #endif
1373 #if defined(CONFIG_440GX)
1374 bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1375 bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
1376 bis->bi_phymode[0] = 0;
1377 bis->bi_phymode[1] = 0;
1378 bis->bi_phymode[2] = 2;
1379 bis->bi_phymode[3] = 2;
1380
1381 #if defined (CONFIG_440GX)
1382 ppc_4xx_eth_setup_bridge(0, bis);
1383 #endif
1384 #endif
1385
1386 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1387
1388 /* See if we can actually bring up the interface, otherwise, skip it */
1389 switch (eth_num) {
1390 default: /* fall through */
1391 case 0:
1392 if (memcmp (bis->bi_enetaddr, "\0\0\0\0\0\0", 6) == 0) {
1393 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1394 continue;
1395 }
1396 break;
1397 #ifdef CONFIG_HAS_ETH1
1398 case 1:
1399 if (memcmp (bis->bi_enet1addr, "\0\0\0\0\0\0", 6) == 0) {
1400 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1401 continue;
1402 }
1403 break;
1404 #endif
1405 #ifdef CONFIG_HAS_ETH2
1406 case 2:
1407 if (memcmp (bis->bi_enet2addr, "\0\0\0\0\0\0", 6) == 0) {
1408 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1409 continue;
1410 }
1411 break;
1412 #endif
1413 #ifdef CONFIG_HAS_ETH3
1414 case 3:
1415 if (memcmp (bis->bi_enet3addr, "\0\0\0\0\0\0", 6) == 0) {
1416 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1417 continue;
1418 }
1419 break;
1420 #endif
1421 }
1422
1423 /* Allocate device structure */
1424 dev = (struct eth_device *) malloc (sizeof (*dev));
1425 if (dev == NULL) {
1426 printf ("ppc_4xx_eth_initialize: "
1427 "Cannot allocate eth_device %d\n", eth_num);
1428 return (-1);
1429 }
1430 memset(dev, 0, sizeof(*dev));
1431
1432 /* Allocate our private use data */
1433 hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
1434 if (hw == NULL) {
1435 printf ("ppc_4xx_eth_initialize: "
1436 				"Cannot allocate private hw data for eth_device %d\n",
1437 eth_num);
1438 free (dev);
1439 return (-1);
1440 }
1441 memset(hw, 0, sizeof(*hw));
1442
1443 switch (eth_num) {
1444 default: /* fall through */
1445 case 0:
1446 hw->hw_addr = 0;
1447 memcpy (dev->enetaddr, bis->bi_enetaddr, 6);
1448 break;
1449 #ifdef CONFIG_HAS_ETH1
1450 case 1:
1451 hw->hw_addr = 0x100;
1452 memcpy (dev->enetaddr, bis->bi_enet1addr, 6);
1453 break;
1454 #endif
1455 #ifdef CONFIG_HAS_ETH2
1456 case 2:
1457 hw->hw_addr = 0x400;
1458 memcpy (dev->enetaddr, bis->bi_enet2addr, 6);
1459 break;
1460 #endif
1461 #ifdef CONFIG_HAS_ETH3
1462 case 3:
1463 hw->hw_addr = 0x600;
1464 memcpy (dev->enetaddr, bis->bi_enet3addr, 6);
1465 break;
1466 #endif
1467 }
1468
1469 hw->devnum = eth_num;
1470 hw->print_speed = 1;
1471
1472 sprintf (dev->name, "ppc_4xx_eth%d", eth_num);
1473 dev->priv = (void *) hw;
1474 dev->init = ppc_4xx_eth_init;
1475 dev->halt = ppc_4xx_eth_halt;
1476 dev->send = ppc_4xx_eth_send;
1477 dev->recv = ppc_4xx_eth_rx;
1478
1479 if (0 == virgin) {
1480 /* set the MAL IER ??? names may change with new spec ??? */
1481 mal_ier =
1482 MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
1483 MAL_IER_OPBE | MAL_IER_PLBE;
1484 mtdcr (malesr, 0xffffffff); /* clear pending interrupts */
1485 mtdcr (maltxdeir, 0xffffffff); /* clear pending interrupts */
1486 mtdcr (malrxdeir, 0xffffffff); /* clear pending interrupts */
1487 mtdcr (malier, mal_ier);
1488
1489 /* install MAL interrupt handler */
1490 irq_install_handler (VECNUM_MS,
1491 (interrupt_handler_t *) enetInt,
1492 dev);
1493 irq_install_handler (VECNUM_MTE,
1494 (interrupt_handler_t *) enetInt,
1495 dev);
1496 irq_install_handler (VECNUM_MRE,
1497 (interrupt_handler_t *) enetInt,
1498 dev);
1499 irq_install_handler (VECNUM_TXDE,
1500 (interrupt_handler_t *) enetInt,
1501 dev);
1502 irq_install_handler (VECNUM_RXDE,
1503 (interrupt_handler_t *) enetInt,
1504 dev);
1505 virgin = 1;
1506 }
1507
1508 #if defined(CONFIG_NET_MULTI)
1509 eth_register (dev);
1510 #else
1511 emac0_dev = dev;
1512 #endif
1513 #if defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII)
1514 miiphy_register (dev->name,
1515 emac4xx_miiphy_read, emac4xx_miiphy_write);
1516 #endif
1517
1518 } /* end for each supported device */
1519 return (1);
1520 }
1521
1522
1523 #if !defined(CONFIG_NET_MULTI)
1524 void eth_halt (void) {
1525 if (emac0_dev) {
1526 ppc_4xx_eth_halt(emac0_dev);
1527 free(emac0_dev);
1528 emac0_dev = NULL;
1529 }
1530 }
1531
1532 int eth_init (bd_t *bis)
1533 {
1534 ppc_4xx_eth_initialize(bis);
1535 if (emac0_dev) {
1536 return ppc_4xx_eth_init(emac0_dev, bis);
1537 } else {
1538 printf("ERROR: ethaddr not set!\n");
1539 return -1;
1540 }
1541 }
1542
1543 int eth_send(volatile void *packet, int length)
1544 {
1545 return (ppc_4xx_eth_send(emac0_dev, packet, length));
1546 }
1547
1548 int eth_rx(void)
1549 {
1550 return (ppc_4xx_eth_rx(emac0_dev));
1551 }
1552
1553 int emac4xx_miiphy_initialize (bd_t * bis)
1554 {
1555 #if defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII)
1556 miiphy_register ("ppc_4xx_eth0",
1557 emac4xx_miiphy_read, emac4xx_miiphy_write);
1558 #endif
1559
1560 return 0;
1561 }
1562 #endif /* !defined(CONFIG_NET_MULTI) */
1563
1564 #endif /* #if (CONFIG_COMMANDS & CFG_CMD_NET) */