1 /*-----------------------------------------------------------------------------+
2 *
3 * This source code has been made available to you by IBM on an AS-IS
4 * basis. Anyone receiving this source is licensed under IBM
5 * copyrights to use it in any way he or she deems fit, including
6 * copying it, modifying it, compiling it, and redistributing it either
7 * with or without modifications. No license under IBM patents or
8 * patent applications is to be implied by the copyright license.
9 *
10 * Any user of this software should understand that IBM cannot provide
11 * technical support for this software and will not be responsible for
12 * any consequences resulting from the use of this software.
13 *
14 * Any person who transfers this source code or any derivative work
15 * must include the IBM copyright notice, this paragraph, and the
16 * preceding two paragraphs in the transferred software.
17 *
18 * COPYRIGHT I B M CORPORATION 1995
19 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
20 *-----------------------------------------------------------------------------*/
21 /*-----------------------------------------------------------------------------+
22 *
23 * File Name: enetemac.c
24 *
25 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
26 *
27 * Author: Mark Wisner
28 *
29 * Change Activity-
30 *
31 * Date Description of Change BY
32 * --------- --------------------- ---
33 * 05-May-99 Created MKW
34 * 27-Jun-99 Clean up JWB
35 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
36 * 29-Jul-99 Added Full duplex support MKW
37 * 06-Aug-99 Changed names for Mal CR reg MKW
38 * 23-Aug-99 Turned off SYE when running at 10Mbs MKW
39 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
40 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
41 * to avoid chaining maximum sized packets. Push starting
42 * RX descriptor address up to the next cache line boundary.
43 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
44 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
45 * EMAC_RXM register. JWB
46 * 12-Mar-01 anne-sophie.harnois@nextream.fr
47 * - Variables are compatible with those already defined in
48 * include/net.h
49 * - Receive buffer descriptor ring is used to send buffers
50 * to the user
51 * - Info print about send/received/handled packet number if
52 * INFO_405_ENET is set
53 * 17-Apr-01 stefan.roese@esd-electronics.com
54 * - MAL reset in "eth_halt" included
55 * - Enet speed and duplex output now in one line
56 * 08-May-01 stefan.roese@esd-electronics.com
57 * - MAL error handling added (eth_init called again)
58 * 13-Nov-01 stefan.roese@esd-electronics.com
59 * - Set IST bit in EMAC_M1 reg upon 100MBit or full duplex
60 * 04-Jan-02 stefan.roese@esd-electronics.com
61 * - Wait for PHY auto negotiation to complete added
62 * 06-Feb-02 stefan.roese@esd-electronics.com
63 * - Bug fixed in waiting for auto negotiation to complete
64 * 26-Feb-02 stefan.roese@esd-electronics.com
65 * - rx and tx buffer descriptors now allocated (no fixed address
66 * used anymore)
67 * 17-Jun-02 stefan.roese@esd-electronics.com
68 * - MAL error debug printf 'M' removed (rx de interrupt may
69 * occur upon many incoming packets with only 4 rx buffers).
70 *-----------------------------------------------------------------------------*
71 * 17-Nov-03 travis.sawyer@sandburst.com
72 *            - ported from 405gp_enet.c to utilize up to 4 EMAC ports
73 * in the 440GX. This port should work with the 440GP
74 * (2 EMACs) also
75 * 15-Aug-05 sr@denx.de
76 * - merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
77 *              now handling all 4xx CPUs.
78 *-----------------------------------------------------------------------------*/
79
80 #include <config.h>
81 #include <common.h>
82 #include <net.h>
83 #include <asm/processor.h>
84 #include <commproc.h>
85 #include <ppc4xx.h>
86 #include <ppc4xx_enet.h>
87 #include <405_mal.h>
88 #include <miiphy.h>
89 #include <malloc.h>
90 #include "vecnum.h"
91
92 /*
93 * Only compile for platform with AMCC EMAC ethernet controller and
94 * network support enabled.
95 * Remark: CONFIG_405 describes Xilinx PPC405 FPGA without EMAC controller!
96 */
97 #if (CONFIG_COMMANDS & CFG_CMD_NET) && !defined(CONFIG_405) && !defined(CONFIG_IOP480)
98
99 #if !(defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII))
100 #error "CONFIG_MII has to be defined!"
101 #endif
102
103 #if defined(CONFIG_NETCONSOLE) && !defined(CONFIG_NET_MULTI)
104 #error "CONFIG_NET_MULTI has to be defined for NetConsole"
105 #endif
106
107 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
108 #define PHY_AUTONEGOTIATE_TIMEOUT 4000 /* 4000 ms autonegotiate timeout */
109
110 /* Ethernet Transmit and Receive Buffers */
111 /* AS.HARNOIS
112 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
113 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
114 */
115 #define ENET_MAX_MTU PKTSIZE
116 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
117
118 /*-----------------------------------------------------------------------------+
119 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
120 * Interrupt Controller).
121 *-----------------------------------------------------------------------------*/
122 #define MAL_UIC_ERR ( UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
123 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
124 #define EMAC_UIC_DEF UIC_ENET
125 #define EMAC_UIC_DEF1 UIC_ENET1
126 #define SEL_UIC_DEF(p) (p ? UIC_ENET1 : UIC_ENET )
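/* SEL_UIC_DEF(devnum) picks the UIC bit of the EMAC port being serviced
 * (EMAC0 vs. EMAC1); it is used by the non-440 interrupt handler below. */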
127
128 #undef INFO_4XX_ENET
129
130 #define BI_PHYMODE_NONE 0
131 #define BI_PHYMODE_ZMII 1
132 #define BI_PHYMODE_RGMII 2
133 #define BI_PHYMODE_GMII 3
134 #define BI_PHYMODE_RTBI 4
135 #define BI_PHYMODE_TBI 5
136 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
137 #define BI_PHYMODE_SMII 6
138 #define BI_PHYMODE_MII 7
139 #endif
140
141 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
142 #define SDR0_MFR_ETH_CLK_SEL_V(n) ((0x01<<27) / (n+1))
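/* (0x01 << 27) / (n + 1) yields the mask 0x08000000 for EMAC0 and
 * 0x04000000 for EMAC1, i.e. the per-EMAC clock select bits in SDR0_MFR. */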
143 #endif
144
145 /*-----------------------------------------------------------------------------+
146 * Global variables. TX and RX descriptors and buffers.
147 *-----------------------------------------------------------------------------*/
148 /* IER globals */
149 static uint32_t mal_ier;
150
151 #if !defined(CONFIG_NET_MULTI)
152 struct eth_device *emac0_dev = NULL;
153 #endif
154
155 /*
156 * Get count of EMAC devices (doesn't have to be the max. possible number
157 * supported by the cpu)
158 */
159 #if defined(CONFIG_HAS_ETH3)
160 #define LAST_EMAC_NUM 4
161 #elif defined(CONFIG_HAS_ETH2)
162 #define LAST_EMAC_NUM 3
163 #elif defined(CONFIG_HAS_ETH1)
164 #define LAST_EMAC_NUM 2
165 #else
166 #define LAST_EMAC_NUM 1
167 #endif
168
169 /* normal boards start with EMAC0 */
170 #if !defined(CONFIG_EMAC_NR_START)
171 #define CONFIG_EMAC_NR_START 0
172 #endif
173
174 /*-----------------------------------------------------------------------------+
175 * Prototypes and externals.
176 *-----------------------------------------------------------------------------*/
177 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
178
179 int enetInt (struct eth_device *dev);
180 static void mal_err (struct eth_device *dev, unsigned long isr,
181 unsigned long uic, unsigned long maldef,
182 unsigned long mal_errr);
183 static void emac_err (struct eth_device *dev, unsigned long isr);
184
185 extern int phy_setup_aneg (char *devname, unsigned char addr);
186 extern int emac4xx_miiphy_read (char *devname, unsigned char addr,
187 unsigned char reg, unsigned short *value);
188 extern int emac4xx_miiphy_write (char *devname, unsigned char addr,
189 unsigned char reg, unsigned short value);
190
191 /*-----------------------------------------------------------------------------+
192 | ppc_4xx_eth_halt
193 | Disable MAL channel, and EMACn
194 +-----------------------------------------------------------------------------*/
195 static void ppc_4xx_eth_halt (struct eth_device *dev)
196 {
197 EMAC_4XX_HW_PST hw_p = dev->priv;
198 uint32_t failsafe = 10000;
199 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
200 unsigned long mfr;
201 #endif
202
203 out32 (EMAC_IER + hw_p->hw_addr, 0x00000000); /* disable emac interrupts */
204
205 /* 1st reset MAL channel */
206 /* Note: writing a 0 to a channel has no effect */
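/* On 405EP/440EP/440GR each EMAC owns two MAL TX channels, so its first
 * TX channel number is devnum * 2; on the other CPUs it is simply devnum. */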
207 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
208 mtdcr (maltxcarr, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
209 #else
210 mtdcr (maltxcarr, (MAL_CR_MMSR >> hw_p->devnum));
211 #endif
212 mtdcr (malrxcarr, (MAL_CR_MMSR >> hw_p->devnum));
213
214 /* wait for reset */
215 while (mfdcr (malrxcasr) & (MAL_CR_MMSR >> hw_p->devnum)) {
216 udelay (1000); /* Delay 1 MS so as not to hammer the register */
217 failsafe--;
218 if (failsafe == 0)
219 break;
220 }
221
222 /* EMAC RESET */
223 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
224 /* provide clocks for EMAC internal loopback */
225 mfsdr (sdr_mfr, mfr);
226 mfr |= SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
227 mtsdr(sdr_mfr, mfr);
228 #endif
229
230 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
231
232 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
233 /* remove clocks for EMAC internal loopback */
234 mfsdr (sdr_mfr, mfr);
235 mfr &= ~SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
236 mtsdr(sdr_mfr, mfr);
237 #endif
238
239
240 #ifndef CONFIG_NETCONSOLE
241 hw_p->print_speed = 1; /* print speed message again next time */
242 #endif
243
244 return;
245 }
246
247 #if defined (CONFIG_440GX)
248 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
249 {
250 unsigned long pfc1;
251 unsigned long zmiifer;
252 unsigned long rmiifer;
253
254 mfsdr(sdr_pfc1, pfc1);
255 pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
256
257 zmiifer = 0;
258 rmiifer = 0;
259
260 switch (pfc1) {
261 case 1:
262 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
263 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
264 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
265 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
266 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
267 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
268 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
269 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
270 break;
271 case 2:
272 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
273 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
274 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
275 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
276 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
277 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
278 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
279 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
280 break;
281 case 3:
282 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
283 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
284 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
285 bis->bi_phymode[1] = BI_PHYMODE_NONE;
286 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
287 bis->bi_phymode[3] = BI_PHYMODE_NONE;
288 break;
289 case 4:
290 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
291 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
292 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
293 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
294 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
295 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
296 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
297 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
298 break;
299 case 5:
300 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
301 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
302 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
303 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
304 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
305 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
306 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
307 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
308 break;
309 case 6:
310 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
311 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
312 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
313 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
314 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
315 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
316 break;
317 case 0:
318 default:
319 zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
320 rmiifer = 0x0;
321 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
322 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
323 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
324 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
325 break;
326 }
327
328 /* Ensure we setup mdio for this devnum and ONLY this devnum */
329 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
330
331 out32 (ZMII_FER, zmiifer);
332 out32 (RGMII_FER, rmiifer);
333
334 return ((int)pfc1);
335 }
336 #endif  /* CONFIG_440GX */
337
338 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
339 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
340 {
341 unsigned long zmiifer=0x0;
342 unsigned long pfc1;
343
344 mfsdr(sdr_pfc1, pfc1);
345 pfc1 &= SDR0_PFC1_SELECT_MASK;
346
347 switch (pfc1) {
348 case SDR0_PFC1_SELECT_CONFIG_2:
349 /* 1 x GMII port */
350 out32 (ZMII_FER, 0x00);
351 out32 (RGMII_FER, 0x00000037);
352 bis->bi_phymode[0] = BI_PHYMODE_GMII;
353 bis->bi_phymode[1] = BI_PHYMODE_NONE;
354 break;
355 case SDR0_PFC1_SELECT_CONFIG_4:
356 /* 2 x RGMII ports */
357 out32 (ZMII_FER, 0x00);
358 out32 (RGMII_FER, 0x00000055);
359 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
360 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
361 break;
362 case SDR0_PFC1_SELECT_CONFIG_6:
363 /* 2 x SMII ports */
364 out32 (ZMII_FER,
365 ((ZMII_FER_SMII) << ZMII_FER_V(0)) |
366 ((ZMII_FER_SMII) << ZMII_FER_V(1)));
367 out32 (RGMII_FER, 0x00000000);
368 bis->bi_phymode[0] = BI_PHYMODE_SMII;
369 bis->bi_phymode[1] = BI_PHYMODE_SMII;
370 break;
371 case SDR0_PFC1_SELECT_CONFIG_1_2:
372 /* only 1 x MII supported */
373 out32 (ZMII_FER, (ZMII_FER_MII) << ZMII_FER_V(0));
374 out32 (RGMII_FER, 0x00000000);
375 bis->bi_phymode[0] = BI_PHYMODE_MII;
376 bis->bi_phymode[1] = BI_PHYMODE_NONE;
377 break;
378 default:
379 break;
380 }
381
382 /* Ensure we setup mdio for this devnum and ONLY this devnum */
383 zmiifer = in32 (ZMII_FER);
384 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
385 out32 (ZMII_FER, zmiifer);
386
387 return ((int)0x0);
388 }
389 #endif /* CONFIG_440EPX */
390
391 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
392 {
393 int i, j;
394 unsigned long reg = 0;
395 unsigned long msr;
396 unsigned long speed;
397 unsigned long duplex;
398 unsigned long failsafe;
399 unsigned mode_reg;
400 unsigned short devnum;
401 unsigned short reg_short;
402 #if defined(CONFIG_440GX) || \
403 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
404 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
405 sys_info_t sysinfo;
406 #if defined(CONFIG_440GX) || defined(CONFIG_440SPE) || \
407 defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
408 int ethgroup = -1;
409 #endif
410 #endif
411 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || defined(CONFIG_440SPE)
412 unsigned long mfr;
413 #endif
414
415
416 EMAC_4XX_HW_PST hw_p = dev->priv;
417
418 /* before doing anything, figure out if we have a MAC address */
419 /* if not, bail */
420 if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
421 printf("ERROR: ethaddr not set!\n");
422 return -1;
423 }
424
425 #if defined(CONFIG_440GX) || \
426 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
427 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
428 /* Need to get the OPB frequency so we can access the PHY */
429 get_sys_info (&sysinfo);
430 #endif
431
432 msr = mfmsr ();
433 mtmsr (msr & ~(MSR_EE)); /* disable interrupts */
434
435 devnum = hw_p->devnum;
436
437 #ifdef INFO_4XX_ENET
438 /* AS.HARNOIS
439 * We should have :
440 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
441  * In most cases hw_p->stats.pkts_handled = hw_p->stats.pkts_rx, but it
442  * is possible that new packets (unrelated to the current
443  * transfer) have had time to arrive before NetLoop calls
444  * eth_halt
445 */
446 printf ("About preceeding transfer (eth%d):\n"
447 "- Sent packet number %d\n"
448 "- Received packet number %d\n"
449 "- Handled packet number %d\n",
450 hw_p->devnum,
451 hw_p->stats.pkts_tx,
452 hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
453
454 hw_p->stats.pkts_tx = 0;
455 hw_p->stats.pkts_rx = 0;
456 hw_p->stats.pkts_handled = 0;
457 hw_p->print_speed = 1; /* print speed message again next time */
458 #endif
459
460 hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
461 hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
462
463 hw_p->rx_slot = 0; /* MAL Receive Slot */
464 hw_p->rx_i_index = 0; /* Receive Interrupt Queue Index */
465 hw_p->rx_u_index = 0; /* Receive User Queue Index */
466
467 hw_p->tx_slot = 0; /* MAL Transmit Slot */
468 hw_p->tx_i_index = 0; /* Transmit Interrupt Queue Index */
469 hw_p->tx_u_index = 0; /* Transmit User Queue Index */
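	/* rx_ready[] forms a small ring between interrupt and polling code:
	 * enet_rcv() records filled RX descriptor indices at rx_i_index,
	 * ppc_4xx_eth_rx() consumes them at rx_u_index. */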
470
471 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
472 /* set RMII mode */
473 /* NOTE: 440GX spec states that mode is mutually exclusive */
474 /* NOTE: Therefore, disable all other EMACS, since we handle */
475 /* NOTE: only one emac at a time */
476 reg = 0;
477 out32 (ZMII_FER, 0);
478 udelay (100);
479
480 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
481 out32 (ZMII_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
482 #elif defined(CONFIG_440GX) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
483 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
484 #elif defined(CONFIG_440GP)
485 /* set RMII mode */
486 out32 (ZMII_FER, ZMII_RMII | ZMII_MDI0);
487 #else
488 if ((devnum == 0) || (devnum == 1)) {
489 out32 (ZMII_FER, (ZMII_FER_SMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
490 } else { /* ((devnum == 2) || (devnum == 3)) */
491 out32 (ZMII_FER, ZMII_FER_MDI << ZMII_FER_V (devnum));
492 out32 (RGMII_FER, ((RGMII_FER_RGMII << RGMII_FER_V (2)) |
493 (RGMII_FER_RGMII << RGMII_FER_V (3))));
494 }
495 #endif
496
497 out32 (ZMII_SSR, ZMII_SSR_SP << ZMII_SSR_V(devnum));
498 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
499
500 __asm__ volatile ("eieio");
501
502 /* reset emac so we have access to the phy */
503 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
504 /* provide clocks for EMAC internal loopback */
505 mfsdr (sdr_mfr, mfr);
506 mfr |= SDR0_MFR_ETH_CLK_SEL_V(devnum);
507 mtsdr(sdr_mfr, mfr);
508 #endif
509
510 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
511 __asm__ volatile ("eieio");
512
513 failsafe = 1000;
514 while ((in32 (EMAC_M0 + hw_p->hw_addr) & (EMAC_M0_SRST)) && failsafe) {
515 udelay (1000);
516 failsafe--;
517 }
518 if (failsafe <= 0)
519 printf("\nProblem resetting EMAC!\n");
520
521 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
522 /* remove clocks for EMAC internal loopback */
523 mfsdr (sdr_mfr, mfr);
524 mfr &= ~SDR0_MFR_ETH_CLK_SEL_V(devnum);
525 mtsdr(sdr_mfr, mfr);
526 #endif
527
528 #if defined(CONFIG_440GX) || \
529 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
530 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
531 /* Whack the M1 register */
532 mode_reg = 0x0;
533 mode_reg &= ~0x00000038;
534         if (sysinfo.freqOPB <= 50000000);       /* <= 50 MHz: OBCI field stays 0 */
535 else if (sysinfo.freqOPB <= 66666667)
536 mode_reg |= EMAC_M1_OBCI_66;
537 else if (sysinfo.freqOPB <= 83333333)
538 mode_reg |= EMAC_M1_OBCI_83;
539 else if (sysinfo.freqOPB <= 100000000)
540 mode_reg |= EMAC_M1_OBCI_100;
541 else
542 mode_reg |= EMAC_M1_OBCI_GT100;
543
544 out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
545 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
546
547 /* wait for PHY to complete auto negotiation */
548 reg_short = 0;
549 #ifndef CONFIG_CS8952_PHY
550 switch (devnum) {
551 case 0:
552 reg = CONFIG_PHY_ADDR;
553 break;
554 #if defined (CONFIG_PHY1_ADDR)
555 case 1:
556 reg = CONFIG_PHY1_ADDR;
557 break;
558 #endif
559 #if defined (CONFIG_440GX)
560 case 2:
561 reg = CONFIG_PHY2_ADDR;
562 break;
563 case 3:
564 reg = CONFIG_PHY3_ADDR;
565 break;
566 #endif
567 default:
568 reg = CONFIG_PHY_ADDR;
569 break;
570 }
571
572 bis->bi_phynum[devnum] = reg;
573
574 #if defined(CONFIG_PHY_RESET)
575 /*
576 * Reset the phy, only if its the first time through
577 * otherwise, just check the speeds & feeds
578 */
579 if (hw_p->first_init == 0) {
580 #if defined(CONFIG_M88E1111_PHY)
581 miiphy_write (dev->name, reg, 0x14, 0x0ce3);
582 miiphy_write (dev->name, reg, 0x18, 0x4101);
583 miiphy_write (dev->name, reg, 0x09, 0x0e00);
584 miiphy_write (dev->name, reg, 0x04, 0x01e1);
585 #endif
586 miiphy_reset (dev->name, reg);
587
588 #if defined(CONFIG_440GX) || \
589 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
590 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
591
592 #if defined(CONFIG_CIS8201_PHY)
593 /*
594 * Cicada 8201 PHY needs to have an extended register whacked
595 * for RGMII mode.
596 */
597 if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
598 #if defined(CONFIG_CIS8201_SHORT_ETCH)
599 miiphy_write (dev->name, reg, 23, 0x1300);
600 #else
601 miiphy_write (dev->name, reg, 23, 0x1000);
602 #endif
603 /*
604 * Vitesse VSC8201/Cicada CIS8201 errata:
605 * Interoperability problem with Intel 82547EI phys
606 * This work around (provided by Vitesse) changes
607 * the default timer convergence from 8ms to 12ms
608 */
609 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
610 miiphy_write (dev->name, reg, 0x08, 0x0200);
611 miiphy_write (dev->name, reg, 0x1f, 0x52b5);
612 miiphy_write (dev->name, reg, 0x02, 0x0004);
613 miiphy_write (dev->name, reg, 0x01, 0x0671);
614 miiphy_write (dev->name, reg, 0x00, 0x8fae);
615 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
616 miiphy_write (dev->name, reg, 0x08, 0x0000);
617 miiphy_write (dev->name, reg, 0x1f, 0x0000);
618 /* end Vitesse/Cicada errata */
619 }
620 #endif
621
622 #if defined(CONFIG_ET1011C_PHY)
623 /*
624 * Agere ET1011c PHY needs to have an extended register whacked
625 * for RGMII mode.
626 */
627                 if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
628 miiphy_read (dev->name, reg, 0x16, &reg_short);
629 reg_short &= ~(0x7);
630 reg_short |= 0x6; /* RGMII DLL Delay*/
631 miiphy_write (dev->name, reg, 0x16, reg_short);
632
633 miiphy_read (dev->name, reg, 0x17, &reg_short);
634 reg_short &= ~(0x40);
635 miiphy_write (dev->name, reg, 0x17, reg_short);
636
637 miiphy_write(dev->name, reg, 0x1c, 0x74f0);
638 }
639 #endif
640
641 #endif
642 /* Start/Restart autonegotiation */
643 phy_setup_aneg (dev->name, reg);
644 udelay (1000);
645 }
646 #endif /* defined(CONFIG_PHY_RESET) */
647
648 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
649
650 /*
651 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
652 */
653 if ((reg_short & PHY_BMSR_AUTN_ABLE)
654 && !(reg_short & PHY_BMSR_AUTN_COMP)) {
655 puts ("Waiting for PHY auto negotiation to complete");
656 i = 0;
657 while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
658 /*
659 * Timeout reached ?
660 */
661 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
662 puts (" TIMEOUT !\n");
663 break;
664 }
665
666 if ((i++ % 1000) == 0) {
667 putc ('.');
668 }
669 udelay (1000); /* 1 ms */
670 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
671
672 }
673 puts (" done\n");
674 udelay (500000); /* another 500 ms (results in faster booting) */
675 }
676 #endif /* #ifndef CONFIG_CS8952_PHY */
677
678 speed = miiphy_speed (dev->name, reg);
679 duplex = miiphy_duplex (dev->name, reg);
680
681 if (hw_p->print_speed) {
682 hw_p->print_speed = 0;
683 printf ("ENET Speed is %d Mbps - %s duplex connection (EMAC%d)\n",
684 (int) speed, (duplex == HALF) ? "HALF" : "FULL",
685 hw_p->devnum);
686 }
687
688 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE) && \
689 !defined(CONFIG_440EPX) && !defined(CONFIG_440GRX)
690 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
691 mfsdr(sdr_mfr, reg);
692 if (speed == 100) {
693 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
694 } else {
695 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
696 }
697 mtsdr(sdr_mfr, reg);
698 #endif
699
700 /* Set ZMII/RGMII speed according to the phy link speed */
701 reg = in32 (ZMII_SSR);
702 if ( (speed == 100) || (speed == 1000) )
703 out32 (ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
704 else
705 out32 (ZMII_SSR, reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));
706
707 if ((devnum == 2) || (devnum == 3)) {
708 if (speed == 1000)
709 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
710 else if (speed == 100)
711 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
712 else if (speed == 10)
713 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
714 else {
715 printf("Error in RGMII Speed\n");
716 return -1;
717 }
718 out32 (RGMII_SSR, reg);
719 }
720 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
721
722 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
723 if (speed == 1000)
724 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
725 else if (speed == 100)
726 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
727 else if (speed == 10)
728 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
729 else {
730 printf("Error in RGMII Speed\n");
731 return -1;
732 }
733 out32 (RGMII_SSR, reg);
734 #endif
735
736 /* set the Mal configuration reg */
737 #if defined(CONFIG_440GX) || \
738 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
739 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
740 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
741 MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
742 #else
743 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
744 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
745 if (get_pvr() == PVR_440GP_RB) {
746 mtdcr (malmcr, mfdcr(malmcr) & ~MAL_CR_PLBB);
747 }
748 #endif
749
750 /* Free "old" buffers */
751 if (hw_p->alloc_tx_buf)
752 free (hw_p->alloc_tx_buf);
753 if (hw_p->alloc_rx_buf)
754 free (hw_p->alloc_rx_buf);
755
756 /*
757          * Malloc MAL buffer descriptors, make sure they are
758          * aligned on the cache line boundary size
759          * (401/403/IOP480 = 16, 405 = 32)
760          * and don't cross cache block boundaries.
761 */
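	/*
	 * Example (assuming CFG_CACHELINE_SIZE is 32): a malloc() result of
	 * 0x00101234 is pushed up by 32 - (0x00101234 & CACHELINE_MASK)
	 * bytes to 0x00101240, the next 32-byte boundary.
	 */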
762 hw_p->alloc_tx_buf =
763 (mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_TX_BUFF) +
764 ((2 * CFG_CACHELINE_SIZE) - 2));
765 if (NULL == hw_p->alloc_tx_buf)
766 return -1;
767 if (((int) hw_p->alloc_tx_buf & CACHELINE_MASK) != 0) {
768 hw_p->tx =
769 (mal_desc_t *) ((int) hw_p->alloc_tx_buf +
770 CFG_CACHELINE_SIZE -
771 ((int) hw_p->
772 alloc_tx_buf & CACHELINE_MASK));
773 } else {
774 hw_p->tx = hw_p->alloc_tx_buf;
775 }
776
777 hw_p->alloc_rx_buf =
778 (mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_RX_BUFF) +
779 ((2 * CFG_CACHELINE_SIZE) - 2));
780 if (NULL == hw_p->alloc_rx_buf) {
781 free(hw_p->alloc_tx_buf);
782 hw_p->alloc_tx_buf = NULL;
783 return -1;
784 }
785
786 if (((int) hw_p->alloc_rx_buf & CACHELINE_MASK) != 0) {
787 hw_p->rx =
788 (mal_desc_t *) ((int) hw_p->alloc_rx_buf +
789 CFG_CACHELINE_SIZE -
790 ((int) hw_p->
791 alloc_rx_buf & CACHELINE_MASK));
792 } else {
793 hw_p->rx = hw_p->alloc_rx_buf;
794 }
795
796 for (i = 0; i < NUM_TX_BUFF; i++) {
797 hw_p->tx[i].ctrl = 0;
798 hw_p->tx[i].data_len = 0;
799 if (hw_p->first_init == 0) {
800 hw_p->txbuf_ptr =
801 (char *) malloc (ENET_MAX_MTU_ALIGNED);
802 if (NULL == hw_p->txbuf_ptr) {
803 free(hw_p->alloc_rx_buf);
804 free(hw_p->alloc_tx_buf);
805 hw_p->alloc_rx_buf = NULL;
806 hw_p->alloc_tx_buf = NULL;
807 for(j = 0; j < i; j++) {
808                                 free(hw_p->tx[j].data_ptr); /* free previously allocated buffers */
809                                 hw_p->tx[j].data_ptr = NULL;
810 }
811 }
812 }
813 hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
814 if ((NUM_TX_BUFF - 1) == i)
815 hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
816 hw_p->tx_run[i] = -1;
817 #if 0
818 printf ("TX_BUFF %d @ 0x%08lx\n", i,
819 (ulong) hw_p->tx[i].data_ptr);
820 #endif
821 }
822
823 for (i = 0; i < NUM_RX_BUFF; i++) {
824 hw_p->rx[i].ctrl = 0;
825 hw_p->rx[i].data_len = 0;
826 /* rx[i].data_ptr = (char *) &rx_buff[i]; */
827 hw_p->rx[i].data_ptr = (char *) NetRxPackets[i];
828 if ((NUM_RX_BUFF - 1) == i)
829 hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
830 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
831 hw_p->rx_ready[i] = -1;
832 #if 0
833 printf ("RX_BUFF %d @ 0x%08lx\n", i, (ulong) hw_p->rx[i].data_ptr);
834 #endif
835 }
836
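	/* Program the individual MAC address: IAH holds the two high bytes,
	 * IAL the remaining four. E.g. 00:11:22:33:44:55 is written as
	 * IAH = 0x00000011 and IAL = 0x22334455. */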
837 reg = 0x00000000;
838
839 reg |= dev->enetaddr[0]; /* set high address */
840 reg = reg << 8;
841 reg |= dev->enetaddr[1];
842
843 out32 (EMAC_IAH + hw_p->hw_addr, reg);
844
845 reg = 0x00000000;
846 reg |= dev->enetaddr[2]; /* set low address */
847 reg = reg << 8;
848 reg |= dev->enetaddr[3];
849 reg = reg << 8;
850 reg |= dev->enetaddr[4];
851 reg = reg << 8;
852 reg |= dev->enetaddr[5];
853
854 out32 (EMAC_IAL + hw_p->hw_addr, reg);
855
856 switch (devnum) {
857 case 1:
858 /* setup MAL tx & rx channel pointers */
859 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
860 mtdcr (maltxctp2r, hw_p->tx);
861 #else
862 mtdcr (maltxctp1r, hw_p->tx);
863 #endif
864 #if defined(CONFIG_440)
865 mtdcr (maltxbattr, 0x0);
866 mtdcr (malrxbattr, 0x0);
867 #endif
868 mtdcr (malrxctp1r, hw_p->rx);
869 /* set RX buffer size */
870 mtdcr (malrcbs1, ENET_MAX_MTU_ALIGNED / 16);
871 break;
872 #if defined (CONFIG_440GX)
873 case 2:
874 /* setup MAL tx & rx channel pointers */
875 mtdcr (maltxbattr, 0x0);
876 mtdcr (malrxbattr, 0x0);
877 mtdcr (maltxctp2r, hw_p->tx);
878 mtdcr (malrxctp2r, hw_p->rx);
879 /* set RX buffer size */
880 mtdcr (malrcbs2, ENET_MAX_MTU_ALIGNED / 16);
881 break;
882 case 3:
883 /* setup MAL tx & rx channel pointers */
884 mtdcr (maltxbattr, 0x0);
885 mtdcr (maltxctp3r, hw_p->tx);
886 mtdcr (malrxbattr, 0x0);
887 mtdcr (malrxctp3r, hw_p->rx);
888 /* set RX buffer size */
889 mtdcr (malrcbs3, ENET_MAX_MTU_ALIGNED / 16);
890 break;
891 #endif /* CONFIG_440GX */
892 case 0:
893 default:
894 /* setup MAL tx & rx channel pointers */
895 #if defined(CONFIG_440)
896 mtdcr (maltxbattr, 0x0);
897 mtdcr (malrxbattr, 0x0);
898 #endif
899 mtdcr (maltxctp0r, hw_p->tx);
900 mtdcr (malrxctp0r, hw_p->rx);
901 /* set RX buffer size */
902 mtdcr (malrcbs0, ENET_MAX_MTU_ALIGNED / 16);
903 break;
904 }
905
906 /* Enable MAL transmit and receive channels */
907 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
908 mtdcr (maltxcasr, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
909 #else
910 mtdcr (maltxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
911 #endif
912 mtdcr (malrxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
913
914 /* set transmit enable & receive enable */
915 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_TXE | EMAC_M0_RXE);
916
917 /* set receive fifo to 4k and tx fifo to 2k */
918 mode_reg = in32 (EMAC_M1 + hw_p->hw_addr);
919 mode_reg |= EMAC_M1_RFS_4K | EMAC_M1_TX_FIFO_2K;
920
921 /* set speed */
922 if (speed == _1000BASET) {
923 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
924 defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
925 unsigned long pfc1;
926
927 mfsdr (sdr_pfc1, pfc1);
928 pfc1 |= SDR0_PFC1_EM_1000;
929 mtsdr (sdr_pfc1, pfc1);
930 #endif
931 mode_reg = mode_reg | EMAC_M1_MF_1000MBPS | EMAC_M1_IST;
932 } else if (speed == _100BASET)
933 mode_reg = mode_reg | EMAC_M1_MF_100MBPS | EMAC_M1_IST;
934 else
935 mode_reg = mode_reg & ~0x00C00000; /* 10 MBPS */
936 if (duplex == FULL)
937 mode_reg = mode_reg | 0x80000000 | EMAC_M1_IST;
938
939 out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
940
941         /* Enable broadcast and individual address */
942 /* TBS: enabling runts as some misbehaved nics will send runts */
943 out32 (EMAC_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
944
945 /* we probably need to set the tx mode1 reg? maybe at tx time */
946
947 /* set transmit request threshold register */
948 out32 (EMAC_TRTR + hw_p->hw_addr, 0x18000000); /* 256 byte threshold */
949
950 /* set receive low/high water mark register */
951 #if defined(CONFIG_440)
952 /* 440s has a 64 byte burst length */
953 out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
954 #else
955 /* 405s have a 16 byte burst length */
956 out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
957 #endif /* defined(CONFIG_440) */
958 out32 (EMAC_TXM1 + hw_p->hw_addr, 0xf8640000);
959
960 /* Set fifo limit entry in tx mode 0 */
961 out32 (EMAC_TXM0 + hw_p->hw_addr, 0x00000003);
962 /* Frame gap set */
963 out32 (EMAC_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
964
965 /* Set EMAC IER */
966 hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
967 if (speed == _100BASET)
968 hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
969
970 out32 (EMAC_ISR + hw_p->hw_addr, 0xffffffff); /* clear pending interrupts */
971 out32 (EMAC_IER + hw_p->hw_addr, hw_p->emac_ier);
972
973 if (hw_p->first_init == 0) {
974 /*
975 * Connect interrupt service routines
976 */
977 irq_install_handler (VECNUM_ETH0 + (hw_p->devnum * 2),
978 (interrupt_handler_t *) enetInt, dev);
979 }
980
981 mtmsr (msr); /* enable interrupts again */
982
983 hw_p->bis = bis;
984 hw_p->first_init = 1;
985
986 return (1);
987 }
988
989
990 static int ppc_4xx_eth_send (struct eth_device *dev, volatile void *ptr,
991 int len)
992 {
993 struct enet_frame *ef_ptr;
994 ulong time_start, time_now;
995 unsigned long temp_txm0;
996 EMAC_4XX_HW_PST hw_p = dev->priv;
997
998 ef_ptr = (struct enet_frame *) ptr;
999
1000 /*-----------------------------------------------------------------------+
1001 * Copy in our address into the frame.
1002 *-----------------------------------------------------------------------*/
1003 (void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
1004
1005 /*-----------------------------------------------------------------------+
1006 * If frame is too long or too short, modify length.
1007 *-----------------------------------------------------------------------*/
1008 /* TBS: where does the fragment go???? */
1009 if (len > ENET_MAX_MTU)
1010 len = ENET_MAX_MTU;
1011
1012 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
1013 memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
1014
1015 /*-----------------------------------------------------------------------+
1016 * set TX Buffer busy, and send it
1017 *-----------------------------------------------------------------------*/
1018 hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
1019 EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
1020 ~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
1021 if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
1022 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
1023
1024 hw_p->tx[hw_p->tx_slot].data_len = (short) len;
1025 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
1026
1027 __asm__ volatile ("eieio");
1028
1029 out32 (EMAC_TXM0 + hw_p->hw_addr,
1030 in32 (EMAC_TXM0 + hw_p->hw_addr) | EMAC_TXM0_GNP0);
1031 #ifdef INFO_4XX_ENET
1032 hw_p->stats.pkts_tx++;
1033 #endif
1034
1035 /*-----------------------------------------------------------------------+
1036  * poll until the packet is sent and then make sure it is OK
1037 *-----------------------------------------------------------------------*/
1038 time_start = get_timer (0);
1039 while (1) {
1040 temp_txm0 = in32 (EMAC_TXM0 + hw_p->hw_addr);
1041 /* loop until either TINT turns on or 3 seconds elapse */
1042 if ((temp_txm0 & EMAC_TXM0_GNP0) != 0) {
1043                 /* GNP0 is still set, so the EMAC is not done with this
1044                  * packet yet; give up after 3 seconds. If there is an
1045                  * error, an interrupt should happen when we return
1046 */
1047 time_now = get_timer (0);
1048 if ((time_now - time_start) > 3000) {
1049 return (-1);
1050 }
1051 } else {
1052 return (len);
1053 }
1054 }
1055 }
1056
1057
1058 #if defined (CONFIG_440)
1059
1060 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
1061 /*
1062 * Hack: On 440SP all enet irq sources are located on UIC1
1063 * Needs some cleanup. --sr
1064 */
1065 #define UIC0MSR uic1msr
1066 #define UIC0SR uic1sr
1067 #else
1068 #define UIC0MSR uic0msr
1069 #define UIC0SR uic0sr
1070 #endif
1071
1072 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
1073 #define UICMSR_ETHX uic0msr
1074 #define UICSR_ETHX uic0sr
1075 #else
1076 #define UICMSR_ETHX uic1msr
1077 #define UICSR_ETHX uic1sr
1078 #endif
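/* The EMAC0/EMAC1 status interrupts sit on UIC0 for 440EPX/GRX and on UIC1
 * for the other 440 variants, so UICMSR_ETHX/UICSR_ETHX select the matching
 * mask/status DCRs. */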
1079
1080 int enetInt (struct eth_device *dev)
1081 {
1082 int serviced;
1083 int rc = -1; /* default to not us */
1084 unsigned long mal_isr;
1085 unsigned long emac_isr = 0;
1086 unsigned long mal_rx_eob;
1087 unsigned long my_uic0msr, my_uic1msr;
1088 unsigned long my_uicmsr_ethx;
1089
1090 #if defined(CONFIG_440GX)
1091 unsigned long my_uic2msr;
1092 #endif
1093 EMAC_4XX_HW_PST hw_p;
1094
1095 /*
1096 * Because the mal is generic, we need to get the current
1097 * eth device
1098 */
1099 #if defined(CONFIG_NET_MULTI)
1100 dev = eth_get_dev();
1101 #else
1102 dev = emac0_dev;
1103 #endif
1104
1105 hw_p = dev->priv;
1106
1107 /* enter loop that stays in interrupt code until nothing to service */
1108 do {
1109 serviced = 0;
1110
1111 my_uic0msr = mfdcr (UIC0MSR);
1112 my_uic1msr = mfdcr (uic1msr);
1113 #if defined(CONFIG_440GX)
1114 my_uic2msr = mfdcr (uic2msr);
1115 #endif
1116 my_uicmsr_ethx = mfdcr (UICMSR_ETHX);
1117
1118 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
1119 && !(my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))
1120 && !(my_uicmsr_ethx & (UIC_ETH0 | UIC_ETH1))) {
1121 /* not for us */
1122 return (rc);
1123 }
1124 #if defined (CONFIG_440GX)
1125 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
1126 && !(my_uic2msr & (UIC_ETH2 | UIC_ETH3))) {
1127 /* not for us */
1128 return (rc);
1129 }
1130 #endif
1131 /* get and clear controller status interrupts */
1132 /* look at Mal and EMAC interrupts */
1133 if ((my_uic0msr & (UIC_MRE | UIC_MTE))
1134 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1135 /* we have a MAL interrupt */
1136 mal_isr = mfdcr (malesr);
1137 /* look for mal error */
1138 if (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE)) {
1139 mal_err (dev, mal_isr, my_uic1msr, MAL_UIC_DEF, MAL_UIC_ERR);
1140 serviced = 1;
1141 rc = 0;
1142 }
1143 }
1144
1145 /* port by port dispatch of emac interrupts */
1146 if (hw_p->devnum == 0) {
1147 if (UIC_ETH0 & my_uicmsr_ethx) { /* look for EMAC errors */
1148 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1149 if ((hw_p->emac_ier & emac_isr) != 0) {
1150 emac_err (dev, emac_isr);
1151 serviced = 1;
1152 rc = 0;
1153 }
1154 }
1155 if ((hw_p->emac_ier & emac_isr)
1156 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1157 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1158 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1159 mtdcr (UICSR_ETHX, UIC_ETH0); /* Clear */
1160 return (rc); /* we had errors so get out */
1161 }
1162 }
1163
1164 #if !defined(CONFIG_440SP)
1165 if (hw_p->devnum == 1) {
1166 if (UIC_ETH1 & my_uicmsr_ethx) { /* look for EMAC errors */
1167 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1168 if ((hw_p->emac_ier & emac_isr) != 0) {
1169 emac_err (dev, emac_isr);
1170 serviced = 1;
1171 rc = 0;
1172 }
1173 }
1174 if ((hw_p->emac_ier & emac_isr)
1175 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1176 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1177 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1178 mtdcr (UICSR_ETHX, UIC_ETH1); /* Clear */
1179 return (rc); /* we had errors so get out */
1180 }
1181 }
1182 #if defined (CONFIG_440GX)
1183 if (hw_p->devnum == 2) {
1184 if (UIC_ETH2 & my_uic2msr) { /* look for EMAC errors */
1185 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1186 if ((hw_p->emac_ier & emac_isr) != 0) {
1187 emac_err (dev, emac_isr);
1188 serviced = 1;
1189 rc = 0;
1190 }
1191 }
1192 if ((hw_p->emac_ier & emac_isr)
1193 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1194 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1195 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1196 mtdcr (uic2sr, UIC_ETH2);
1197 return (rc); /* we had errors so get out */
1198 }
1199 }
1200
1201 if (hw_p->devnum == 3) {
1202 if (UIC_ETH3 & my_uic2msr) { /* look for EMAC errors */
1203 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1204 if ((hw_p->emac_ier & emac_isr) != 0) {
1205 emac_err (dev, emac_isr);
1206 serviced = 1;
1207 rc = 0;
1208 }
1209 }
1210 if ((hw_p->emac_ier & emac_isr)
1211 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1212 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1213 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1214 mtdcr (uic2sr, UIC_ETH3);
1215 return (rc); /* we had errors so get out */
1216 }
1217 }
1218 #endif /* CONFIG_440GX */
1219 #endif /* !CONFIG_440SP */
1220
1221                 /* handle MAL TX EOB interrupt from a tx */
1222 if (my_uic0msr & UIC_MTE) {
1223 mal_rx_eob = mfdcr (maltxeobisr);
1224 mtdcr (maltxeobisr, mal_rx_eob);
1225 mtdcr (UIC0SR, UIC_MTE);
1226 }
1227                 /* handle MAL RX EOB interrupt from a receive */
1228 /* check for EOB on valid channels */
1229 if (my_uic0msr & UIC_MRE) {
1230 mal_rx_eob = mfdcr (malrxeobisr);
1231 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1232 /* clear EOB
1233 mtdcr(malrxeobisr, mal_rx_eob); */
1234 enet_rcv (dev, emac_isr);
1235 /* indicate that we serviced an interrupt */
1236 serviced = 1;
1237 rc = 0;
1238 }
1239 }
1240
1241 mtdcr (UIC0SR, UIC_MRE); /* Clear */
1242 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1243 switch (hw_p->devnum) {
1244 case 0:
1245 mtdcr (UICSR_ETHX, UIC_ETH0);
1246 break;
1247 case 1:
1248 mtdcr (UICSR_ETHX, UIC_ETH1);
1249 break;
1250 #if defined (CONFIG_440GX)
1251 case 2:
1252 mtdcr (uic2sr, UIC_ETH2);
1253 break;
1254 case 3:
1255 mtdcr (uic2sr, UIC_ETH3);
1256 break;
1257 #endif /* CONFIG_440GX */
1258 default:
1259 break;
1260 }
1261 } while (serviced);
1262
1263 return (rc);
1264 }
1265
1266 #else /* CONFIG_440 */
1267
1268 int enetInt (struct eth_device *dev)
1269 {
1270 int serviced;
1271 int rc = -1; /* default to not us */
1272 unsigned long mal_isr;
1273 unsigned long emac_isr = 0;
1274 unsigned long mal_rx_eob;
1275 unsigned long my_uicmsr;
1276
1277 EMAC_4XX_HW_PST hw_p;
1278
1279 /*
1280 * Because the mal is generic, we need to get the current
1281 * eth device
1282 */
1283 #if defined(CONFIG_NET_MULTI)
1284 dev = eth_get_dev();
1285 #else
1286 dev = emac0_dev;
1287 #endif
1288
1289 hw_p = dev->priv;
1290
1291 /* enter loop that stays in interrupt code until nothing to service */
1292 do {
1293 serviced = 0;
1294
1295 my_uicmsr = mfdcr (uicmsr);
1296
1297 if ((my_uicmsr & (MAL_UIC_DEF | EMAC_UIC_DEF)) == 0) { /* not for us */
1298 return (rc);
1299 }
1300 /* get and clear controller status interrupts */
1301 /* look at Mal and EMAC interrupts */
1302 if ((MAL_UIC_DEF & my_uicmsr) != 0) { /* we have a MAL interrupt */
1303 mal_isr = mfdcr (malesr);
1304 /* look for mal error */
1305 if ((my_uicmsr & MAL_UIC_ERR) != 0) {
1306 mal_err (dev, mal_isr, my_uicmsr, MAL_UIC_DEF, MAL_UIC_ERR);
1307 serviced = 1;
1308 rc = 0;
1309 }
1310 }
1311
1312 /* port by port dispatch of emac interrupts */
1313
1314 if ((SEL_UIC_DEF(hw_p->devnum) & my_uicmsr) != 0) { /* look for EMAC errors */
1315 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1316 if ((hw_p->emac_ier & emac_isr) != 0) {
1317 emac_err (dev, emac_isr);
1318 serviced = 1;
1319 rc = 0;
1320 }
1321 }
1322 if (((hw_p->emac_ier & emac_isr) != 0) || ((MAL_UIC_ERR & my_uicmsr) != 0)) {
1323 mtdcr (uicsr, MAL_UIC_DEF | SEL_UIC_DEF(hw_p->devnum)); /* Clear */
1324 return (rc); /* we had errors so get out */
1325 }
1326
1327                 /* handle MAL TX EOB interrupt from a tx */
1328 if (my_uicmsr & UIC_MAL_TXEOB) {
1329 mal_rx_eob = mfdcr (maltxeobisr);
1330 mtdcr (maltxeobisr, mal_rx_eob);
1331 mtdcr (uicsr, UIC_MAL_TXEOB);
1332 }
1333                 /* handle MAL RX EOB interrupt from a receive */
1334 /* check for EOB on valid channels */
1335 if (my_uicmsr & UIC_MAL_RXEOB)
1336 {
1337 mal_rx_eob = mfdcr (malrxeobisr);
1338 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1339 /* clear EOB
1340 mtdcr(malrxeobisr, mal_rx_eob); */
1341 enet_rcv (dev, emac_isr);
1342 /* indicate that we serviced an interrupt */
1343 serviced = 1;
1344 rc = 0;
1345 }
1346 }
1347 mtdcr (uicsr, MAL_UIC_DEF|EMAC_UIC_DEF|EMAC_UIC_DEF1); /* Clear */
1348 #if defined(CONFIG_405EZ)
1349 mtsdr (sdricintstat, SDR_ICRX_STAT | SDR_ICTX0_STAT | SDR_ICTX1_STAT);
1350 #endif /* defined(CONFIG_405EZ) */
1351 }
1352 while (serviced);
1353
1354 return (rc);
1355 }
1356
1357 #endif /* CONFIG_440 */
1358
1359 /*-----------------------------------------------------------------------------+
1360 * MAL Error Routine
1361 *-----------------------------------------------------------------------------*/
1362 static void mal_err (struct eth_device *dev, unsigned long isr,
1363 unsigned long uic, unsigned long maldef,
1364 unsigned long mal_errr)
1365 {
1366 EMAC_4XX_HW_PST hw_p = dev->priv;
1367
1368 mtdcr (malesr, isr); /* clear interrupt */
1369
1370 /* clear DE interrupt */
1371 mtdcr (maltxdeir, 0xC0000000);
1372 mtdcr (malrxdeir, 0x80000000);
1373
1374 #ifdef INFO_4XX_ENET
1375 printf ("\nMAL error occured.... ISR = %lx UIC = = %lx MAL_DEF = %lx MAL_ERR= %lx \n", isr, uic, maldef, mal_errr);
1376 #endif
1377
1378 eth_init (hw_p->bis); /* start again... */
1379 }
1380
1381 /*-----------------------------------------------------------------------------+
1382 * EMAC Error Routine
1383 *-----------------------------------------------------------------------------*/
1384 static void emac_err (struct eth_device *dev, unsigned long isr)
1385 {
1386 EMAC_4XX_HW_PST hw_p = dev->priv;
1387
1388 printf ("EMAC%d error occured.... ISR = %lx\n", hw_p->devnum, isr);
1389 out32 (EMAC_ISR + hw_p->hw_addr, isr);
1390 }
1391
1392 /*-----------------------------------------------------------------------------+
1393 * enet_rcv() handles the ethernet receive data
1394 *-----------------------------------------------------------------------------*/
1395 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1396 {
1397 struct enet_frame *ef_ptr;
1398 unsigned long data_len;
1399 unsigned long rx_eob_isr;
1400 EMAC_4XX_HW_PST hw_p = dev->priv;
1401
1402 int handled = 0;
1403 int i;
1404 int loop_count = 0;
1405
1406 rx_eob_isr = mfdcr (malrxeobisr);
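	/* MAL RX EOB status keeps channel 0 in the most significant bit,
	 * so this EMAC's RX channel is tested with 0x80000000 >> devnum. */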
1407 if ((0x80000000 >> hw_p->devnum) & rx_eob_isr) {
1408 /* clear EOB */
1409 mtdcr (malrxeobisr, rx_eob_isr);
1410
1411 /* EMAC RX done */
1412 while (1) { /* do all */
1413 i = hw_p->rx_slot;
1414
1415 if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1416 || (loop_count >= NUM_RX_BUFF))
1417 break;
1418 loop_count++;
1419 hw_p->rx_slot++;
1420 if (NUM_RX_BUFF == hw_p->rx_slot)
1421 hw_p->rx_slot = 0;
1422 handled++;
1423 data_len = (unsigned long) hw_p->rx[i].data_len; /* Get len */
1424 if (data_len) {
1425 if (data_len > ENET_MAX_MTU) /* Check len */
1426 data_len = 0;
1427 else {
1428 if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) { /* Check Errors */
1429 data_len = 0;
1430 hw_p->stats.rx_err_log[hw_p->
1431 rx_err_index]
1432 = hw_p->rx[i].ctrl;
1433 hw_p->rx_err_index++;
1434 if (hw_p->rx_err_index ==
1435 MAX_ERR_LOG)
1436 hw_p->rx_err_index =
1437 0;
1438                                 }       /* emac_errors */
1439 } /* data_len < max mtu */
1440 } /* if data_len */
1441 if (!data_len) { /* no data */
1442 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY; /* Free Recv Buffer */
1443
1444 hw_p->stats.data_len_err++; /* Error at Rx */
1445 }
1446
1447 /* !data_len */
1448 /* AS.HARNOIS */
1449 /* Check if user has already eaten buffer */
1450 /* if not => ERROR */
1451 else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1452 if (hw_p->is_receiving)
1453 printf ("ERROR : Receive buffers are full!\n");
1454 break;
1455 } else {
1456 hw_p->stats.rx_frames++;
1457 hw_p->stats.rx += data_len;
1458 ef_ptr = (struct enet_frame *) hw_p->rx[i].
1459 data_ptr;
1460 #ifdef INFO_4XX_ENET
1461 hw_p->stats.pkts_rx++;
1462 #endif
1463 /* AS.HARNOIS
1464 * use ring buffer
1465 */
1466 hw_p->rx_ready[hw_p->rx_i_index] = i;
1467 hw_p->rx_i_index++;
1468 if (NUM_RX_BUFF == hw_p->rx_i_index)
1469 hw_p->rx_i_index = 0;
1470
1471 /* AS.HARNOIS
1472 * free receive buffer only when
1473 * buffer has been handled (eth_rx)
1474 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1475 */
1476 } /* if data_len */
1477 } /* while */
1478 } /* if EMACK_RXCHL */
1479 }
1480
1481
1482 static int ppc_4xx_eth_rx (struct eth_device *dev)
1483 {
1484 int length;
1485 int user_index;
1486 unsigned long msr;
1487 EMAC_4XX_HW_PST hw_p = dev->priv;
1488
1489 hw_p->is_receiving = 1; /* tell driver */
1490
1491 for (;;) {
1492 /* AS.HARNOIS
1493 * use ring buffer and
1494          * get index from rx buffer descriptor queue
1495 */
1496 user_index = hw_p->rx_ready[hw_p->rx_u_index];
1497 if (user_index == -1) {
1498 length = -1;
1499 break; /* nothing received - leave for() loop */
1500 }
1501
1502 msr = mfmsr ();
1503 mtmsr (msr & ~(MSR_EE));
1504
1505 length = hw_p->rx[user_index].data_len;
1506
1507 /* Pass the packet up to the protocol layers. */
1508 /* NetReceive(NetRxPackets[rxIdx], length - 4); */
1509 /* NetReceive(NetRxPackets[i], length); */
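		/* strip the trailing 4 bytes (frame checksum) before passing
		 * the packet up to the protocol layers */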
1510 NetReceive (NetRxPackets[user_index], length - 4);
1511 /* Free Recv Buffer */
1512 hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1513 /* Free rx buffer descriptor queue */
1514 hw_p->rx_ready[hw_p->rx_u_index] = -1;
1515 hw_p->rx_u_index++;
1516 if (NUM_RX_BUFF == hw_p->rx_u_index)
1517 hw_p->rx_u_index = 0;
1518
1519 #ifdef INFO_4XX_ENET
1520 hw_p->stats.pkts_handled++;
1521 #endif
1522
1523 mtmsr (msr); /* Enable IRQ's */
1524 }
1525
1526 hw_p->is_receiving = 0; /* tell driver */
1527
1528 return length;
1529 }
1530
1531 int ppc_4xx_eth_initialize (bd_t * bis)
1532 {
1533 static int virgin = 0;
1534 struct eth_device *dev;
1535 int eth_num = 0;
1536 EMAC_4XX_HW_PST hw = NULL;
1537 u8 ethaddr[4 + CONFIG_EMAC_NR_START][6];
1538 u32 hw_addr[4];
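	/* hw_addr[] holds each EMAC's register block offset (0x0, 0x100,
	 * 0x400, 0x600), later added to the EMAC_* register addresses. */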
1539
1540 #if defined(CONFIG_440GX)
1541 unsigned long pfc1;
1542
1543 mfsdr (sdr_pfc1, pfc1);
1544 pfc1 &= ~(0x01e00000);
1545 pfc1 |= 0x01200000;
1546 mtsdr (sdr_pfc1, pfc1);
1547 #endif
1548
1549 /* first clear all mac-addresses */
1550 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++)
1551 memcpy(ethaddr[eth_num], "\0\0\0\0\0\0", 6);
1552
1553 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1554 switch (eth_num) {
1555 default: /* fall through */
1556 case 0:
1557 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1558 bis->bi_enetaddr, 6);
1559 hw_addr[eth_num] = 0x0;
1560 break;
1561 #ifdef CONFIG_HAS_ETH1
1562 case 1:
1563 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1564 bis->bi_enet1addr, 6);
1565 hw_addr[eth_num] = 0x100;
1566 break;
1567 #endif
1568 #ifdef CONFIG_HAS_ETH2
1569 case 2:
1570 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1571 bis->bi_enet2addr, 6);
1572 hw_addr[eth_num] = 0x400;
1573 break;
1574 #endif
1575 #ifdef CONFIG_HAS_ETH3
1576 case 3:
1577 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1578 bis->bi_enet3addr, 6);
1579 hw_addr[eth_num] = 0x600;
1580 break;
1581 #endif
1582 }
1583 }
1584
1585 /* set phy num and mode */
1586 bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1587 bis->bi_phymode[0] = 0;
1588
1589 #if defined(CONFIG_PHY1_ADDR)
1590 bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1591 bis->bi_phymode[1] = 0;
1592 #endif
1593 #if defined(CONFIG_440GX)
1594 bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1595 bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
1596 bis->bi_phymode[2] = 2;
1597 bis->bi_phymode[3] = 2;
1598
1599 ppc_4xx_eth_setup_bridge(0, bis);
1600 #endif
1601
1602 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1603 /*
1604 * See if we can actually bring up the interface,
1605 * otherwise, skip it
1606 */
1607 if (memcmp (ethaddr[eth_num], "\0\0\0\0\0\0", 6) == 0) {
1608 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1609 continue;
1610 }
1611
1612 /* Allocate device structure */
1613 dev = (struct eth_device *) malloc (sizeof (*dev));
1614 if (dev == NULL) {
1615 printf ("ppc_4xx_eth_initialize: "
1616 "Cannot allocate eth_device %d\n", eth_num);
1617 return (-1);
1618 }
1619 memset(dev, 0, sizeof(*dev));
1620
1621 /* Allocate our private use data */
1622 hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
1623 if (hw == NULL) {
1624 printf ("ppc_4xx_eth_initialize: "
1625 "Cannot allocate private hw data for eth_device %d",
1626 eth_num);
1627 free (dev);
1628 return (-1);
1629 }
1630 memset(hw, 0, sizeof(*hw));
1631
1632 hw->hw_addr = hw_addr[eth_num];
1633 memcpy (dev->enetaddr, ethaddr[eth_num], 6);
1634 hw->devnum = eth_num;
1635 hw->print_speed = 1;
1636
1637 sprintf (dev->name, "ppc_4xx_eth%d", eth_num - CONFIG_EMAC_NR_START);
1638 dev->priv = (void *) hw;
1639 dev->init = ppc_4xx_eth_init;
1640 dev->halt = ppc_4xx_eth_halt;
1641 dev->send = ppc_4xx_eth_send;
1642 dev->recv = ppc_4xx_eth_rx;
1643
1644 if (0 == virgin) {
1645 /* set the MAL IER ??? names may change with new spec ??? */
1646 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
1647 mal_ier =
1648 MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE |
1649 MAL_IER_DE | MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE ;
1650 #else
1651 mal_ier =
1652 MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
1653 MAL_IER_OPBE | MAL_IER_PLBE;
1654 #endif
1655 mtdcr (malesr, 0xffffffff); /* clear pending interrupts */
1656 mtdcr (maltxdeir, 0xffffffff); /* clear pending interrupts */
1657 mtdcr (malrxdeir, 0xffffffff); /* clear pending interrupts */
1658 mtdcr (malier, mal_ier);
1659
1660 /* install MAL interrupt handler */
1661 irq_install_handler (VECNUM_MS,
1662 (interrupt_handler_t *) enetInt,
1663 dev);
1664 irq_install_handler (VECNUM_MTE,
1665 (interrupt_handler_t *) enetInt,
1666 dev);
1667 irq_install_handler (VECNUM_MRE,
1668 (interrupt_handler_t *) enetInt,
1669 dev);
1670 irq_install_handler (VECNUM_TXDE,
1671 (interrupt_handler_t *) enetInt,
1672 dev);
1673 irq_install_handler (VECNUM_RXDE,
1674 (interrupt_handler_t *) enetInt,
1675 dev);
1676 virgin = 1;
1677 }
1678
1679 #if defined(CONFIG_NET_MULTI)
1680 eth_register (dev);
1681 #else
1682 emac0_dev = dev;
1683 #endif
1684
1685 #if defined(CONFIG_NET_MULTI)
1686 #if defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII)
1687 miiphy_register (dev->name,
1688 emac4xx_miiphy_read, emac4xx_miiphy_write);
1689 #endif
1690 #endif
1691 } /* end for each supported device */
1692 return (1);
1693 }
1694
1695 #if !defined(CONFIG_NET_MULTI)
1696 void eth_halt (void) {
1697 if (emac0_dev) {
1698 ppc_4xx_eth_halt(emac0_dev);
1699 free(emac0_dev);
1700 emac0_dev = NULL;
1701 }
1702 }
1703
1704 int eth_init (bd_t *bis)
1705 {
1706 ppc_4xx_eth_initialize(bis);
1707 if (emac0_dev) {
1708 return ppc_4xx_eth_init(emac0_dev, bis);
1709 } else {
1710 printf("ERROR: ethaddr not set!\n");
1711 return -1;
1712 }
1713 }
1714
1715 int eth_send(volatile void *packet, int length)
1716 {
1717 return (ppc_4xx_eth_send(emac0_dev, packet, length));
1718 }
1719
1720 int eth_rx(void)
1721 {
1722 return (ppc_4xx_eth_rx(emac0_dev));
1723 }
1724
1725 int emac4xx_miiphy_initialize (bd_t * bis)
1726 {
1727 #if defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII)
1728 miiphy_register ("ppc_4xx_eth0",
1729 emac4xx_miiphy_read, emac4xx_miiphy_write);
1730 #endif
1731
1732 return 0;
1733 }
1734 #endif /* !defined(CONFIG_NET_MULTI) */
1735
1736 #endif /* #if (CONFIG_COMMANDS & CFG_CMD_NET) */