cpu/ppc4xx/4xx_enet.c
1 /*-----------------------------------------------------------------------------+
2 *
3 * This source code has been made available to you by IBM on an AS-IS
4 * basis. Anyone receiving this source is licensed under IBM
5 * copyrights to use it in any way he or she deems fit, including
6 * copying it, modifying it, compiling it, and redistributing it either
7 * with or without modifications. No license under IBM patents or
8 * patent applications is to be implied by the copyright license.
9 *
10 * Any user of this software should understand that IBM cannot provide
11 * technical support for this software and will not be responsible for
12 * any consequences resulting from the use of this software.
13 *
14 * Any person who transfers this source code or any derivative work
15 * must include the IBM copyright notice, this paragraph, and the
16 * preceding two paragraphs in the transferred software.
17 *
18 * COPYRIGHT I B M CORPORATION 1995
19 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
20 *-----------------------------------------------------------------------------*/
21 /*-----------------------------------------------------------------------------+
22 *
23 * File Name: enetemac.c
24 *
25 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
26 *
27 * Author: Mark Wisner
28 *
29 * Change Activity-
30 *
31 * Date Description of Change BY
32 * --------- --------------------- ---
33 * 05-May-99 Created MKW
34 * 27-Jun-99 Clean up JWB
35 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
36 * 29-Jul-99 Added Full duplex support MKW
37 * 06-Aug-99 Changed names for Mal CR reg MKW
38 * 23-Aug-99 Turned off SYE when running at 10Mbs MKW
39 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
40 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
41 * to avoid chaining maximum sized packets. Push starting
42 * RX descriptor address up to the next cache line boundary.
43 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
44 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
45 * EMAC_RXM register. JWB
46 * 12-Mar-01 anne-sophie.harnois@nextream.fr
47 * - Variables are compatible with those already defined in
48 * include/net.h
49 * - Receive buffer descriptor ring is used to send buffers
50 * to the user
51 * - Info print about send/received/handled packet number if
52 * INFO_405_ENET is set
53 * 17-Apr-01 stefan.roese@esd-electronics.com
54 * - MAL reset in "eth_halt" included
55 * - Enet speed and duplex output now in one line
56 * 08-May-01 stefan.roese@esd-electronics.com
57 * - MAL error handling added (eth_init called again)
58 * 13-Nov-01 stefan.roese@esd-electronics.com
59 * - Set IST bit in EMAC_M1 reg upon 100MBit or full duplex
60 * 04-Jan-02 stefan.roese@esd-electronics.com
61 * - Wait for PHY auto negotiation to complete added
62 * 06-Feb-02 stefan.roese@esd-electronics.com
63 * - Bug fixed in waiting for auto negotiation to complete
64 * 26-Feb-02 stefan.roese@esd-electronics.com
65 * - rx and tx buffer descriptors now allocated (no fixed address
66 * used anymore)
67 * 17-Jun-02 stefan.roese@esd-electronics.com
68 * - MAL error debug printf 'M' removed (rx de interrupt may
69 * occur upon many incoming packets with only 4 rx buffers).
70 *-----------------------------------------------------------------------------*
71 * 17-Nov-03 travis.sawyer@sandburst.com
 72 * - ported from 405gp_enet.c to utilize up to 4 EMAC ports
73 * in the 440GX. This port should work with the 440GP
74 * (2 EMACs) also
75 * 15-Aug-05 sr@denx.de
76 * - merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
 77 * now handling all 4xx CPUs.
78 *-----------------------------------------------------------------------------*/
79
80 #include <config.h>
81 #include <common.h>
82 #include <net.h>
83 #include <asm/processor.h>
84 #include <commproc.h>
85 #include <ppc4xx.h>
86 #include <ppc4xx_enet.h>
87 #include <405_mal.h>
88 #include <miiphy.h>
89 #include <malloc.h>
90 #include "vecnum.h"
91
92 /*
 93 * Only compile for platforms with an AMCC EMAC ethernet controller and
 94 * network support enabled.
95 * Remark: CONFIG_405 describes Xilinx PPC405 FPGA without EMAC controller!
96 */
97 #if defined(CONFIG_CMD_NET) && !defined(CONFIG_405) && !defined(CONFIG_IOP480)
98
99 #if !(defined(CONFIG_MII) || defined(CONFIG_CMD_MII))
100 #error "CONFIG_MII has to be defined!"
101 #endif
102
103 #if defined(CONFIG_NETCONSOLE) && !defined(CONFIG_NET_MULTI)
104 #error "CONFIG_NET_MULTI has to be defined for NetConsole"
105 #endif
106
107 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
108 #define PHY_AUTONEGOTIATE_TIMEOUT 4000 /* 4000 ms autonegotiate timeout */
109
110 /* Ethernet Transmit and Receive Buffers */
111 /* AS.HARNOIS
112 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
113 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
114 */
115 #define ENET_MAX_MTU PKTSIZE
116 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
117
118 /*-----------------------------------------------------------------------------+
119 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
120 * Interrupt Controller).
121 *-----------------------------------------------------------------------------*/
122 #define MAL_UIC_ERR ( UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
123 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
124 #define EMAC_UIC_DEF UIC_ENET
125 #define EMAC_UIC_DEF1 UIC_ENET1
126 #define SEL_UIC_DEF(p) (p ? UIC_ENET1 : UIC_ENET )
127
128 #undef INFO_4XX_ENET
129
130 #define BI_PHYMODE_NONE 0
131 #define BI_PHYMODE_ZMII 1
132 #define BI_PHYMODE_RGMII 2
133 #define BI_PHYMODE_GMII 3
134 #define BI_PHYMODE_RTBI 4
135 #define BI_PHYMODE_TBI 5
136 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
137 #define BI_PHYMODE_SMII 6
138 #define BI_PHYMODE_MII 7
139 #endif
140
141 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || \
142 defined(CONFIG_440GRX) || defined(CONFIG_440SP)
143 #define SDR0_MFR_ETH_CLK_SEL_V(n) ((0x01<<27) / (n+1))
144 #endif
145
146 /*-----------------------------------------------------------------------------+
147 * Global variables. TX and RX descriptors and buffers.
148 *-----------------------------------------------------------------------------*/
149 /* IER globals */
150 static uint32_t mal_ier;
151
152 #if !defined(CONFIG_NET_MULTI)
153 struct eth_device *emac0_dev = NULL;
154 #endif
155
156 /*
157 * Get count of EMAC devices (doesn't have to be the max. possible number
158 * supported by the cpu)
159 */
160 #if defined(CONFIG_HAS_ETH3)
161 #define LAST_EMAC_NUM 4
162 #elif defined(CONFIG_HAS_ETH2)
163 #define LAST_EMAC_NUM 3
164 #elif defined(CONFIG_HAS_ETH1)
165 #define LAST_EMAC_NUM 2
166 #else
167 #define LAST_EMAC_NUM 1
168 #endif
169
170 /* normal boards start with EMAC0 */
171 #if !defined(CONFIG_EMAC_NR_START)
172 #define CONFIG_EMAC_NR_START 0
173 #endif
174
175 /*-----------------------------------------------------------------------------+
176 * Prototypes and externals.
177 *-----------------------------------------------------------------------------*/
178 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
179
180 int enetInt (struct eth_device *dev);
181 static void mal_err (struct eth_device *dev, unsigned long isr,
182 unsigned long uic, unsigned long maldef,
183 unsigned long mal_errr);
184 static void emac_err (struct eth_device *dev, unsigned long isr);
185
186 extern int phy_setup_aneg (char *devname, unsigned char addr);
187 extern int emac4xx_miiphy_read (char *devname, unsigned char addr,
188 unsigned char reg, unsigned short *value);
189 extern int emac4xx_miiphy_write (char *devname, unsigned char addr,
190 unsigned char reg, unsigned short value);
191
192 /*-----------------------------------------------------------------------------+
193 | ppc_4xx_eth_halt
194 | Disable MAL channel, and EMACn
195 +-----------------------------------------------------------------------------*/
196 static void ppc_4xx_eth_halt (struct eth_device *dev)
197 {
198 EMAC_4XX_HW_PST hw_p = dev->priv;
199 uint32_t failsafe = 10000;
200 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
201 unsigned long mfr;
202 #endif
203
204 out32 (EMAC_IER + hw_p->hw_addr, 0x00000000); /* disable emac interrupts */
205
206 /* 1st reset MAL channel */
207 /* Note: writing a 0 to a channel has no effect */
208 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
209 mtdcr (maltxcarr, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
210 #else
211 mtdcr (maltxcarr, (MAL_CR_MMSR >> hw_p->devnum));
212 #endif
213 mtdcr (malrxcarr, (MAL_CR_MMSR >> hw_p->devnum));
214
215 /* wait for reset */
216 while (mfdcr (malrxcasr) & (MAL_CR_MMSR >> hw_p->devnum)) {
 217 udelay (1000); /* Delay 1 ms so as not to hammer the register */
218 failsafe--;
219 if (failsafe == 0)
220 break;
221 }
222
223 /* EMAC RESET */
224 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
225 /* provide clocks for EMAC internal loopback */
226 mfsdr (sdr_mfr, mfr);
227 mfr |= SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
228 mtsdr(sdr_mfr, mfr);
229 #endif
230
231 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
232
233 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
234 /* remove clocks for EMAC internal loopback */
235 mfsdr (sdr_mfr, mfr);
236 mfr &= ~SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
237 mtsdr(sdr_mfr, mfr);
238 #endif
239
240
241 #ifndef CONFIG_NETCONSOLE
242 hw_p->print_speed = 1; /* print speed message again next time */
243 #endif
244
245 return;
246 }
247
248 #if defined (CONFIG_440GX)
249 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
250 {
251 unsigned long pfc1;
252 unsigned long zmiifer;
253 unsigned long rmiifer;
254
255 mfsdr(sdr_pfc1, pfc1);
256 pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
257
258 zmiifer = 0;
259 rmiifer = 0;
260
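/*
 * Map the Ethernet Pin Select (EPS) setting decoded from SDR0_PFC1 to a
 * ZMII/RGMII bridge configuration, and record the resulting PHY mode of
 * each EMAC in the board info structure.
 */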
261 switch (pfc1) {
262 case 1:
263 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
264 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
265 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
266 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
267 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
268 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
269 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
270 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
271 break;
272 case 2:
273 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
274 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
275 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
276 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
277 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
278 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
279 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
280 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
281 break;
282 case 3:
283 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
284 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
285 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
286 bis->bi_phymode[1] = BI_PHYMODE_NONE;
287 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
288 bis->bi_phymode[3] = BI_PHYMODE_NONE;
289 break;
290 case 4:
291 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
292 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
293 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
294 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
295 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
296 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
297 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
298 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
299 break;
300 case 5:
301 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
302 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
303 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
304 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
305 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
306 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
307 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
308 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
309 break;
310 case 6:
311 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
312 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
313 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
314 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
315 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
316 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
317 break;
318 case 0:
319 default:
320 zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
321 rmiifer = 0x0;
322 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
323 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
324 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
325 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
326 break;
327 }
328
 329 /* Ensure we set up MDIO for this devnum and ONLY this devnum */
330 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
331
332 out32 (ZMII_FER, zmiifer);
333 out32 (RGMII_FER, rmiifer);
334
335 return ((int)pfc1);
336 }
 337 #endif /* CONFIG_440GX */
338
339 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
340 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
341 {
342 unsigned long zmiifer=0x0;
343 unsigned long pfc1;
344
345 mfsdr(sdr_pfc1, pfc1);
346 pfc1 &= SDR0_PFC1_SELECT_MASK;
347
348 switch (pfc1) {
349 case SDR0_PFC1_SELECT_CONFIG_2:
350 /* 1 x GMII port */
351 out32 (ZMII_FER, 0x00);
352 out32 (RGMII_FER, 0x00000037);
353 bis->bi_phymode[0] = BI_PHYMODE_GMII;
354 bis->bi_phymode[1] = BI_PHYMODE_NONE;
355 break;
356 case SDR0_PFC1_SELECT_CONFIG_4:
357 /* 2 x RGMII ports */
358 out32 (ZMII_FER, 0x00);
359 out32 (RGMII_FER, 0x00000055);
360 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
361 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
362 break;
363 case SDR0_PFC1_SELECT_CONFIG_6:
364 /* 2 x SMII ports */
365 out32 (ZMII_FER,
366 ((ZMII_FER_SMII) << ZMII_FER_V(0)) |
367 ((ZMII_FER_SMII) << ZMII_FER_V(1)));
368 out32 (RGMII_FER, 0x00000000);
369 bis->bi_phymode[0] = BI_PHYMODE_SMII;
370 bis->bi_phymode[1] = BI_PHYMODE_SMII;
371 break;
372 case SDR0_PFC1_SELECT_CONFIG_1_2:
373 /* only 1 x MII supported */
374 out32 (ZMII_FER, (ZMII_FER_MII) << ZMII_FER_V(0));
375 out32 (RGMII_FER, 0x00000000);
376 bis->bi_phymode[0] = BI_PHYMODE_MII;
377 bis->bi_phymode[1] = BI_PHYMODE_NONE;
378 break;
379 default:
380 break;
381 }
382
 383 /* Ensure we set up MDIO for this devnum and ONLY this devnum */
384 zmiifer = in32 (ZMII_FER);
385 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
386 out32 (ZMII_FER, zmiifer);
387
388 return ((int)0x0);
389 }
 390 #endif /* CONFIG_440EPX || CONFIG_440GRX */
391
392 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
393 {
394 int i, j;
395 unsigned long reg = 0;
396 unsigned long msr;
397 unsigned long speed;
398 unsigned long duplex;
399 unsigned long failsafe;
400 unsigned mode_reg;
401 unsigned short devnum;
402 unsigned short reg_short;
403 #if defined(CONFIG_440GX) || \
404 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
405 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
406 sys_info_t sysinfo;
407 #if defined(CONFIG_440GX) || defined(CONFIG_440SPE) || \
408 defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
409 int ethgroup = -1;
410 #endif
411 #endif
412 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
413 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
414 unsigned long mfr;
415 #endif
416
417
418 EMAC_4XX_HW_PST hw_p = dev->priv;
419
420 /* before doing anything, figure out if we have a MAC address */
421 /* if not, bail */
422 if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
423 printf("ERROR: ethaddr not set!\n");
424 return -1;
425 }
426
427 #if defined(CONFIG_440GX) || \
428 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
429 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
430 /* Need to get the OPB frequency so we can access the PHY */
431 get_sys_info (&sysinfo);
432 #endif
433
434 msr = mfmsr ();
435 mtmsr (msr & ~(MSR_EE)); /* disable interrupts */
436
437 devnum = hw_p->devnum;
438
439 #ifdef INFO_4XX_ENET
440 /* AS.HARNOIS
 441 * We should have:
 442 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
 443 * In most cases hw_p->stats.pkts_handled = hw_p->stats.pkts_rx,
 444 * but it is possible that new packets (unrelated to the
 445 * current transfer) arrive before NetLoop
 446 * calls eth_halt
447 */
 448 printf ("About preceding transfer (eth%d):\n"
449 "- Sent packet number %d\n"
450 "- Received packet number %d\n"
451 "- Handled packet number %d\n",
452 hw_p->devnum,
453 hw_p->stats.pkts_tx,
454 hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
455
456 hw_p->stats.pkts_tx = 0;
457 hw_p->stats.pkts_rx = 0;
458 hw_p->stats.pkts_handled = 0;
459 hw_p->print_speed = 1; /* print speed message again next time */
460 #endif
461
462 hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
463 hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
464
465 hw_p->rx_slot = 0; /* MAL Receive Slot */
466 hw_p->rx_i_index = 0; /* Receive Interrupt Queue Index */
467 hw_p->rx_u_index = 0; /* Receive User Queue Index */
468
469 hw_p->tx_slot = 0; /* MAL Transmit Slot */
470 hw_p->tx_i_index = 0; /* Transmit Interrupt Queue Index */
471 hw_p->tx_u_index = 0; /* Transmit User Queue Index */
472
473 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
474 /* set RMII mode */
475 /* NOTE: 440GX spec states that mode is mutually exclusive */
476 /* NOTE: Therefore, disable all other EMACS, since we handle */
477 /* NOTE: only one emac at a time */
478 reg = 0;
479 out32 (ZMII_FER, 0);
480 udelay (100);
481
482 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
483 out32 (ZMII_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
484 #elif defined(CONFIG_440GX) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
485 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
486 #elif defined(CONFIG_440GP)
487 /* set RMII mode */
488 out32 (ZMII_FER, ZMII_RMII | ZMII_MDI0);
489 #else
490 if ((devnum == 0) || (devnum == 1)) {
491 out32 (ZMII_FER, (ZMII_FER_SMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
492 } else { /* ((devnum == 2) || (devnum == 3)) */
493 out32 (ZMII_FER, ZMII_FER_MDI << ZMII_FER_V (devnum));
494 out32 (RGMII_FER, ((RGMII_FER_RGMII << RGMII_FER_V (2)) |
495 (RGMII_FER_RGMII << RGMII_FER_V (3))));
496 }
497 #endif
498
499 out32 (ZMII_SSR, ZMII_SSR_SP << ZMII_SSR_V(devnum));
500 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
501
502 __asm__ volatile ("eieio");
503
504 /* reset emac so we have access to the phy */
505 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
506 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
507 /* provide clocks for EMAC internal loopback */
508 mfsdr (sdr_mfr, mfr);
509 mfr |= SDR0_MFR_ETH_CLK_SEL_V(devnum);
510 mtsdr(sdr_mfr, mfr);
511 #endif
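/*
 * With the internal (loopback) clock selected above, the EMAC soft
 * reset below can complete even if no external PHY clock is present.
 */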
512
513 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
514 __asm__ volatile ("eieio");
515
516 failsafe = 1000;
517 while ((in32 (EMAC_M0 + hw_p->hw_addr) & (EMAC_M0_SRST)) && failsafe) {
518 udelay (1000);
519 failsafe--;
520 }
521 if (failsafe <= 0)
522 printf("\nProblem resetting EMAC!\n");
523
524 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
525 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
526 /* remove clocks for EMAC internal loopback */
527 mfsdr (sdr_mfr, mfr);
528 mfr &= ~SDR0_MFR_ETH_CLK_SEL_V(devnum);
529 mtsdr(sdr_mfr, mfr);
530 #endif
531
532 #if defined(CONFIG_440GX) || \
533 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
534 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
535 /* Whack the M1 register */
536 mode_reg = 0x0;
537 mode_reg &= ~0x00000038;
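/* Select the OPB bus clock indication (OBCI) field according to the OPB frequency */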
538 if (sysinfo.freqOPB <= 50000000);
539 else if (sysinfo.freqOPB <= 66666667)
540 mode_reg |= EMAC_M1_OBCI_66;
541 else if (sysinfo.freqOPB <= 83333333)
542 mode_reg |= EMAC_M1_OBCI_83;
543 else if (sysinfo.freqOPB <= 100000000)
544 mode_reg |= EMAC_M1_OBCI_100;
545 else
546 mode_reg |= EMAC_M1_OBCI_GT100;
547
548 out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
549 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
550
551 /* wait for PHY to complete auto negotiation */
552 reg_short = 0;
553 #ifndef CONFIG_CS8952_PHY
554 switch (devnum) {
555 case 0:
556 reg = CONFIG_PHY_ADDR;
557 break;
558 #if defined (CONFIG_PHY1_ADDR)
559 case 1:
560 reg = CONFIG_PHY1_ADDR;
561 break;
562 #endif
563 #if defined (CONFIG_440GX)
564 case 2:
565 reg = CONFIG_PHY2_ADDR;
566 break;
567 case 3:
568 reg = CONFIG_PHY3_ADDR;
569 break;
570 #endif
571 default:
572 reg = CONFIG_PHY_ADDR;
573 break;
574 }
575
576 bis->bi_phynum[devnum] = reg;
577
578 #if defined(CONFIG_PHY_RESET)
579 /*
 580 * Reset the PHY, but only the first time through;
 581 * otherwise, just check the speeds & feeds
582 */
583 if (hw_p->first_init == 0) {
584 #if defined(CONFIG_M88E1111_PHY)
585 miiphy_write (dev->name, reg, 0x14, 0x0ce3);
586 miiphy_write (dev->name, reg, 0x18, 0x4101);
587 miiphy_write (dev->name, reg, 0x09, 0x0e00);
588 miiphy_write (dev->name, reg, 0x04, 0x01e1);
589 #endif
590 miiphy_reset (dev->name, reg);
591
592 #if defined(CONFIG_440GX) || \
593 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
594 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
595
596 #if defined(CONFIG_CIS8201_PHY)
597 /*
598 * Cicada 8201 PHY needs to have an extended register whacked
599 * for RGMII mode.
600 */
601 if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
602 #if defined(CONFIG_CIS8201_SHORT_ETCH)
603 miiphy_write (dev->name, reg, 23, 0x1300);
604 #else
605 miiphy_write (dev->name, reg, 23, 0x1000);
606 #endif
607 /*
608 * Vitesse VSC8201/Cicada CIS8201 errata:
609 * Interoperability problem with Intel 82547EI phys
610 * This work around (provided by Vitesse) changes
611 * the default timer convergence from 8ms to 12ms
612 */
613 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
614 miiphy_write (dev->name, reg, 0x08, 0x0200);
615 miiphy_write (dev->name, reg, 0x1f, 0x52b5);
616 miiphy_write (dev->name, reg, 0x02, 0x0004);
617 miiphy_write (dev->name, reg, 0x01, 0x0671);
618 miiphy_write (dev->name, reg, 0x00, 0x8fae);
619 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
620 miiphy_write (dev->name, reg, 0x08, 0x0000);
621 miiphy_write (dev->name, reg, 0x1f, 0x0000);
622 /* end Vitesse/Cicada errata */
623 }
624 #endif
625
626 #if defined(CONFIG_ET1011C_PHY)
627 /*
628 * Agere ET1011c PHY needs to have an extended register whacked
629 * for RGMII mode.
630 */
 631 if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
632 miiphy_read (dev->name, reg, 0x16, &reg_short);
633 reg_short &= ~(0x7);
634 reg_short |= 0x6; /* RGMII DLL Delay*/
635 miiphy_write (dev->name, reg, 0x16, reg_short);
636
637 miiphy_read (dev->name, reg, 0x17, &reg_short);
638 reg_short &= ~(0x40);
639 miiphy_write (dev->name, reg, 0x17, reg_short);
640
641 miiphy_write(dev->name, reg, 0x1c, 0x74f0);
642 }
643 #endif
644
645 #endif
646 /* Start/Restart autonegotiation */
647 phy_setup_aneg (dev->name, reg);
648 udelay (1000);
649 }
650 #endif /* defined(CONFIG_PHY_RESET) */
651
652 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
653
654 /*
655 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
656 */
657 if ((reg_short & PHY_BMSR_AUTN_ABLE)
658 && !(reg_short & PHY_BMSR_AUTN_COMP)) {
659 puts ("Waiting for PHY auto negotiation to complete");
660 i = 0;
661 while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
662 /*
663 * Timeout reached ?
664 */
665 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
666 puts (" TIMEOUT !\n");
667 break;
668 }
669
670 if ((i++ % 1000) == 0) {
671 putc ('.');
672 }
673 udelay (1000); /* 1 ms */
674 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
675
676 }
677 puts (" done\n");
678 udelay (500000); /* another 500 ms (results in faster booting) */
679 }
680 #endif /* #ifndef CONFIG_CS8952_PHY */
681
682 speed = miiphy_speed (dev->name, reg);
683 duplex = miiphy_duplex (dev->name, reg);
684
685 if (hw_p->print_speed) {
686 hw_p->print_speed = 0;
687 printf ("ENET Speed is %d Mbps - %s duplex connection (EMAC%d)\n",
688 (int) speed, (duplex == HALF) ? "HALF" : "FULL",
689 hw_p->devnum);
690 }
691
692 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE) && \
693 !defined(CONFIG_440EPX) && !defined(CONFIG_440GRX)
694 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
695 mfsdr(sdr_mfr, reg);
696 if (speed == 100) {
697 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
698 } else {
699 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
700 }
701 mtsdr(sdr_mfr, reg);
702 #endif
703
704 /* Set ZMII/RGMII speed according to the phy link speed */
705 reg = in32 (ZMII_SSR);
706 if ( (speed == 100) || (speed == 1000) )
707 out32 (ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
708 else
709 out32 (ZMII_SSR, reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));
710
711 if ((devnum == 2) || (devnum == 3)) {
712 if (speed == 1000)
713 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
714 else if (speed == 100)
715 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
716 else if (speed == 10)
717 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
718 else {
719 printf("Error in RGMII Speed\n");
720 return -1;
721 }
722 out32 (RGMII_SSR, reg);
723 }
724 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
725
726 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
727 if (speed == 1000)
728 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
729 else if (speed == 100)
730 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
731 else if (speed == 10)
732 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
733 else {
734 printf("Error in RGMII Speed\n");
735 return -1;
736 }
737 out32 (RGMII_SSR, reg);
738 #endif
739
740 /* set the Mal configuration reg */
741 #if defined(CONFIG_440GX) || \
742 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
743 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
744 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
745 MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
746 #else
747 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
748 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
749 if (get_pvr() == PVR_440GP_RB) {
750 mtdcr (malmcr, mfdcr(malmcr) & ~MAL_CR_PLBB);
751 }
752 #endif
753
754 /* Free "old" buffers */
755 if (hw_p->alloc_tx_buf)
756 free (hw_p->alloc_tx_buf);
757 if (hw_p->alloc_rx_buf)
758 free (hw_p->alloc_rx_buf);
759
760 /*
 761 * Malloc MAL buffer descriptors, make sure they are
 762 * aligned on a cache line boundary
 763 * (401/403/IOP480 = 16, 405 = 32)
 764 * and don't cross cache block boundaries.
765 */
766 hw_p->alloc_tx_buf =
767 (mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_TX_BUFF) +
768 ((2 * CFG_CACHELINE_SIZE) - 2));
769 if (NULL == hw_p->alloc_tx_buf)
770 return -1;
771 if (((int) hw_p->alloc_tx_buf & CACHELINE_MASK) != 0) {
772 hw_p->tx =
773 (mal_desc_t *) ((int) hw_p->alloc_tx_buf +
774 CFG_CACHELINE_SIZE -
775 ((int) hw_p->
776 alloc_tx_buf & CACHELINE_MASK));
777 } else {
778 hw_p->tx = hw_p->alloc_tx_buf;
779 }
780
781 hw_p->alloc_rx_buf =
782 (mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_RX_BUFF) +
783 ((2 * CFG_CACHELINE_SIZE) - 2));
784 if (NULL == hw_p->alloc_rx_buf) {
785 free(hw_p->alloc_tx_buf);
786 hw_p->alloc_tx_buf = NULL;
787 return -1;
788 }
789
790 if (((int) hw_p->alloc_rx_buf & CACHELINE_MASK) != 0) {
791 hw_p->rx =
792 (mal_desc_t *) ((int) hw_p->alloc_rx_buf +
793 CFG_CACHELINE_SIZE -
794 ((int) hw_p->
795 alloc_rx_buf & CACHELINE_MASK));
796 } else {
797 hw_p->rx = hw_p->alloc_rx_buf;
798 }
799
800 for (i = 0; i < NUM_TX_BUFF; i++) {
801 hw_p->tx[i].ctrl = 0;
802 hw_p->tx[i].data_len = 0;
803 if (hw_p->first_init == 0) {
804 hw_p->txbuf_ptr =
805 (char *) malloc (ENET_MAX_MTU_ALIGNED);
806 if (NULL == hw_p->txbuf_ptr) {
807 free(hw_p->alloc_rx_buf);
808 free(hw_p->alloc_tx_buf);
809 hw_p->alloc_rx_buf = NULL;
810 hw_p->alloc_tx_buf = NULL;
811 for(j = 0; j < i; j++) {
 812 free(hw_p->tx[j].data_ptr);
 813 hw_p->tx[j].data_ptr = NULL;
 814 }
 return -1;
 815 }
816 }
817 hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
818 if ((NUM_TX_BUFF - 1) == i)
819 hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
820 hw_p->tx_run[i] = -1;
821 #if 0
822 printf ("TX_BUFF %d @ 0x%08lx\n", i,
823 (ulong) hw_p->tx[i].data_ptr);
824 #endif
825 }
826
827 for (i = 0; i < NUM_RX_BUFF; i++) {
828 hw_p->rx[i].ctrl = 0;
829 hw_p->rx[i].data_len = 0;
830 /* rx[i].data_ptr = (char *) &rx_buff[i]; */
831 hw_p->rx[i].data_ptr = (char *) NetRxPackets[i];
832 if ((NUM_RX_BUFF - 1) == i)
833 hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
834 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
835 hw_p->rx_ready[i] = -1;
836 #if 0
837 printf ("RX_BUFF %d @ 0x%08lx\n", i, (ulong) hw_p->rx[i].data_ptr);
838 #endif
839 }
840
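/*
 * Program the individual MAC address: EMAC_IAH takes the two most
 * significant bytes, EMAC_IAL the remaining four.
 */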
841 reg = 0x00000000;
842
843 reg |= dev->enetaddr[0]; /* set high address */
844 reg = reg << 8;
845 reg |= dev->enetaddr[1];
846
847 out32 (EMAC_IAH + hw_p->hw_addr, reg);
848
849 reg = 0x00000000;
850 reg |= dev->enetaddr[2]; /* set low address */
851 reg = reg << 8;
852 reg |= dev->enetaddr[3];
853 reg = reg << 8;
854 reg |= dev->enetaddr[4];
855 reg = reg << 8;
856 reg |= dev->enetaddr[5];
857
858 out32 (EMAC_IAL + hw_p->hw_addr, reg);
859
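/*
 * Attach this EMAC's descriptor rings to its MAL channel: program the
 * TX/RX channel table pointers and the RX channel buffer size (the
 * MAL RCBS registers are in units of 16 bytes).
 */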
860 switch (devnum) {
861 case 1:
862 /* setup MAL tx & rx channel pointers */
863 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
864 mtdcr (maltxctp2r, hw_p->tx);
865 #else
866 mtdcr (maltxctp1r, hw_p->tx);
867 #endif
868 #if defined(CONFIG_440)
869 mtdcr (maltxbattr, 0x0);
870 mtdcr (malrxbattr, 0x0);
871 #endif
872 mtdcr (malrxctp1r, hw_p->rx);
873 /* set RX buffer size */
874 mtdcr (malrcbs1, ENET_MAX_MTU_ALIGNED / 16);
875 break;
876 #if defined (CONFIG_440GX)
877 case 2:
878 /* setup MAL tx & rx channel pointers */
879 mtdcr (maltxbattr, 0x0);
880 mtdcr (malrxbattr, 0x0);
881 mtdcr (maltxctp2r, hw_p->tx);
882 mtdcr (malrxctp2r, hw_p->rx);
883 /* set RX buffer size */
884 mtdcr (malrcbs2, ENET_MAX_MTU_ALIGNED / 16);
885 break;
886 case 3:
887 /* setup MAL tx & rx channel pointers */
888 mtdcr (maltxbattr, 0x0);
889 mtdcr (maltxctp3r, hw_p->tx);
890 mtdcr (malrxbattr, 0x0);
891 mtdcr (malrxctp3r, hw_p->rx);
892 /* set RX buffer size */
893 mtdcr (malrcbs3, ENET_MAX_MTU_ALIGNED / 16);
894 break;
895 #endif /* CONFIG_440GX */
896 case 0:
897 default:
898 /* setup MAL tx & rx channel pointers */
899 #if defined(CONFIG_440)
900 mtdcr (maltxbattr, 0x0);
901 mtdcr (malrxbattr, 0x0);
902 #endif
903 mtdcr (maltxctp0r, hw_p->tx);
904 mtdcr (malrxctp0r, hw_p->rx);
905 /* set RX buffer size */
906 mtdcr (malrcbs0, ENET_MAX_MTU_ALIGNED / 16);
907 break;
908 }
909
910 /* Enable MAL transmit and receive channels */
911 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
912 mtdcr (maltxcasr, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
913 #else
914 mtdcr (maltxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
915 #endif
916 mtdcr (malrxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
917
918 /* set transmit enable & receive enable */
919 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_TXE | EMAC_M0_RXE);
920
921 /* set receive fifo to 4k and tx fifo to 2k */
922 mode_reg = in32 (EMAC_M1 + hw_p->hw_addr);
923 mode_reg |= EMAC_M1_RFS_4K | EMAC_M1_TX_FIFO_2K;
924
925 /* set speed */
926 if (speed == _1000BASET) {
927 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
928 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
929 unsigned long pfc1;
930
931 mfsdr (sdr_pfc1, pfc1);
932 pfc1 |= SDR0_PFC1_EM_1000;
933 mtsdr (sdr_pfc1, pfc1);
934 #endif
935 mode_reg = mode_reg | EMAC_M1_MF_1000MBPS | EMAC_M1_IST;
936 } else if (speed == _100BASET)
937 mode_reg = mode_reg | EMAC_M1_MF_100MBPS | EMAC_M1_IST;
938 else
939 mode_reg = mode_reg & ~0x00C00000; /* 10 MBPS */
940 if (duplex == FULL)
941 mode_reg = mode_reg | 0x80000000 | EMAC_M1_IST;
942
943 out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
944
 945 /* Enable broadcast and individual address */
946 /* TBS: enabling runts as some misbehaved nics will send runts */
947 out32 (EMAC_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
948
949 /* we probably need to set the tx mode1 reg? maybe at tx time */
950
951 /* set transmit request threshold register */
952 out32 (EMAC_TRTR + hw_p->hw_addr, 0x18000000); /* 256 byte threshold */
953
954 /* set receive low/high water mark register */
955 #if defined(CONFIG_440)
956 /* 440s has a 64 byte burst length */
957 out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
958 #else
959 /* 405s have a 16 byte burst length */
960 out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
961 #endif /* defined(CONFIG_440) */
962 out32 (EMAC_TXM1 + hw_p->hw_addr, 0xf8640000);
963
964 /* Set fifo limit entry in tx mode 0 */
965 out32 (EMAC_TXM0 + hw_p->hw_addr, 0x00000003);
966 /* Frame gap set */
967 out32 (EMAC_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
968
969 /* Set EMAC IER */
970 hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
971 if (speed == _100BASET)
972 hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
973
974 out32 (EMAC_ISR + hw_p->hw_addr, 0xffffffff); /* clear pending interrupts */
975 out32 (EMAC_IER + hw_p->hw_addr, hw_p->emac_ier);
976
977 if (hw_p->first_init == 0) {
978 /*
979 * Connect interrupt service routines
980 */
981 irq_install_handler (VECNUM_ETH0 + (hw_p->devnum * 2),
982 (interrupt_handler_t *) enetInt, dev);
983 }
984
985 mtmsr (msr); /* enable interrupts again */
986
987 hw_p->bis = bis;
988 hw_p->first_init = 1;
989
990 return (1);
991 }
992
993
994 static int ppc_4xx_eth_send (struct eth_device *dev, volatile void *ptr,
995 int len)
996 {
997 struct enet_frame *ef_ptr;
998 ulong time_start, time_now;
999 unsigned long temp_txm0;
1000 EMAC_4XX_HW_PST hw_p = dev->priv;
1001
1002 ef_ptr = (struct enet_frame *) ptr;
1003
1004 /*-----------------------------------------------------------------------+
1005 * Copy in our address into the frame.
1006 *-----------------------------------------------------------------------*/
1007 (void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
1008
1009 /*-----------------------------------------------------------------------+
1010 * If frame is too long or too short, modify length.
1011 *-----------------------------------------------------------------------*/
1012 /* TBS: where does the fragment go???? */
1013 if (len > ENET_MAX_MTU)
1014 len = ENET_MAX_MTU;
1015
1016 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
1017 memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
1018
1019 /*-----------------------------------------------------------------------+
1020 * set TX Buffer busy, and send it
1021 *-----------------------------------------------------------------------*/
1022 hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
1023 EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
1024 ~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
1025 if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
1026 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
1027
1028 hw_p->tx[hw_p->tx_slot].data_len = (short) len;
1029 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
1030
1031 __asm__ volatile ("eieio");
1032
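/* Kick the transmitter: GNP0 tells the EMAC to fetch the descriptor just marked READY */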
1033 out32 (EMAC_TXM0 + hw_p->hw_addr,
1034 in32 (EMAC_TXM0 + hw_p->hw_addr) | EMAC_TXM0_GNP0);
1035 #ifdef INFO_4XX_ENET
1036 hw_p->stats.pkts_tx++;
1037 #endif
1038
1039 /*-----------------------------------------------------------------------+
 1040 * poll until the packet is sent and then make sure it is OK
1041 *-----------------------------------------------------------------------*/
1042 time_start = get_timer (0);
1043 while (1) {
1044 temp_txm0 = in32 (EMAC_TXM0 + hw_p->hw_addr);
1045 /* loop until either TINT turns on or 3 seconds elapse */
1046 if ((temp_txm0 & EMAC_TXM0_GNP0) != 0) {
 1047 /* GNP0 still set: transmit not complete yet, so
 1048 * just check for a timeout. If there is an error,
 1049 * an interrupt should happen when we return
 1050 */
1051 time_now = get_timer (0);
1052 if ((time_now - time_start) > 3000) {
1053 return (-1);
1054 }
1055 } else {
1056 return (len);
1057 }
1058 }
1059 }
1060
1061
1062 #if defined (CONFIG_440)
1063
1064 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
1065 /*
1066 * Hack: On 440SP all enet irq sources are located on UIC1
1067 * Needs some cleanup. --sr
1068 */
1069 #define UIC0MSR uic1msr
1070 #define UIC0SR uic1sr
1071 #else
1072 #define UIC0MSR uic0msr
1073 #define UIC0SR uic0sr
1074 #endif
1075
1076 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
1077 #define UICMSR_ETHX uic0msr
1078 #define UICSR_ETHX uic0sr
1079 #else
1080 #define UICMSR_ETHX uic1msr
1081 #define UICSR_ETHX uic1sr
1082 #endif
1083
1084 int enetInt (struct eth_device *dev)
1085 {
1086 int serviced;
1087 int rc = -1; /* default to not us */
1088 unsigned long mal_isr;
1089 unsigned long emac_isr = 0;
1090 unsigned long mal_rx_eob;
1091 unsigned long my_uic0msr, my_uic1msr;
1092 unsigned long my_uicmsr_ethx;
1093
1094 #if defined(CONFIG_440GX)
1095 unsigned long my_uic2msr;
1096 #endif
1097 EMAC_4XX_HW_PST hw_p;
1098
1099 /*
1100 * Because the mal is generic, we need to get the current
1101 * eth device
1102 */
1103 #if defined(CONFIG_NET_MULTI)
1104 dev = eth_get_dev();
1105 #else
1106 dev = emac0_dev;
1107 #endif
1108
1109 hw_p = dev->priv;
1110
1111 /* enter loop that stays in interrupt code until nothing to service */
1112 do {
1113 serviced = 0;
1114
1115 my_uic0msr = mfdcr (UIC0MSR);
1116 my_uic1msr = mfdcr (uic1msr);
1117 #if defined(CONFIG_440GX)
1118 my_uic2msr = mfdcr (uic2msr);
1119 #endif
1120 my_uicmsr_ethx = mfdcr (UICMSR_ETHX);
1121
1122 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
1123 && !(my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))
1124 && !(my_uicmsr_ethx & (UIC_ETH0 | UIC_ETH1))) {
1125 /* not for us */
1126 return (rc);
1127 }
1128 #if defined (CONFIG_440GX)
1129 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
1130 && !(my_uic2msr & (UIC_ETH2 | UIC_ETH3))) {
1131 /* not for us */
1132 return (rc);
1133 }
1134 #endif
1135 /* get and clear controller status interrupts */
1136 /* look at Mal and EMAC interrupts */
1137 if ((my_uic0msr & (UIC_MRE | UIC_MTE))
1138 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1139 /* we have a MAL interrupt */
1140 mal_isr = mfdcr (malesr);
1141 /* look for mal error */
1142 if (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE)) {
1143 mal_err (dev, mal_isr, my_uic1msr, MAL_UIC_DEF, MAL_UIC_ERR);
1144 serviced = 1;
1145 rc = 0;
1146 }
1147 }
1148
1149 /* port by port dispatch of emac interrupts */
1150 if (hw_p->devnum == 0) {
1151 if (UIC_ETH0 & my_uicmsr_ethx) { /* look for EMAC errors */
1152 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1153 if ((hw_p->emac_ier & emac_isr) != 0) {
1154 emac_err (dev, emac_isr);
1155 serviced = 1;
1156 rc = 0;
1157 }
1158 }
1159 if ((hw_p->emac_ier & emac_isr)
1160 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1161 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1162 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1163 mtdcr (UICSR_ETHX, UIC_ETH0); /* Clear */
1164 return (rc); /* we had errors so get out */
1165 }
1166 }
1167
1168 #if !defined(CONFIG_440SP)
1169 if (hw_p->devnum == 1) {
1170 if (UIC_ETH1 & my_uicmsr_ethx) { /* look for EMAC errors */
1171 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1172 if ((hw_p->emac_ier & emac_isr) != 0) {
1173 emac_err (dev, emac_isr);
1174 serviced = 1;
1175 rc = 0;
1176 }
1177 }
1178 if ((hw_p->emac_ier & emac_isr)
1179 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1180 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1181 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1182 mtdcr (UICSR_ETHX, UIC_ETH1); /* Clear */
1183 return (rc); /* we had errors so get out */
1184 }
1185 }
1186 #if defined (CONFIG_440GX)
1187 if (hw_p->devnum == 2) {
1188 if (UIC_ETH2 & my_uic2msr) { /* look for EMAC errors */
1189 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1190 if ((hw_p->emac_ier & emac_isr) != 0) {
1191 emac_err (dev, emac_isr);
1192 serviced = 1;
1193 rc = 0;
1194 }
1195 }
1196 if ((hw_p->emac_ier & emac_isr)
1197 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1198 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1199 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1200 mtdcr (uic2sr, UIC_ETH2);
1201 return (rc); /* we had errors so get out */
1202 }
1203 }
1204
1205 if (hw_p->devnum == 3) {
1206 if (UIC_ETH3 & my_uic2msr) { /* look for EMAC errors */
1207 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1208 if ((hw_p->emac_ier & emac_isr) != 0) {
1209 emac_err (dev, emac_isr);
1210 serviced = 1;
1211 rc = 0;
1212 }
1213 }
1214 if ((hw_p->emac_ier & emac_isr)
1215 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1216 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1217 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1218 mtdcr (uic2sr, UIC_ETH3);
1219 return (rc); /* we had errors so get out */
1220 }
1221 }
1222 #endif /* CONFIG_440GX */
1223 #endif /* !CONFIG_440SP */
1224
 1225 /* handle MAL TX EOB interrupt from a tx */
1226 if (my_uic0msr & UIC_MTE) {
1227 mal_rx_eob = mfdcr (maltxeobisr);
1228 mtdcr (maltxeobisr, mal_rx_eob);
1229 mtdcr (UIC0SR, UIC_MTE);
1230 }
 1231 /* handle MAL RX EOB interrupt from a receive */
1232 /* check for EOB on valid channels */
1233 if (my_uic0msr & UIC_MRE) {
1234 mal_rx_eob = mfdcr (malrxeobisr);
1235 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1236 /* clear EOB
1237 mtdcr(malrxeobisr, mal_rx_eob); */
1238 enet_rcv (dev, emac_isr);
1239 /* indicate that we serviced an interrupt */
1240 serviced = 1;
1241 rc = 0;
1242 }
1243 }
1244
1245 mtdcr (UIC0SR, UIC_MRE); /* Clear */
1246 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1247 switch (hw_p->devnum) {
1248 case 0:
1249 mtdcr (UICSR_ETHX, UIC_ETH0);
1250 break;
1251 case 1:
1252 mtdcr (UICSR_ETHX, UIC_ETH1);
1253 break;
1254 #if defined (CONFIG_440GX)
1255 case 2:
1256 mtdcr (uic2sr, UIC_ETH2);
1257 break;
1258 case 3:
1259 mtdcr (uic2sr, UIC_ETH3);
1260 break;
1261 #endif /* CONFIG_440GX */
1262 default:
1263 break;
1264 }
1265 } while (serviced);
1266
1267 return (rc);
1268 }
1269
1270 #else /* CONFIG_440 */
1271
1272 int enetInt (struct eth_device *dev)
1273 {
1274 int serviced;
1275 int rc = -1; /* default to not us */
1276 unsigned long mal_isr;
1277 unsigned long emac_isr = 0;
1278 unsigned long mal_rx_eob;
1279 unsigned long my_uicmsr;
1280
1281 EMAC_4XX_HW_PST hw_p;
1282
1283 /*
1284 * Because the mal is generic, we need to get the current
1285 * eth device
1286 */
1287 #if defined(CONFIG_NET_MULTI)
1288 dev = eth_get_dev();
1289 #else
1290 dev = emac0_dev;
1291 #endif
1292
1293 hw_p = dev->priv;
1294
1295 /* enter loop that stays in interrupt code until nothing to service */
1296 do {
1297 serviced = 0;
1298
1299 my_uicmsr = mfdcr (uicmsr);
1300
1301 if ((my_uicmsr & (MAL_UIC_DEF | EMAC_UIC_DEF)) == 0) { /* not for us */
1302 return (rc);
1303 }
1304 /* get and clear controller status interrupts */
1305 /* look at Mal and EMAC interrupts */
1306 if ((MAL_UIC_DEF & my_uicmsr) != 0) { /* we have a MAL interrupt */
1307 mal_isr = mfdcr (malesr);
1308 /* look for mal error */
1309 if ((my_uicmsr & MAL_UIC_ERR) != 0) {
1310 mal_err (dev, mal_isr, my_uicmsr, MAL_UIC_DEF, MAL_UIC_ERR);
1311 serviced = 1;
1312 rc = 0;
1313 }
1314 }
1315
1316 /* port by port dispatch of emac interrupts */
1317
1318 if ((SEL_UIC_DEF(hw_p->devnum) & my_uicmsr) != 0) { /* look for EMAC errors */
1319 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1320 if ((hw_p->emac_ier & emac_isr) != 0) {
1321 emac_err (dev, emac_isr);
1322 serviced = 1;
1323 rc = 0;
1324 }
1325 }
1326 if (((hw_p->emac_ier & emac_isr) != 0) || ((MAL_UIC_ERR & my_uicmsr) != 0)) {
1327 mtdcr (uicsr, MAL_UIC_DEF | SEL_UIC_DEF(hw_p->devnum)); /* Clear */
1328 return (rc); /* we had errors so get out */
1329 }
1330
 1331 /* handle MAL TX EOB interrupt from a tx */
1332 if (my_uicmsr & UIC_MAL_TXEOB) {
1333 mal_rx_eob = mfdcr (maltxeobisr);
1334 mtdcr (maltxeobisr, mal_rx_eob);
1335 mtdcr (uicsr, UIC_MAL_TXEOB);
1336 }
 1337 /* handle MAL RX EOB interrupt from a receive */
1338 /* check for EOB on valid channels */
1339 if (my_uicmsr & UIC_MAL_RXEOB)
1340 {
1341 mal_rx_eob = mfdcr (malrxeobisr);
1342 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1343 /* clear EOB
1344 mtdcr(malrxeobisr, mal_rx_eob); */
1345 enet_rcv (dev, emac_isr);
1346 /* indicate that we serviced an interrupt */
1347 serviced = 1;
1348 rc = 0;
1349 }
1350 }
1351 mtdcr (uicsr, MAL_UIC_DEF|EMAC_UIC_DEF|EMAC_UIC_DEF1); /* Clear */
1352 #if defined(CONFIG_405EZ)
1353 mtsdr (sdricintstat, SDR_ICRX_STAT | SDR_ICTX0_STAT | SDR_ICTX1_STAT);
1354 #endif /* defined(CONFIG_405EZ) */
1355 }
1356 while (serviced);
1357
1358 return (rc);
1359 }
1360
1361 #endif /* CONFIG_440 */
1362
1363 /*-----------------------------------------------------------------------------+
1364 * MAL Error Routine
1365 *-----------------------------------------------------------------------------*/
1366 static void mal_err (struct eth_device *dev, unsigned long isr,
1367 unsigned long uic, unsigned long maldef,
1368 unsigned long mal_errr)
1369 {
1370 EMAC_4XX_HW_PST hw_p = dev->priv;
1371
1372 mtdcr (malesr, isr); /* clear interrupt */
1373
1374 /* clear DE interrupt */
1375 mtdcr (maltxdeir, 0xC0000000);
1376 mtdcr (malrxdeir, 0x80000000);
1377
1378 #ifdef INFO_4XX_ENET
 1379 printf ("\nMAL error occurred.... ISR = %lx UIC = %lx MAL_DEF = %lx MAL_ERR = %lx\n", isr, uic, maldef, mal_errr);
1380 #endif
1381
1382 eth_init (hw_p->bis); /* start again... */
1383 }
1384
1385 /*-----------------------------------------------------------------------------+
1386 * EMAC Error Routine
1387 *-----------------------------------------------------------------------------*/
1388 static void emac_err (struct eth_device *dev, unsigned long isr)
1389 {
1390 EMAC_4XX_HW_PST hw_p = dev->priv;
1391
 1392 printf ("EMAC%d error occurred.... ISR = %lx\n", hw_p->devnum, isr);
1393 out32 (EMAC_ISR + hw_p->hw_addr, isr);
1394 }
1395
1396 /*-----------------------------------------------------------------------------+
1397 * enet_rcv() handles the ethernet receive data
1398 *-----------------------------------------------------------------------------*/
1399 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1400 {
1401 struct enet_frame *ef_ptr;
1402 unsigned long data_len;
1403 unsigned long rx_eob_isr;
1404 EMAC_4XX_HW_PST hw_p = dev->priv;
1405
1406 int handled = 0;
1407 int i;
1408 int loop_count = 0;
1409
1410 rx_eob_isr = mfdcr (malrxeobisr);
1411 if ((0x80000000 >> hw_p->devnum) & rx_eob_isr) {
1412 /* clear EOB */
1413 mtdcr (malrxeobisr, rx_eob_isr);
1414
1415 /* EMAC RX done */
1416 while (1) { /* do all */
1417 i = hw_p->rx_slot;
1418
1419 if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1420 || (loop_count >= NUM_RX_BUFF))
1421 break;
1422
1423 loop_count++;
1424 handled++;
1425 data_len = (unsigned long) hw_p->rx[i].data_len; /* Get len */
1426 if (data_len) {
1427 if (data_len > ENET_MAX_MTU) /* Check len */
1428 data_len = 0;
1429 else {
1430 if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) { /* Check Errors */
1431 data_len = 0;
1432 hw_p->stats.rx_err_log[hw_p->
1433 rx_err_index]
1434 = hw_p->rx[i].ctrl;
1435 hw_p->rx_err_index++;
1436 if (hw_p->rx_err_index ==
1437 MAX_ERR_LOG)
1438 hw_p->rx_err_index =
1439 0;
 1440 } /* emac_errors */
1441 } /* data_len < max mtu */
1442 } /* if data_len */
1443 if (!data_len) { /* no data */
1444 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY; /* Free Recv Buffer */
1445
1446 hw_p->stats.data_len_err++; /* Error at Rx */
1447 }
1448
1449 /* !data_len */
1450 /* AS.HARNOIS */
1451 /* Check if user has already eaten buffer */
1452 /* if not => ERROR */
1453 else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1454 if (hw_p->is_receiving)
1455 printf ("ERROR : Receive buffers are full!\n");
1456 break;
1457 } else {
1458 hw_p->stats.rx_frames++;
1459 hw_p->stats.rx += data_len;
1460 ef_ptr = (struct enet_frame *) hw_p->rx[i].
1461 data_ptr;
1462 #ifdef INFO_4XX_ENET
1463 hw_p->stats.pkts_rx++;
1464 #endif
1465 /* AS.HARNOIS
1466 * use ring buffer
1467 */
1468 hw_p->rx_ready[hw_p->rx_i_index] = i;
1469 hw_p->rx_i_index++;
1470 if (NUM_RX_BUFF == hw_p->rx_i_index)
1471 hw_p->rx_i_index = 0;
1472
1473 hw_p->rx_slot++;
1474 if (NUM_RX_BUFF == hw_p->rx_slot)
1475 hw_p->rx_slot = 0;
1476
1477 /* AS.HARNOIS
1478 * free receive buffer only when
1479 * buffer has been handled (eth_rx)
1480 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1481 */
1482 } /* if data_len */
1483 } /* while */
1484 } /* if EMACK_RXCHL */
1485 }
1486
1487
1488 static int ppc_4xx_eth_rx (struct eth_device *dev)
1489 {
1490 int length;
1491 int user_index;
1492 unsigned long msr;
1493 EMAC_4XX_HW_PST hw_p = dev->priv;
1494
1495 hw_p->is_receiving = 1; /* tell driver */
1496
1497 for (;;) {
1498 /* AS.HARNOIS
1499 * use ring buffer and
1500 * get index from rx buffer desciptor queue
1501 */
1502 user_index = hw_p->rx_ready[hw_p->rx_u_index];
1503 if (user_index == -1) {
1504 length = -1;
1505 break; /* nothing received - leave for() loop */
1506 }
1507
1508 msr = mfmsr ();
1509 mtmsr (msr & ~(MSR_EE));
1510
1511 length = hw_p->rx[user_index].data_len;
1512
1513 /* Pass the packet up to the protocol layers. */
1514 /* NetReceive(NetRxPackets[rxIdx], length - 4); */
1515 /* NetReceive(NetRxPackets[i], length); */
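/* hand the frame to the stack without the trailing 4-byte FCS */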
1516 NetReceive (NetRxPackets[user_index], length - 4);
1517 /* Free Recv Buffer */
1518 hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1519 /* Free rx buffer descriptor queue */
1520 hw_p->rx_ready[hw_p->rx_u_index] = -1;
1521 hw_p->rx_u_index++;
1522 if (NUM_RX_BUFF == hw_p->rx_u_index)
1523 hw_p->rx_u_index = 0;
1524
1525 #ifdef INFO_4XX_ENET
1526 hw_p->stats.pkts_handled++;
1527 #endif
1528
1529 mtmsr (msr); /* Enable IRQ's */
1530 }
1531
1532 hw_p->is_receiving = 0; /* tell driver */
1533
1534 return length;
1535 }
1536
1537 int ppc_4xx_eth_initialize (bd_t * bis)
1538 {
1539 static int virgin = 0;
1540 struct eth_device *dev;
1541 int eth_num = 0;
1542 EMAC_4XX_HW_PST hw = NULL;
1543 u8 ethaddr[4 + CONFIG_EMAC_NR_START][6];
1544 u32 hw_addr[4];
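/* register offset of each EMAC relative to EMAC0, indexed by device number */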
1545
1546 #if defined(CONFIG_440GX)
1547 unsigned long pfc1;
1548
1549 mfsdr (sdr_pfc1, pfc1);
1550 pfc1 &= ~(0x01e00000);
1551 pfc1 |= 0x01200000;
1552 mtsdr (sdr_pfc1, pfc1);
1553 #endif
1554
1555 /* first clear all mac-addresses */
1556 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++)
1557 memcpy(ethaddr[eth_num], "\0\0\0\0\0\0", 6);
1558
1559 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1560 switch (eth_num) {
1561 default: /* fall through */
1562 case 0:
1563 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1564 bis->bi_enetaddr, 6);
1565 hw_addr[eth_num] = 0x0;
1566 break;
1567 #ifdef CONFIG_HAS_ETH1
1568 case 1:
1569 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1570 bis->bi_enet1addr, 6);
1571 hw_addr[eth_num] = 0x100;
1572 break;
1573 #endif
1574 #ifdef CONFIG_HAS_ETH2
1575 case 2:
1576 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1577 bis->bi_enet2addr, 6);
1578 hw_addr[eth_num] = 0x400;
1579 break;
1580 #endif
1581 #ifdef CONFIG_HAS_ETH3
1582 case 3:
1583 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1584 bis->bi_enet3addr, 6);
1585 hw_addr[eth_num] = 0x600;
1586 break;
1587 #endif
1588 }
1589 }
1590
1591 /* set phy num and mode */
1592 bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1593 bis->bi_phymode[0] = 0;
1594
1595 #if defined(CONFIG_PHY1_ADDR)
1596 bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1597 bis->bi_phymode[1] = 0;
1598 #endif
1599 #if defined(CONFIG_440GX)
1600 bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1601 bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
1602 bis->bi_phymode[2] = 2;
1603 bis->bi_phymode[3] = 2;
1604
1605 ppc_4xx_eth_setup_bridge(0, bis);
1606 #endif
1607
1608 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1609 /*
1610 * See if we can actually bring up the interface,
1611 * otherwise, skip it
1612 */
1613 if (memcmp (ethaddr[eth_num], "\0\0\0\0\0\0", 6) == 0) {
1614 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1615 continue;
1616 }
1617
1618 /* Allocate device structure */
1619 dev = (struct eth_device *) malloc (sizeof (*dev));
1620 if (dev == NULL) {
1621 printf ("ppc_4xx_eth_initialize: "
1622 "Cannot allocate eth_device %d\n", eth_num);
1623 return (-1);
1624 }
1625 memset(dev, 0, sizeof(*dev));
1626
1627 /* Allocate our private use data */
1628 hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
1629 if (hw == NULL) {
1630 printf ("ppc_4xx_eth_initialize: "
1631 "Cannot allocate private hw data for eth_device %d",
1632 eth_num);
1633 free (dev);
1634 return (-1);
1635 }
1636 memset(hw, 0, sizeof(*hw));
1637
1638 hw->hw_addr = hw_addr[eth_num];
1639 memcpy (dev->enetaddr, ethaddr[eth_num], 6);
1640 hw->devnum = eth_num;
1641 hw->print_speed = 1;
1642
1643 sprintf (dev->name, "ppc_4xx_eth%d", eth_num - CONFIG_EMAC_NR_START);
1644 dev->priv = (void *) hw;
1645 dev->init = ppc_4xx_eth_init;
1646 dev->halt = ppc_4xx_eth_halt;
1647 dev->send = ppc_4xx_eth_send;
1648 dev->recv = ppc_4xx_eth_rx;
1649
1650 if (0 == virgin) {
1651 /* set the MAL IER ??? names may change with new spec ??? */
1652 #if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
1653 mal_ier =
1654 MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE |
1655 MAL_IER_DE | MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE ;
1656 #else
1657 mal_ier =
1658 MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
1659 MAL_IER_OPBE | MAL_IER_PLBE;
1660 #endif
1661 mtdcr (malesr, 0xffffffff); /* clear pending interrupts */
1662 mtdcr (maltxdeir, 0xffffffff); /* clear pending interrupts */
1663 mtdcr (malrxdeir, 0xffffffff); /* clear pending interrupts */
1664 mtdcr (malier, mal_ier);
1665
1666 /* install MAL interrupt handler */
1667 irq_install_handler (VECNUM_MS,
1668 (interrupt_handler_t *) enetInt,
1669 dev);
1670 irq_install_handler (VECNUM_MTE,
1671 (interrupt_handler_t *) enetInt,
1672 dev);
1673 irq_install_handler (VECNUM_MRE,
1674 (interrupt_handler_t *) enetInt,
1675 dev);
1676 irq_install_handler (VECNUM_TXDE,
1677 (interrupt_handler_t *) enetInt,
1678 dev);
1679 irq_install_handler (VECNUM_RXDE,
1680 (interrupt_handler_t *) enetInt,
1681 dev);
1682 virgin = 1;
1683 }
1684
1685 #if defined(CONFIG_NET_MULTI)
1686 eth_register (dev);
1687 #else
1688 emac0_dev = dev;
1689 #endif
1690
1691 #if defined(CONFIG_NET_MULTI)
1692 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
1693 miiphy_register (dev->name,
1694 emac4xx_miiphy_read, emac4xx_miiphy_write);
1695 #endif
1696 #endif
1697 } /* end for each supported device */
1698 return (1);
1699 }
1700
1701 #if !defined(CONFIG_NET_MULTI)
1702 void eth_halt (void) {
1703 if (emac0_dev) {
1704 ppc_4xx_eth_halt(emac0_dev);
1705 free(emac0_dev);
1706 emac0_dev = NULL;
1707 }
1708 }
1709
1710 int eth_init (bd_t *bis)
1711 {
1712 ppc_4xx_eth_initialize(bis);
1713 if (emac0_dev) {
1714 return ppc_4xx_eth_init(emac0_dev, bis);
1715 } else {
1716 printf("ERROR: ethaddr not set!\n");
1717 return -1;
1718 }
1719 }
1720
1721 int eth_send(volatile void *packet, int length)
1722 {
1723 return (ppc_4xx_eth_send(emac0_dev, packet, length));
1724 }
1725
1726 int eth_rx(void)
1727 {
1728 return (ppc_4xx_eth_rx(emac0_dev));
1729 }
1730
1731 int emac4xx_miiphy_initialize (bd_t * bis)
1732 {
1733 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
1734 miiphy_register ("ppc_4xx_eth0",
1735 emac4xx_miiphy_read, emac4xx_miiphy_write);
1736 #endif
1737
1738 return 0;
1739 }
1740 #endif /* !defined(CONFIG_NET_MULTI) */
1741
1742 #endif