drivers/net/4xx_enet.c
1 /*-----------------------------------------------------------------------------+
2 * This source code is dual-licensed. You may use it under the terms of the
3 * GNU General Public License version 2, or under the license below.
4 *
5 * This source code has been made available to you by IBM on an AS-IS
6 * basis. Anyone receiving this source is licensed under IBM
7 * copyrights to use it in any way he or she deems fit, including
8 * copying it, modifying it, compiling it, and redistributing it either
9 * with or without modifications. No license under IBM patents or
10 * patent applications is to be implied by the copyright license.
11 *
12 * Any user of this software should understand that IBM cannot provide
13 * technical support for this software and will not be responsible for
14 * any consequences resulting from the use of this software.
15 *
16 * Any person who transfers this source code or any derivative work
17 * must include the IBM copyright notice, this paragraph, and the
18 * preceding two paragraphs in the transferred software.
19 *
20 * COPYRIGHT I B M CORPORATION 1995
21 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
22 *-----------------------------------------------------------------------------*/
23 /*-----------------------------------------------------------------------------+
24 *
25 * File Name: enetemac.c
26 *
27 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
28 *
29 * Author: Mark Wisner
30 *
31 * Change Activity-
32 *
33 * Date Description of Change BY
34 * --------- --------------------- ---
35 * 05-May-99 Created MKW
36 * 27-Jun-99 Clean up JWB
37 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
38 * 29-Jul-99 Added Full duplex support MKW
39 * 06-Aug-99 Changed names for Mal CR reg MKW
40 * 23-Aug-99 Turned off SYE when running at 10Mbs MKW
41 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
42 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
43 * to avoid chaining maximum sized packets. Push starting
44 * RX descriptor address up to the next cache line boundary.
45 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
46 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
47 * EMAC_RXM register. JWB
48 * 12-Mar-01 anne-sophie.harnois@nextream.fr
49 * - Variables are compatible with those already defined in
50 * include/net.h
51 * - Receive buffer descriptor ring is used to send buffers
52 * to the user
53 * - Info print about send/received/handled packet number if
54 * INFO_405_ENET is set
55 * 17-Apr-01 stefan.roese@esd-electronics.com
56 * - MAL reset in "eth_halt" included
57 * - Enet speed and duplex output now in one line
58 * 08-May-01 stefan.roese@esd-electronics.com
59 * - MAL error handling added (eth_init called again)
60 * 13-Nov-01 stefan.roese@esd-electronics.com
61 * - Set IST bit in EMAC_M1 reg upon 100MBit or full duplex
62 * 04-Jan-02 stefan.roese@esd-electronics.com
63 * - Wait for PHY auto negotiation to complete added
64 * 06-Feb-02 stefan.roese@esd-electronics.com
65 * - Bug fixed in waiting for auto negotiation to complete
66 * 26-Feb-02 stefan.roese@esd-electronics.com
67 * - rx and tx buffer descriptors now allocated (no fixed address
68 * used anymore)
69 * 17-Jun-02 stefan.roese@esd-electronics.com
70 * - MAL error debug printf 'M' removed (rx de interrupt may
71 * occur upon many incoming packets with only 4 rx buffers).
72 *-----------------------------------------------------------------------------*
73 * 17-Nov-03 travis.sawyer@sandburst.com
74 *            - ported from 405gp_enet.c to utilize up to 4 EMAC ports
75 * in the 440GX. This port should work with the 440GP
76 * (2 EMACs) also
77 * 15-Aug-05 sr@denx.de
78 * - merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
79 *              now handling all 4xx CPUs.
80 *-----------------------------------------------------------------------------*/
81
82 #include <config.h>
83 #include <common.h>
84 #include <net.h>
85 #include <asm/processor.h>
86 #include <asm/io.h>
87 #include <asm/cache.h>
88 #include <asm/mmu.h>
89 #include <commproc.h>
90 #include <ppc4xx.h>
91 #include <ppc4xx_enet.h>
92 #include <405_mal.h>
93 #include <miiphy.h>
94 #include <malloc.h>
95
96 #if !(defined(CONFIG_MII) || defined(CONFIG_CMD_MII))
97 #error "CONFIG_MII has to be defined!"
98 #endif
99
100 #if defined(CONFIG_NETCONSOLE) && !defined(CONFIG_NET_MULTI)
101 #error "CONFIG_NET_MULTI has to be defined for NetConsole"
102 #endif
103
104 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
105 #define PHY_AUTONEGOTIATE_TIMEOUT 5000 /* 5000 ms autonegotiate timeout */
106
107 /* Ethernet Transmit and Receive Buffers */
108 /* AS.HARNOIS
109 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
110 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
111 */
112 #define ENET_MAX_MTU PKTSIZE
113 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
114
115 /*-----------------------------------------------------------------------------+
116 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
117 * Interrupt Controller).
118 *-----------------------------------------------------------------------------*/
119 #define ETH_IRQ_NUM(dev) (VECNUM_ETH0 + ((dev) * VECNUM_ETH1_OFFS))
120
121 #if defined(CONFIG_HAS_ETH3)
122 #if !defined(CONFIG_440GX)
123 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)) | \
124 			 UIC_MASK(ETH_IRQ_NUM(2)) | UIC_MASK(ETH_IRQ_NUM(3)))
125 #else
126 /* Unfortunately 440GX spreads EMAC interrupts on multiple UICs */
127 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)))
128 #define UIC_ETHxB	(UIC_MASK(ETH_IRQ_NUM(2)) | UIC_MASK(ETH_IRQ_NUM(3)))
129 #endif /* !defined(CONFIG_440GX) */
130 #elif defined(CONFIG_HAS_ETH2)
131 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)) | \
132 			 UIC_MASK(ETH_IRQ_NUM(2)))
133 #elif defined(CONFIG_HAS_ETH1)
134 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)))
135 #else
136 #define UIC_ETHx UIC_MASK(ETH_IRQ_NUM(0))
137 #endif
138
139 /*
140 * Define a default version for UIC_ETHxB for non 440GX so that we can
141 * use common code for all 4xx variants
142 */
143 #if !defined(UIC_ETHxB)
144 #define UIC_ETHxB 0
145 #endif
146
147 #define UIC_MAL_SERR UIC_MASK(VECNUM_MAL_SERR)
148 #define UIC_MAL_TXDE UIC_MASK(VECNUM_MAL_TXDE)
149 #define UIC_MAL_RXDE UIC_MASK(VECNUM_MAL_RXDE)
150 #define UIC_MAL_TXEOB UIC_MASK(VECNUM_MAL_TXEOB)
151 #define UIC_MAL_RXEOB UIC_MASK(VECNUM_MAL_RXEOB)
152
153 #define MAL_UIC_ERR (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
154 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
155
156 /*
157 * We have 3 different interrupt types:
158 * - MAL interrupts indicating successful transfer
159 * - MAL error interrupts indicating MAL related errors
160 * - EMAC interrupts indicating EMAC related errors
161 *
162  * These interrupts can live on different UICs, but for now
163  * all interrupts of one type are on the same UIC. The only
164  * exception is the 440GX, where the EMAC interrupts are
165  * spread over two UICs.
166 */
167 #if defined(CONFIG_440GX)
168 #define UIC_BASE_MAL UIC1_DCR_BASE
169 #define UIC_BASE_MAL_ERR UIC2_DCR_BASE
170 #define UIC_BASE_EMAC UIC2_DCR_BASE
171 #define UIC_BASE_EMAC_B UIC3_DCR_BASE
172 #else
173 #define UIC_BASE_MAL (UIC0_DCR_BASE + (UIC_NR(VECNUM_MAL_TXEOB) * 0x10))
174 #define UIC_BASE_MAL_ERR (UIC0_DCR_BASE + (UIC_NR(VECNUM_MAL_SERR) * 0x10))
175 #define UIC_BASE_EMAC (UIC0_DCR_BASE + (UIC_NR(ETH_IRQ_NUM(0)) * 0x10))
176 #define UIC_BASE_EMAC_B (UIC0_DCR_BASE + (UIC_NR(ETH_IRQ_NUM(0)) * 0x10))
177 #endif
178
179 #undef INFO_4XX_ENET
180
181 #define BI_PHYMODE_NONE 0
182 #define BI_PHYMODE_ZMII 1
183 #define BI_PHYMODE_RGMII 2
184 #define BI_PHYMODE_GMII 3
185 #define BI_PHYMODE_RTBI 4
186 #define BI_PHYMODE_TBI 5
187 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
188 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
189 defined(CONFIG_405EX)
190 #define BI_PHYMODE_SMII 6
191 #define BI_PHYMODE_MII 7
192 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
193 #define BI_PHYMODE_RMII 8
194 #endif
195 #endif
196 #define BI_PHYMODE_SGMII 9
197
198 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
199 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
200 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
201 defined(CONFIG_405EX)
202 #define SDR0_MFR_ETH_CLK_SEL_V(n) ((0x01<<27) / (n+1))
203 #endif
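/*
 * Note: the parts that use SDR0_MFR_ETH_CLK_SEL_V() for the EMAC loopback
 * clock (see emac_loopback_enable() below) have at most two EMACs, so the
 * macro evaluates to 0x08000000 for EMAC0 and 0x04000000 for EMAC1.
 */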
204
205 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
206 #define SDR0_ETH_CFG_CLK_SEL_V(n) (0x01 << (8 + n))
207 #endif
208
209 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
210 #define MAL_RX_CHAN_MUL 8 /* 460EX/GT uses MAL channel 8 for EMAC1 */
211 #else
212 #define MAL_RX_CHAN_MUL 1
213 #endif
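/*
 * The multiplier is used in enetInt() below: the MAL RX EOB bit for a
 * given EMAC is (0x80000000 >> (devnum * MAL_RX_CHAN_MUL)), so on
 * 460EX/GT EMAC1 is serviced via MAL RX channel 8 (see the MAL0_RXCTP8R
 * setup in ppc_4xx_eth_init()).
 */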
214
215 /*--------------------------------------------------------------------+
216 * Fixed PHY (PHY-less) support for Ethernet Ports.
217 *--------------------------------------------------------------------*/
218
219 /*
220 * Some boards do not have a PHY for each ethernet port. These ports
221 * are known as Fixed PHY (or PHY-less) ports. For such ports, set
222 * the appropriate CONFIG_PHY_ADDR equal to CONFIG_FIXED_PHY and
223 * then define CONFIG_SYS_FIXED_PHY_PORTS to define what the speed and
224 * duplex should be for these ports in the board configuration
225 * file.
226 *
227 * For Example:
228 * #define CONFIG_FIXED_PHY 0xFFFFFFFF
229 *
230 * #define CONFIG_PHY_ADDR CONFIG_FIXED_PHY
231 * #define CONFIG_PHY1_ADDR 1
232 * #define CONFIG_PHY2_ADDR CONFIG_FIXED_PHY
233 * #define CONFIG_PHY3_ADDR 3
234 *
235 * #define CONFIG_SYS_FIXED_PHY_PORT(devnum,speed,duplex) \
236 * {devnum, speed, duplex},
237 *
238 * #define CONFIG_SYS_FIXED_PHY_PORTS \
239 * CONFIG_SYS_FIXED_PHY_PORT(0,1000,FULL) \
240 * CONFIG_SYS_FIXED_PHY_PORT(2,100,HALF)
241 */
242
243 #ifndef CONFIG_FIXED_PHY
244 #define CONFIG_FIXED_PHY 0xFFFFFFFF /* Fixed PHY (PHY-less) */
245 #endif
246
247 #ifndef CONFIG_SYS_FIXED_PHY_PORTS
248 #define CONFIG_SYS_FIXED_PHY_PORTS /* default is an empty array */
249 #endif
250
251 struct fixed_phy_port {
252 unsigned int devnum; /* ethernet port */
253 unsigned int speed; /* specified speed 10,100 or 1000 */
254 unsigned int duplex; /* specified duplex FULL or HALF */
255 };
256
257 static const struct fixed_phy_port fixed_phy_port[] = {
258 CONFIG_SYS_FIXED_PHY_PORTS /* defined in board configuration file */
259 };
260
261 /*-----------------------------------------------------------------------------+
262 * Global variables. TX and RX descriptors and buffers.
263 *-----------------------------------------------------------------------------*/
264
265 /*
266 * Get count of EMAC devices (doesn't have to be the max. possible number
267 * supported by the cpu)
268 *
269  * CONFIG_BOARD_EMAC_COUNT provides a "dynamic" way to configure the
270  * EMAC count at run time. This is needed for the Kilauea/Haleakala
271  * 405EX/405EXr eval boards, which share the same binary.
272 */
273 #if defined(CONFIG_BOARD_EMAC_COUNT)
274 #define LAST_EMAC_NUM board_emac_count()
275 #else /* CONFIG_BOARD_EMAC_COUNT */
276 #if defined(CONFIG_HAS_ETH3)
277 #define LAST_EMAC_NUM 4
278 #elif defined(CONFIG_HAS_ETH2)
279 #define LAST_EMAC_NUM 3
280 #elif defined(CONFIG_HAS_ETH1)
281 #define LAST_EMAC_NUM 2
282 #else
283 #define LAST_EMAC_NUM 1
284 #endif
285 #endif /* CONFIG_BOARD_EMAC_COUNT */
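/*
 * Sketch only (not taken from any board port): when CONFIG_BOARD_EMAC_COUNT
 * is set, the board code must provide board_emac_count(). A minimal,
 * hypothetical implementation for a 405EX/405EXr board could look like:
 *
 *	int board_emac_count(void)
 *	{
 *		// board_is_405exr() is a hypothetical board helper
 *		return board_is_405exr() ? 1 : 2;
 *	}
 */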
286
287 /* normal boards start with EMAC0 */
288 #if !defined(CONFIG_EMAC_NR_START)
289 #define CONFIG_EMAC_NR_START 0
290 #endif
291
292 #define MAL_RX_DESC_SIZE 2048
293 #define MAL_TX_DESC_SIZE 2048
294 #define MAL_ALLOC_SIZE (MAL_TX_DESC_SIZE + MAL_RX_DESC_SIZE)
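/*
 * Each mal_desc_t only holds a control field, a length and a buffer
 * pointer, so the 2 KiB reserved per descriptor ring leaves ample room
 * for the NUM_TX_BUFF/NUM_RX_BUFF descriptors actually initialized below.
 */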
295
296 /*-----------------------------------------------------------------------------+
297 * Prototypes and externals.
298 *-----------------------------------------------------------------------------*/
299 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
300
301 int enetInt (struct eth_device *dev);
302 static void mal_err (struct eth_device *dev, unsigned long isr,
303 unsigned long uic, unsigned long maldef,
304 unsigned long mal_errr);
305 static void emac_err (struct eth_device *dev, unsigned long isr);
306
307 extern int phy_setup_aneg (char *devname, unsigned char addr);
308 extern int emac4xx_miiphy_read (char *devname, unsigned char addr,
309 unsigned char reg, unsigned short *value);
310 extern int emac4xx_miiphy_write (char *devname, unsigned char addr,
311 unsigned char reg, unsigned short value);
312
313 int board_emac_count(void);
314
315 static void emac_loopback_enable(EMAC_4XX_HW_PST hw_p)
316 {
317 #if defined(CONFIG_440SPE) || \
318 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
319 defined(CONFIG_405EX)
320 u32 val;
321
322 mfsdr(SDR0_MFR, val);
323 val |= SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
324 mtsdr(SDR0_MFR, val);
325 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
326 u32 val;
327
328 mfsdr(SDR0_ETH_CFG, val);
329 val |= SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
330 mtsdr(SDR0_ETH_CFG, val);
331 #endif
332 }
333
334 static void emac_loopback_disable(EMAC_4XX_HW_PST hw_p)
335 {
336 #if defined(CONFIG_440SPE) || \
337 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
338 defined(CONFIG_405EX)
339 u32 val;
340
341 mfsdr(SDR0_MFR, val);
342 val &= ~SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
343 mtsdr(SDR0_MFR, val);
344 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
345 u32 val;
346
347 mfsdr(SDR0_ETH_CFG, val);
348 val &= ~SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
349 mtsdr(SDR0_ETH_CFG, val);
350 #endif
351 }
352
353 /*-----------------------------------------------------------------------------+
354 | ppc_4xx_eth_halt
355 | Disable MAL channel, and EMACn
356 +-----------------------------------------------------------------------------*/
357 static void ppc_4xx_eth_halt (struct eth_device *dev)
358 {
359 EMAC_4XX_HW_PST hw_p = dev->priv;
360 u32 val = 10000;
361
362 out_be32((void *)EMAC_IER + hw_p->hw_addr, 0x00000000); /* disable emac interrupts */
363
364 /* 1st reset MAL channel */
365 /* Note: writing a 0 to a channel has no effect */
366 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
367 mtdcr (MAL0_TXCARR, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
368 #else
369 mtdcr (MAL0_TXCARR, (MAL_CR_MMSR >> hw_p->devnum));
370 #endif
371 mtdcr (MAL0_RXCARR, (MAL_CR_MMSR >> hw_p->devnum));
372
373 /* wait for reset */
374 while (mfdcr (MAL0_RXCASR) & (MAL_CR_MMSR >> hw_p->devnum)) {
375 udelay (1000); /* Delay 1 MS so as not to hammer the register */
376 val--;
377 if (val == 0)
378 break;
379 }
380
381 /* provide clocks for EMAC internal loopback */
382 emac_loopback_enable(hw_p);
383
384 /* EMAC RESET */
385 out_be32((void *)EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
386
387 /* remove clocks for EMAC internal loopback */
388 emac_loopback_disable(hw_p);
389
390 #ifndef CONFIG_NETCONSOLE
391 hw_p->print_speed = 1; /* print speed message again next time */
392 #endif
393
394 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
395 /* don't bypass the TAHOE0/TAHOE1 cores for Linux */
396 mfsdr(SDR0_ETH_CFG, val);
397 val &= ~(SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
398 mtsdr(SDR0_ETH_CFG, val);
399 #endif
400
401 return;
402 }
403
404 #if defined (CONFIG_440GX)
405 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
406 {
407 unsigned long pfc1;
408 unsigned long zmiifer;
409 unsigned long rmiifer;
410
411 mfsdr(SDR0_PFC1, pfc1);
412 pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
413
414 zmiifer = 0;
415 rmiifer = 0;
416
417 switch (pfc1) {
418 case 1:
419 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
420 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
421 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
422 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
423 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
424 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
425 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
426 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
427 break;
428 case 2:
429 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
430 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
431 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
432 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
433 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
434 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
435 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
436 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
437 break;
438 case 3:
439 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
440 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
441 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
442 bis->bi_phymode[1] = BI_PHYMODE_NONE;
443 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
444 bis->bi_phymode[3] = BI_PHYMODE_NONE;
445 break;
446 case 4:
447 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
448 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
449 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
450 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
451 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
452 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
453 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
454 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
455 break;
456 case 5:
457 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
458 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
459 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
460 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
461 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
462 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
463 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
464 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
465 break;
466 case 6:
467 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
468 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
469 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
470 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
471 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
472 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
473 break;
474 case 0:
475 default:
476 zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
477 rmiifer = 0x0;
478 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
479 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
480 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
481 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
482 break;
483 }
484
485 /* Ensure we setup mdio for this devnum and ONLY this devnum */
486 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
487
488 out_be32((void *)ZMII_FER, zmiifer);
489 out_be32((void *)RGMII_FER, rmiifer);
490
491 return ((int)pfc1);
492 }
493 #endif /* CONFIG_440GX */
494
495 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
496 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
497 {
498 unsigned long zmiifer=0x0;
499 unsigned long pfc1;
500
501 mfsdr(SDR0_PFC1, pfc1);
502 pfc1 &= SDR0_PFC1_SELECT_MASK;
503
504 switch (pfc1) {
505 case SDR0_PFC1_SELECT_CONFIG_2:
506 /* 1 x GMII port */
507 out_be32((void *)ZMII_FER, 0x00);
508 out_be32((void *)RGMII_FER, 0x00000037);
509 bis->bi_phymode[0] = BI_PHYMODE_GMII;
510 bis->bi_phymode[1] = BI_PHYMODE_NONE;
511 break;
512 case SDR0_PFC1_SELECT_CONFIG_4:
513 /* 2 x RGMII ports */
514 out_be32((void *)ZMII_FER, 0x00);
515 out_be32((void *)RGMII_FER, 0x00000055);
516 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
517 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
518 break;
519 case SDR0_PFC1_SELECT_CONFIG_6:
520 /* 2 x SMII ports */
521 out_be32((void *)ZMII_FER,
522 ((ZMII_FER_SMII) << ZMII_FER_V(0)) |
523 ((ZMII_FER_SMII) << ZMII_FER_V(1)));
524 out_be32((void *)RGMII_FER, 0x00000000);
525 bis->bi_phymode[0] = BI_PHYMODE_SMII;
526 bis->bi_phymode[1] = BI_PHYMODE_SMII;
527 break;
528 case SDR0_PFC1_SELECT_CONFIG_1_2:
529 /* only 1 x MII supported */
530 out_be32((void *)ZMII_FER, (ZMII_FER_MII) << ZMII_FER_V(0));
531 out_be32((void *)RGMII_FER, 0x00000000);
532 bis->bi_phymode[0] = BI_PHYMODE_MII;
533 bis->bi_phymode[1] = BI_PHYMODE_NONE;
534 break;
535 default:
536 break;
537 }
538
539 /* Ensure we setup mdio for this devnum and ONLY this devnum */
540 zmiifer = in_be32((void *)ZMII_FER);
541 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
542 out_be32((void *)ZMII_FER, zmiifer);
543
544 return ((int)0x0);
545 }
546 #endif /* CONFIG_440EPX || CONFIG_440GRX */
547
548 #if defined(CONFIG_405EX)
549 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
550 {
551 u32 rgmiifer = 0;
552
553 /*
554 * The 405EX(r)'s RGMII bridge can operate in one of several
555 * modes, only one of which (2 x RGMII) allows the
556 * simultaneous use of both EMACs on the 405EX.
557 */
558
559 switch (CONFIG_EMAC_PHY_MODE) {
560
561 case EMAC_PHY_MODE_NONE:
562 /* No ports */
563 rgmiifer |= RGMII_FER_DIS << 0;
564 rgmiifer |= RGMII_FER_DIS << 4;
565 out_be32((void *)RGMII_FER, rgmiifer);
566 bis->bi_phymode[0] = BI_PHYMODE_NONE;
567 bis->bi_phymode[1] = BI_PHYMODE_NONE;
568 break;
569 case EMAC_PHY_MODE_NONE_RGMII:
570 /* 1 x RGMII port on channel 0 */
571 rgmiifer |= RGMII_FER_RGMII << 0;
572 rgmiifer |= RGMII_FER_DIS << 4;
573 out_be32((void *)RGMII_FER, rgmiifer);
574 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
575 bis->bi_phymode[1] = BI_PHYMODE_NONE;
576 break;
577 case EMAC_PHY_MODE_RGMII_NONE:
578 /* 1 x RGMII port on channel 1 */
579 rgmiifer |= RGMII_FER_DIS << 0;
580 rgmiifer |= RGMII_FER_RGMII << 4;
581 out_be32((void *)RGMII_FER, rgmiifer);
582 bis->bi_phymode[0] = BI_PHYMODE_NONE;
583 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
584 break;
585 case EMAC_PHY_MODE_RGMII_RGMII:
586 /* 2 x RGMII ports */
587 rgmiifer |= RGMII_FER_RGMII << 0;
588 rgmiifer |= RGMII_FER_RGMII << 4;
589 out_be32((void *)RGMII_FER, rgmiifer);
590 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
591 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
592 break;
593 case EMAC_PHY_MODE_NONE_GMII:
594 /* 1 x GMII port on channel 0 */
595 rgmiifer |= RGMII_FER_GMII << 0;
596 rgmiifer |= RGMII_FER_DIS << 4;
597 out_be32((void *)RGMII_FER, rgmiifer);
598 bis->bi_phymode[0] = BI_PHYMODE_GMII;
599 bis->bi_phymode[1] = BI_PHYMODE_NONE;
600 break;
601 case EMAC_PHY_MODE_NONE_MII:
602 /* 1 x MII port on channel 0 */
603 rgmiifer |= RGMII_FER_MII << 0;
604 rgmiifer |= RGMII_FER_DIS << 4;
605 out_be32((void *)RGMII_FER, rgmiifer);
606 bis->bi_phymode[0] = BI_PHYMODE_MII;
607 bis->bi_phymode[1] = BI_PHYMODE_NONE;
608 break;
609 case EMAC_PHY_MODE_GMII_NONE:
610 /* 1 x GMII port on channel 1 */
611 rgmiifer |= RGMII_FER_DIS << 0;
612 rgmiifer |= RGMII_FER_GMII << 4;
613 out_be32((void *)RGMII_FER, rgmiifer);
614 bis->bi_phymode[0] = BI_PHYMODE_NONE;
615 bis->bi_phymode[1] = BI_PHYMODE_GMII;
616 break;
617 case EMAC_PHY_MODE_MII_NONE:
618 /* 1 x MII port on channel 1 */
619 rgmiifer |= RGMII_FER_DIS << 0;
620 rgmiifer |= RGMII_FER_MII << 4;
621 out_be32((void *)RGMII_FER, rgmiifer);
622 bis->bi_phymode[0] = BI_PHYMODE_NONE;
623 bis->bi_phymode[1] = BI_PHYMODE_MII;
624 break;
625 default:
626 break;
627 }
628
629 /* Ensure we setup mdio for this devnum and ONLY this devnum */
630 rgmiifer = in_be32((void *)RGMII_FER);
631 rgmiifer |= (1 << (19-devnum));
632 out_be32((void *)RGMII_FER, rgmiifer);
633
634 return ((int)0x0);
635 }
636 #endif /* CONFIG_405EX */
637
638 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
639 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
640 {
641 u32 eth_cfg;
642 u32 zmiifer; /* ZMII0_FER reg. */
643 u32 rmiifer; /* RGMII0_FER reg. Bridge 0 */
644 u32 rmiifer1; /* RGMII0_FER reg. Bridge 1 */
645 int mode;
646
647 zmiifer = 0;
648 rmiifer = 0;
649 rmiifer1 = 0;
650
651 #if defined(CONFIG_460EX)
652 mode = 9;
653 mfsdr(SDR0_ETH_CFG, eth_cfg);
654 if (((eth_cfg & SDR0_ETH_CFG_SGMII0_ENABLE) > 0) &&
655 ((eth_cfg & SDR0_ETH_CFG_SGMII1_ENABLE) > 0))
656 mode = 11; /* config SGMII */
657 #else
658 mode = 10;
659 mfsdr(SDR0_ETH_CFG, eth_cfg);
660 if (((eth_cfg & SDR0_ETH_CFG_SGMII0_ENABLE) > 0) &&
661 ((eth_cfg & SDR0_ETH_CFG_SGMII1_ENABLE) > 0) &&
662 ((eth_cfg & SDR0_ETH_CFG_SGMII2_ENABLE) > 0))
663 mode = 12; /* config SGMII */
664 #endif
665
666 /* TODO:
667 * NOTE: 460GT has 2 RGMII bridge cores:
668 * emac0 ------ RGMII0_BASE
669 * |
670 * emac1 -----+
671 *
672 * emac2 ------ RGMII1_BASE
673 * |
674 * emac3 -----+
675 *
676 * 460EX has 1 RGMII bridge core:
677 * and RGMII1_BASE is disabled
678 * emac0 ------ RGMII0_BASE
679 * |
680 * emac1 -----+
681 */
682
683 /*
684 * Right now only 2*RGMII is supported. Please extend when needed.
685 * sr - 2008-02-19
686 * Add SGMII support.
687 * vg - 2008-07-28
688 */
689 switch (mode) {
690 case 1:
691 /* 1 MII - 460EX */
692 /* GMC0 EMAC4_0, ZMII Bridge */
693 zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
694 bis->bi_phymode[0] = BI_PHYMODE_MII;
695 bis->bi_phymode[1] = BI_PHYMODE_NONE;
696 bis->bi_phymode[2] = BI_PHYMODE_NONE;
697 bis->bi_phymode[3] = BI_PHYMODE_NONE;
698 break;
699 case 2:
700 /* 2 MII - 460GT */
701 /* GMC0 EMAC4_0, GMC1 EMAC4_2, ZMII Bridge */
702 zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
703 zmiifer |= ZMII_FER_MII << ZMII_FER_V(2);
704 bis->bi_phymode[0] = BI_PHYMODE_MII;
705 bis->bi_phymode[1] = BI_PHYMODE_NONE;
706 bis->bi_phymode[2] = BI_PHYMODE_MII;
707 bis->bi_phymode[3] = BI_PHYMODE_NONE;
708 break;
709 case 3:
710 /* 2 RMII - 460EX */
711 /* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
712 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
713 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
714 bis->bi_phymode[0] = BI_PHYMODE_RMII;
715 bis->bi_phymode[1] = BI_PHYMODE_RMII;
716 bis->bi_phymode[2] = BI_PHYMODE_NONE;
717 bis->bi_phymode[3] = BI_PHYMODE_NONE;
718 break;
719 case 4:
720 /* 4 RMII - 460GT */
721                 /* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC1 EMAC4_2, GMC1 EMAC4_3 */
722 /* ZMII Bridge */
723 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
724 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
725 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
726 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
727 bis->bi_phymode[0] = BI_PHYMODE_RMII;
728 bis->bi_phymode[1] = BI_PHYMODE_RMII;
729 bis->bi_phymode[2] = BI_PHYMODE_RMII;
730 bis->bi_phymode[3] = BI_PHYMODE_RMII;
731 break;
732 case 5:
733 /* 2 SMII - 460EX */
734 /* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
735 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
736 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
737 bis->bi_phymode[0] = BI_PHYMODE_SMII;
738 bis->bi_phymode[1] = BI_PHYMODE_SMII;
739 bis->bi_phymode[2] = BI_PHYMODE_NONE;
740 bis->bi_phymode[3] = BI_PHYMODE_NONE;
741 break;
742 case 6:
743 /* 4 SMII - 460GT */
744                 /* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC1 EMAC4_2, GMC1 EMAC4_3 */
745 /* ZMII Bridge */
746 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
747 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
748 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
749 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
750 bis->bi_phymode[0] = BI_PHYMODE_SMII;
751 bis->bi_phymode[1] = BI_PHYMODE_SMII;
752 bis->bi_phymode[2] = BI_PHYMODE_SMII;
753 bis->bi_phymode[3] = BI_PHYMODE_SMII;
754 break;
755 case 7:
756 /* This is the default mode that we want for board bringup - Maple */
757 /* 1 GMII - 460EX */
758 /* GMC0 EMAC4_0, RGMII Bridge 0 */
759 rmiifer |= RGMII_FER_MDIO(0);
760
761 if (devnum == 0) {
762 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
763 bis->bi_phymode[0] = BI_PHYMODE_GMII;
764 bis->bi_phymode[1] = BI_PHYMODE_NONE;
765 bis->bi_phymode[2] = BI_PHYMODE_NONE;
766 bis->bi_phymode[3] = BI_PHYMODE_NONE;
767 } else {
768 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(3); /* CH1CFG - EMAC1 */
769 bis->bi_phymode[0] = BI_PHYMODE_NONE;
770 bis->bi_phymode[1] = BI_PHYMODE_GMII;
771 bis->bi_phymode[2] = BI_PHYMODE_NONE;
772 bis->bi_phymode[3] = BI_PHYMODE_NONE;
773 }
774 break;
775 case 8:
776 /* 2 GMII - 460GT */
777 /* GMC0 EMAC4_0, RGMII Bridge 0 */
778 /* GMC1 EMAC4_2, RGMII Bridge 1 */
779 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
780 rmiifer1 |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC2 */
781 rmiifer |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC0 */
782 rmiifer1 |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC2 */
783
784 bis->bi_phymode[0] = BI_PHYMODE_GMII;
785 bis->bi_phymode[1] = BI_PHYMODE_NONE;
786 bis->bi_phymode[2] = BI_PHYMODE_GMII;
787 bis->bi_phymode[3] = BI_PHYMODE_NONE;
788 break;
789 case 9:
790 /* 2 RGMII - 460EX */
791 /* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
792 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
793 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
794 rmiifer |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC0 */
795
796 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
797 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
798 bis->bi_phymode[2] = BI_PHYMODE_NONE;
799 bis->bi_phymode[3] = BI_PHYMODE_NONE;
800 break;
801 case 10:
802 /* 4 RGMII - 460GT */
803 /* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
804 /* GMC1 EMAC4_2, GMC1 EMAC4_3, RGMII Bridge 1 */
805 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
806 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
807 rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(2);
808 rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(3);
809 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
810 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
811 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
812 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
813 break;
814 case 11:
815 /* 2 SGMII - 460EX */
816 bis->bi_phymode[0] = BI_PHYMODE_SGMII;
817 bis->bi_phymode[1] = BI_PHYMODE_SGMII;
818 bis->bi_phymode[2] = BI_PHYMODE_NONE;
819 bis->bi_phymode[3] = BI_PHYMODE_NONE;
820 break;
821 case 12:
822 /* 3 SGMII - 460GT */
823 bis->bi_phymode[0] = BI_PHYMODE_SGMII;
824 bis->bi_phymode[1] = BI_PHYMODE_SGMII;
825 bis->bi_phymode[2] = BI_PHYMODE_SGMII;
826 bis->bi_phymode[3] = BI_PHYMODE_NONE;
827 break;
828 default:
829 break;
830 }
831
832 /* Set EMAC for MDIO */
833 mfsdr(SDR0_ETH_CFG, eth_cfg);
834 eth_cfg |= SDR0_ETH_CFG_MDIO_SEL_EMAC0;
835 mtsdr(SDR0_ETH_CFG, eth_cfg);
836
837 out_be32((void *)RGMII_FER, rmiifer);
838 #if defined(CONFIG_460GT)
839 out_be32((void *)RGMII_FER + RGMII1_BASE_OFFSET, rmiifer1);
840 #endif
841
842 /* bypass the TAHOE0/TAHOE1 cores for U-Boot */
843 mfsdr(SDR0_ETH_CFG, eth_cfg);
844 eth_cfg |= (SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
845 mtsdr(SDR0_ETH_CFG, eth_cfg);
846
847 return 0;
848 }
849 #endif /* CONFIG_460EX || CONFIG_460GT */
850
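/*
 * malloc_aligned() over-allocates by 'align' bytes and rounds the result
 * up to the next 'align' boundary. The rounded pointer can no longer be
 * handed back to free(); that is acceptable here because the descriptor
 * and buffer memory below is allocated once (first_init) and kept for
 * the lifetime of U-Boot.
 */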
851 static inline void *malloc_aligned(u32 size, u32 align)
852 {
853 return (void *)(((u32)malloc(size + align) + align - 1) &
854 ~(align - 1));
855 }
856
857 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
858 {
859 int i;
860 unsigned long reg = 0;
861 unsigned long msr;
862 unsigned long speed;
863 unsigned long duplex;
864 unsigned long failsafe;
865 unsigned mode_reg;
866 unsigned short devnum;
867 unsigned short reg_short;
868 #if defined(CONFIG_440GX) || \
869 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
870 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
871 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
872 defined(CONFIG_405EX)
873 u32 opbfreq;
874 sys_info_t sysinfo;
875 #if defined(CONFIG_440GX) || \
876 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
877 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
878 defined(CONFIG_405EX)
879 int ethgroup = -1;
880 #endif
881 #endif
882 u32 bd_cached;
883 u32 bd_uncached = 0;
884 #ifdef CONFIG_4xx_DCACHE
885 static u32 last_used_ea = 0;
886 #endif
887 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
888 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
889 defined(CONFIG_405EX)
890 int rgmii_channel;
891 #endif
892
893 EMAC_4XX_HW_PST hw_p = dev->priv;
894
895 /* before doing anything, figure out if we have a MAC address */
896 /* if not, bail */
897 if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
898 printf("ERROR: ethaddr not set!\n");
899 return -1;
900 }
901
902 #if defined(CONFIG_440GX) || \
903 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
904 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
905 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
906 defined(CONFIG_405EX)
907 /* Need to get the OPB frequency so we can access the PHY */
908 get_sys_info (&sysinfo);
909 #endif
910
911 msr = mfmsr ();
912 mtmsr (msr & ~(MSR_EE)); /* disable interrupts */
913
914 devnum = hw_p->devnum;
915
916 #ifdef INFO_4XX_ENET
917 /* AS.HARNOIS
918 * We should have :
919 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
920          * In most cases hw_p->stats.pkts_handled == hw_p->stats.pkts_rx,
921          * but it is possible that new packets (unrelated to the current
922          * transfer) have had time to arrive before
923          * NetLoop calls eth_halt.
924 */
925 printf ("About preceeding transfer (eth%d):\n"
926 "- Sent packet number %d\n"
927 "- Received packet number %d\n"
928 "- Handled packet number %d\n",
929 hw_p->devnum,
930 hw_p->stats.pkts_tx,
931 hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
932
933 hw_p->stats.pkts_tx = 0;
934 hw_p->stats.pkts_rx = 0;
935 hw_p->stats.pkts_handled = 0;
936 hw_p->print_speed = 1; /* print speed message again next time */
937 #endif
938
939 hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
940 hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
941
942 hw_p->rx_slot = 0; /* MAL Receive Slot */
943 hw_p->rx_i_index = 0; /* Receive Interrupt Queue Index */
944 hw_p->rx_u_index = 0; /* Receive User Queue Index */
945
946 hw_p->tx_slot = 0; /* MAL Transmit Slot */
947 hw_p->tx_i_index = 0; /* Transmit Interrupt Queue Index */
948 hw_p->tx_u_index = 0; /* Transmit User Queue Index */
949
950 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
951 /* set RMII mode */
952 /* NOTE: 440GX spec states that mode is mutually exclusive */
953 /* NOTE: Therefore, disable all other EMACS, since we handle */
954 /* NOTE: only one emac at a time */
955 reg = 0;
956 out_be32((void *)ZMII_FER, 0);
957 udelay (100);
958
959 #if defined(CONFIG_440GP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
960 out_be32((void *)ZMII_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
961 #elif defined(CONFIG_440GX) || \
962 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
963 defined(CONFIG_460EX) || defined(CONFIG_460GT)
964 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
965 #endif
966
967 out_be32((void *)ZMII_SSR, ZMII_SSR_SP << ZMII_SSR_V(devnum));
968 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
969 #if defined(CONFIG_405EX)
970 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
971 #endif
972
973 sync();
974
975 /* provide clocks for EMAC internal loopback */
976 emac_loopback_enable(hw_p);
977
978 /* EMAC RESET */
979 out_be32((void *)EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
980
981 /* remove clocks for EMAC internal loopback */
982 emac_loopback_disable(hw_p);
983
984 failsafe = 1000;
985 while ((in_be32((void *)EMAC_M0 + hw_p->hw_addr) & (EMAC_M0_SRST)) && failsafe) {
986 udelay (1000);
987 failsafe--;
988 }
989 if (failsafe <= 0)
990 printf("\nProblem resetting EMAC!\n");
991
992 #if defined(CONFIG_440GX) || \
993 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
994 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
995 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
996 defined(CONFIG_405EX)
997 /* Whack the M1 register */
998 mode_reg = 0x0;
999 mode_reg &= ~0x00000038;
1000 opbfreq = sysinfo.freqOPB / 1000000;
1001         if (opbfreq <= 50); /* <= 50 MHz: nothing to set, OBCI bits stay 0 */
1002 else if (opbfreq <= 66)
1003 mode_reg |= EMAC_M1_OBCI_66;
1004 else if (opbfreq <= 83)
1005 mode_reg |= EMAC_M1_OBCI_83;
1006 else if (opbfreq <= 100)
1007 mode_reg |= EMAC_M1_OBCI_100;
1008 else
1009 mode_reg |= EMAC_M1_OBCI_GT100;
1010
1011 out_be32((void *)EMAC_M1 + hw_p->hw_addr, mode_reg);
1012 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
1013
1014 #if defined(CONFIG_GPCS_PHY_ADDR) || defined(CONFIG_GPCS_PHY1_ADDR) || \
1015 defined(CONFIG_GPCS_PHY2_ADDR) || defined(CONFIG_GPCS_PHY3_ADDR)
1016 if (bis->bi_phymode[devnum] == BI_PHYMODE_SGMII) {
1017 /*
1018 * In SGMII mode, GPCS access is needed for
1019 * communication with the internal SGMII SerDes.
1020 */
1021 switch (devnum) {
1022 #if defined(CONFIG_GPCS_PHY_ADDR)
1023 case 0:
1024 reg = CONFIG_GPCS_PHY_ADDR;
1025 break;
1026 #endif
1027 #if defined(CONFIG_GPCS_PHY1_ADDR)
1028 case 1:
1029 reg = CONFIG_GPCS_PHY1_ADDR;
1030 break;
1031 #endif
1032 #if defined(CONFIG_GPCS_PHY2_ADDR)
1033 case 2:
1034 reg = CONFIG_GPCS_PHY2_ADDR;
1035 break;
1036 #endif
1037 #if defined(CONFIG_GPCS_PHY3_ADDR)
1038 case 3:
1039 reg = CONFIG_GPCS_PHY3_ADDR;
1040 break;
1041 #endif
1042 }
1043
1044 mode_reg = in_be32((void *)EMAC_M1 + hw_p->hw_addr);
1045 mode_reg |= EMAC_M1_MF_1000GPCS | EMAC_M1_IPPA_SET(reg);
1046 out_be32((void *)EMAC_M1 + hw_p->hw_addr, mode_reg);
1047
1048 /* Configure GPCS interface to recommended setting for SGMII */
1049 miiphy_reset(dev->name, reg);
1050 miiphy_write(dev->name, reg, 0x04, 0x8120); /* AsymPause, FDX */
1051 miiphy_write(dev->name, reg, 0x07, 0x2801); /* msg_pg, toggle */
1052 miiphy_write(dev->name, reg, 0x00, 0x0140); /* 1Gbps, FDX */
1053 }
1054 #endif /* defined(CONFIG_GPCS_PHY_ADDR) */
1055
1056 /* wait for PHY to complete auto negotiation */
1057 reg_short = 0;
1058 switch (devnum) {
1059 case 0:
1060 reg = CONFIG_PHY_ADDR;
1061 break;
1062 #if defined (CONFIG_PHY1_ADDR)
1063 case 1:
1064 reg = CONFIG_PHY1_ADDR;
1065 break;
1066 #endif
1067 #if defined (CONFIG_PHY2_ADDR)
1068 case 2:
1069 reg = CONFIG_PHY2_ADDR;
1070 break;
1071 #endif
1072 #if defined (CONFIG_PHY3_ADDR)
1073 case 3:
1074 reg = CONFIG_PHY3_ADDR;
1075 break;
1076 #endif
1077 default:
1078 reg = CONFIG_PHY_ADDR;
1079 break;
1080 }
1081
1082 bis->bi_phynum[devnum] = reg;
1083
1084 if (reg == CONFIG_FIXED_PHY)
1085 goto get_speed;
1086
1087 #if defined(CONFIG_PHY_RESET)
1088 /*
1089          * Reset the PHY, but only the first time through;
1090          * otherwise just check the speed and duplex settings
1091 */
1092 if (hw_p->first_init == 0) {
1093 #if defined(CONFIG_M88E1111_PHY)
1094 miiphy_write (dev->name, reg, 0x14, 0x0ce3);
1095 miiphy_write (dev->name, reg, 0x18, 0x4101);
1096 miiphy_write (dev->name, reg, 0x09, 0x0e00);
1097 miiphy_write (dev->name, reg, 0x04, 0x01e1);
1098 #endif
1099 #if defined(CONFIG_M88E1112_PHY)
1100 if (bis->bi_phymode[devnum] == BI_PHYMODE_SGMII) {
1101 /*
1102 * Marvell 88E1112 PHY needs to have the SGMII MAC
1103                          * interface (page 2) properly configured to
1104 * communicate with the 460EX/GT GPCS interface.
1105 */
1106
1107 /* Set access to Page 2 */
1108 miiphy_write(dev->name, reg, 0x16, 0x0002);
1109
1110 miiphy_write(dev->name, reg, 0x00, 0x0040); /* 1Gbps */
1111 miiphy_read(dev->name, reg, 0x1a, &reg_short);
1112 reg_short |= 0x8000; /* bypass Auto-Negotiation */
1113 miiphy_write(dev->name, reg, 0x1a, reg_short);
1114 miiphy_reset(dev->name, reg); /* reset MAC interface */
1115
1116 /* Reset access to Page 0 */
1117 miiphy_write(dev->name, reg, 0x16, 0x0000);
1118 }
1119 #endif /* defined(CONFIG_M88E1112_PHY) */
1120 miiphy_reset (dev->name, reg);
1121
1122 #if defined(CONFIG_440GX) || \
1123 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1124 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1125 defined(CONFIG_405EX)
1126
1127 #if defined(CONFIG_CIS8201_PHY)
1128 /*
1129 * Cicada 8201 PHY needs to have an extended register whacked
1130 * for RGMII mode.
1131 */
1132 if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
1133 #if defined(CONFIG_CIS8201_SHORT_ETCH)
1134 miiphy_write (dev->name, reg, 23, 0x1300);
1135 #else
1136 miiphy_write (dev->name, reg, 23, 0x1000);
1137 #endif
1138 /*
1139 * Vitesse VSC8201/Cicada CIS8201 errata:
1140 * Interoperability problem with Intel 82547EI phys
1141 * This work around (provided by Vitesse) changes
1142 * the default timer convergence from 8ms to 12ms
1143 */
1144 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
1145 miiphy_write (dev->name, reg, 0x08, 0x0200);
1146 miiphy_write (dev->name, reg, 0x1f, 0x52b5);
1147 miiphy_write (dev->name, reg, 0x02, 0x0004);
1148 miiphy_write (dev->name, reg, 0x01, 0x0671);
1149 miiphy_write (dev->name, reg, 0x00, 0x8fae);
1150 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
1151 miiphy_write (dev->name, reg, 0x08, 0x0000);
1152 miiphy_write (dev->name, reg, 0x1f, 0x0000);
1153 /* end Vitesse/Cicada errata */
1154 }
1155 #endif /* defined(CONFIG_CIS8201_PHY) */
1156
1157 #if defined(CONFIG_ET1011C_PHY)
1158 /*
1159 * Agere ET1011c PHY needs to have an extended register whacked
1160 * for RGMII mode.
1161 */
1162 if (((devnum == 2) || (devnum ==3)) && (4 == ethgroup)) {
1163 miiphy_read (dev->name, reg, 0x16, &reg_short);
1164 reg_short &= ~(0x7);
1165 reg_short |= 0x6; /* RGMII DLL Delay*/
1166 miiphy_write (dev->name, reg, 0x16, reg_short);
1167
1168 miiphy_read (dev->name, reg, 0x17, &reg_short);
1169 reg_short &= ~(0x40);
1170 miiphy_write (dev->name, reg, 0x17, reg_short);
1171
1172 miiphy_write(dev->name, reg, 0x1c, 0x74f0);
1173 }
1174 #endif /* defined(CONFIG_ET1011C_PHY) */
1175
1176 #endif /* defined(CONFIG_440GX) ... */
1177 /* Start/Restart autonegotiation */
1178 phy_setup_aneg (dev->name, reg);
1179 udelay (1000);
1180 }
1181 #endif /* defined(CONFIG_PHY_RESET) */
1182
1183 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
1184
1185 /*
1186 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
1187 */
1188 if ((reg_short & PHY_BMSR_AUTN_ABLE)
1189 && !(reg_short & PHY_BMSR_AUTN_COMP)) {
1190 puts ("Waiting for PHY auto negotiation to complete");
1191 i = 0;
1192 while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
1193 /*
1194 * Timeout reached ?
1195 */
1196 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
1197 puts (" TIMEOUT !\n");
1198 break;
1199 }
1200
1201 if ((i++ % 1000) == 0) {
1202 putc ('.');
1203 }
1204 udelay (1000); /* 1 ms */
1205 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
1206 }
1207 puts (" done\n");
1208 udelay (500000); /* another 500 ms (results in faster booting) */
1209 }
1210
1211 get_speed:
1212 if (reg == CONFIG_FIXED_PHY) {
1213 for (i = 0; i < ARRAY_SIZE(fixed_phy_port); i++) {
1214 if (devnum == fixed_phy_port[i].devnum) {
1215 speed = fixed_phy_port[i].speed;
1216 duplex = fixed_phy_port[i].duplex;
1217 break;
1218 }
1219 }
1220
1221 if (i == ARRAY_SIZE(fixed_phy_port)) {
1222 printf("ERROR: PHY (%s) not configured correctly!\n",
1223 dev->name);
1224 return -1;
1225 }
1226 } else {
1227 speed = miiphy_speed(dev->name, reg);
1228 duplex = miiphy_duplex(dev->name, reg);
1229 }
1230
1231 if (hw_p->print_speed) {
1232 hw_p->print_speed = 0;
1233 printf ("ENET Speed is %d Mbps - %s duplex connection (EMAC%d)\n",
1234 (int) speed, (duplex == HALF) ? "HALF" : "FULL",
1235 hw_p->devnum);
1236 }
1237
1238 #if defined(CONFIG_440) && \
1239 !defined(CONFIG_440SP) && !defined(CONFIG_440SPE) && \
1240 !defined(CONFIG_440EPX) && !defined(CONFIG_440GRX) && \
1241 !defined(CONFIG_460EX) && !defined(CONFIG_460GT)
1242 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
1243 mfsdr(SDR0_MFR, reg);
1244 if (speed == 100) {
1245 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
1246 } else {
1247 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
1248 }
1249 mtsdr(SDR0_MFR, reg);
1250 #endif
1251
1252 /* Set ZMII/RGMII speed according to the phy link speed */
1253 reg = in_be32((void *)ZMII_SSR);
1254 if ( (speed == 100) || (speed == 1000) )
1255 out_be32((void *)ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
1256 else
1257 out_be32((void *)ZMII_SSR, reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));
1258
1259 if ((devnum == 2) || (devnum == 3)) {
1260 if (speed == 1000)
1261 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
1262 else if (speed == 100)
1263 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
1264 else if (speed == 10)
1265 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
1266 else {
1267 printf("Error in RGMII Speed\n");
1268 return -1;
1269 }
1270 out_be32((void *)RGMII_SSR, reg);
1271 }
1272 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
1273
1274 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1275 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1276 defined(CONFIG_405EX)
1277 if (devnum >= 2)
1278 rgmii_channel = devnum - 2;
1279 else
1280 rgmii_channel = devnum;
1281
1282 if (speed == 1000)
1283 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V(rgmii_channel));
1284 else if (speed == 100)
1285 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V(rgmii_channel));
1286 else if (speed == 10)
1287 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V(rgmii_channel));
1288 else {
1289 printf("Error in RGMII Speed\n");
1290 return -1;
1291 }
1292 out_be32((void *)RGMII_SSR, reg);
1293 #if defined(CONFIG_460GT)
1294 if ((devnum == 2) || (devnum == 3))
1295 out_be32((void *)RGMII_SSR + RGMII1_BASE_OFFSET, reg);
1296 #endif
1297 #endif
1298
1299 /* set the Mal configuration reg */
1300 #if defined(CONFIG_440GX) || \
1301 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1302 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
1303 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1304 defined(CONFIG_405EX)
1305 mtdcr (MAL0_CFG, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
1306 MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
1307 #else
1308 mtdcr (MAL0_CFG, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
1309 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
1310 if (get_pvr() == PVR_440GP_RB) {
1311 mtdcr (MAL0_CFG, mfdcr(MAL0_CFG) & ~MAL_CR_PLBB);
1312 }
1313 #endif
1314
1315 /*
1316          * Malloc MAL buffer descriptors, make sure they are
1317          * aligned on a cache line boundary
1318          * (401/403/IOP480 = 16 bytes, 405 = 32 bytes)
1319          * and don't cross cache block boundaries.
1320 */
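        /*
         * MAL_ALLOC_SIZE is 4 KiB, so the 4096-byte alignment used below keeps
         * the whole descriptor area inside one aligned 4 KiB region and easily
         * satisfies the cache-line requirement described above.
         */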
1321 if (hw_p->first_init == 0) {
1322 debug("*** Allocating descriptor memory ***\n");
1323
1324 bd_cached = (u32)malloc_aligned(MAL_ALLOC_SIZE, 4096);
1325 if (!bd_cached) {
1326 printf("%s: Error allocating MAL descriptor buffers!\n", __func__);
1327 return -1;
1328 }
1329
1330 #ifdef CONFIG_4xx_DCACHE
1331 flush_dcache_range(bd_cached, bd_cached + MAL_ALLOC_SIZE);
1332 if (!last_used_ea)
1333 #if defined(CONFIG_SYS_MEM_TOP_HIDE)
1334 bd_uncached = bis->bi_memsize + CONFIG_SYS_MEM_TOP_HIDE;
1335 #else
1336 bd_uncached = bis->bi_memsize;
1337 #endif
1338 else
1339 bd_uncached = last_used_ea + MAL_ALLOC_SIZE;
1340
1341 last_used_ea = bd_uncached;
1342 program_tlb(bd_cached, bd_uncached, MAL_ALLOC_SIZE,
1343 TLB_WORD2_I_ENABLE);
1344 #else
1345 bd_uncached = bd_cached;
1346 #endif
1347 hw_p->tx_phys = bd_cached;
1348 hw_p->rx_phys = bd_cached + MAL_TX_DESC_SIZE;
1349 hw_p->tx = (mal_desc_t *)(bd_uncached);
1350 hw_p->rx = (mal_desc_t *)(bd_uncached + MAL_TX_DESC_SIZE);
1351 debug("hw_p->tx=%08x, hw_p->rx=%08x\n", hw_p->tx, hw_p->rx);
1352 }
1353
1354 for (i = 0; i < NUM_TX_BUFF; i++) {
1355 hw_p->tx[i].ctrl = 0;
1356 hw_p->tx[i].data_len = 0;
1357 if (hw_p->first_init == 0)
1358 hw_p->txbuf_ptr = malloc_aligned(MAL_ALLOC_SIZE,
1359 L1_CACHE_BYTES);
1360 hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
1361 if ((NUM_TX_BUFF - 1) == i)
1362 hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
1363 hw_p->tx_run[i] = -1;
1364 debug("TX_BUFF %d @ 0x%08lx\n", i, (u32)hw_p->tx[i].data_ptr);
1365 }
1366
1367 for (i = 0; i < NUM_RX_BUFF; i++) {
1368 hw_p->rx[i].ctrl = 0;
1369 hw_p->rx[i].data_len = 0;
1370 hw_p->rx[i].data_ptr = (char *)NetRxPackets[i];
1371 if ((NUM_RX_BUFF - 1) == i)
1372 hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
1373 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
1374 hw_p->rx_ready[i] = -1;
1375 debug("RX_BUFF %d @ 0x%08lx\n", i, (u32)hw_p->rx[i].data_ptr);
1376 }
1377
1378 reg = 0x00000000;
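        /*
         * Pack the MAC address into the EMAC individual address registers:
         * EMAC_IAH gets the two high bytes, EMAC_IAL the remaining four.
         * For example, an ethaddr of 00:04:ac:12:34:56 results in
         * EMAC_IAH = 0x00000004 and EMAC_IAL = 0xac123456.
         */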
1379
1380 reg |= dev->enetaddr[0]; /* set high address */
1381 reg = reg << 8;
1382 reg |= dev->enetaddr[1];
1383
1384 out_be32((void *)EMAC_IAH + hw_p->hw_addr, reg);
1385
1386 reg = 0x00000000;
1387 reg |= dev->enetaddr[2]; /* set low address */
1388 reg = reg << 8;
1389 reg |= dev->enetaddr[3];
1390 reg = reg << 8;
1391 reg |= dev->enetaddr[4];
1392 reg = reg << 8;
1393 reg |= dev->enetaddr[5];
1394
1395 out_be32((void *)EMAC_IAL + hw_p->hw_addr, reg);
1396
1397 switch (devnum) {
1398 case 1:
1399 /* setup MAL tx & rx channel pointers */
1400 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
1401 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1402 #else
1403 mtdcr (MAL0_TXCTP1R, hw_p->tx_phys);
1404 #endif
1405 #if defined(CONFIG_440)
1406 mtdcr (MAL0_TXBADDR, 0x0);
1407 mtdcr (MAL0_RXBADDR, 0x0);
1408 #endif
1409
1410 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
1411 mtdcr (MAL0_RXCTP8R, hw_p->rx_phys);
1412 /* set RX buffer size */
1413 mtdcr (MAL0_RCBS8, ENET_MAX_MTU_ALIGNED / 16);
1414 #else
1415 mtdcr (MAL0_RXCTP1R, hw_p->rx_phys);
1416 /* set RX buffer size */
1417 mtdcr (MAL0_RCBS1, ENET_MAX_MTU_ALIGNED / 16);
1418 #endif
1419 break;
1420 #if defined (CONFIG_440GX)
1421 case 2:
1422 /* setup MAL tx & rx channel pointers */
1423 mtdcr (MAL0_TXBADDR, 0x0);
1424 mtdcr (MAL0_RXBADDR, 0x0);
1425 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1426 mtdcr (MAL0_RXCTP2R, hw_p->rx_phys);
1427 /* set RX buffer size */
1428 mtdcr (MAL0_RCBS2, ENET_MAX_MTU_ALIGNED / 16);
1429 break;
1430 case 3:
1431 /* setup MAL tx & rx channel pointers */
1432 mtdcr (MAL0_TXBADDR, 0x0);
1433 mtdcr (MAL0_TXCTP3R, hw_p->tx_phys);
1434 mtdcr (MAL0_RXBADDR, 0x0);
1435 mtdcr (MAL0_RXCTP3R, hw_p->rx_phys);
1436 /* set RX buffer size */
1437 mtdcr (MAL0_RCBS3, ENET_MAX_MTU_ALIGNED / 16);
1438 break;
1439 #endif /* CONFIG_440GX */
1440 #if defined (CONFIG_460GT)
1441 case 2:
1442 /* setup MAL tx & rx channel pointers */
1443 mtdcr (MAL0_TXBADDR, 0x0);
1444 mtdcr (MAL0_RXBADDR, 0x0);
1445 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1446 mtdcr (MAL0_RXCTP16R, hw_p->rx_phys);
1447 /* set RX buffer size */
1448 mtdcr (MAL0_RCBS16, ENET_MAX_MTU_ALIGNED / 16);
1449 break;
1450 case 3:
1451 /* setup MAL tx & rx channel pointers */
1452 mtdcr (MAL0_TXBADDR, 0x0);
1453 mtdcr (MAL0_RXBADDR, 0x0);
1454 mtdcr (MAL0_TXCTP3R, hw_p->tx_phys);
1455 mtdcr (MAL0_RXCTP24R, hw_p->rx_phys);
1456 /* set RX buffer size */
1457 mtdcr (MAL0_RCBS24, ENET_MAX_MTU_ALIGNED / 16);
1458 break;
1459 #endif /* CONFIG_460GT */
1460 case 0:
1461 default:
1462 /* setup MAL tx & rx channel pointers */
1463 #if defined(CONFIG_440)
1464 mtdcr (MAL0_TXBADDR, 0x0);
1465 mtdcr (MAL0_RXBADDR, 0x0);
1466 #endif
1467 mtdcr (MAL0_TXCTP0R, hw_p->tx_phys);
1468 mtdcr (MAL0_RXCTP0R, hw_p->rx_phys);
1469 /* set RX buffer size */
1470 mtdcr (MAL0_RCBS0, ENET_MAX_MTU_ALIGNED / 16);
1471 break;
1472 }
1473
1474 /* Enable MAL transmit and receive channels */
1475 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
1476 mtdcr (MAL0_TXCASR, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
1477 #else
1478 mtdcr (MAL0_TXCASR, (MAL_TXRX_CASR >> hw_p->devnum));
1479 #endif
1480 mtdcr (MAL0_RXCASR, (MAL_TXRX_CASR >> hw_p->devnum));
1481
1482 /* set transmit enable & receive enable */
1483 out_be32((void *)EMAC_M0 + hw_p->hw_addr, EMAC_M0_TXE | EMAC_M0_RXE);
1484
1485 mode_reg = in_be32((void *)EMAC_M1 + hw_p->hw_addr);
1486
1487 /* set rx-/tx-fifo size */
1488 mode_reg = (mode_reg & ~EMAC_MR1_FIFO_MASK) | EMAC_MR1_FIFO_SIZE;
1489
1490 /* set speed */
1491 if (speed == _1000BASET) {
1492 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1493 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
1494 unsigned long pfc1;
1495
1496 mfsdr (SDR0_PFC1, pfc1);
1497 pfc1 |= SDR0_PFC1_EM_1000;
1498 mtsdr (SDR0_PFC1, pfc1);
1499 #endif
1500 mode_reg = mode_reg | EMAC_M1_MF_1000MBPS | EMAC_M1_IST;
1501 } else if (speed == _100BASET)
1502 mode_reg = mode_reg | EMAC_M1_MF_100MBPS | EMAC_M1_IST;
1503 else
1504 mode_reg = mode_reg & ~0x00C00000; /* 10 MBPS */
1505 if (duplex == FULL)
1506 mode_reg = mode_reg | 0x80000000 | EMAC_M1_IST;
1507
1508 out_be32((void *)EMAC_M1 + hw_p->hw_addr, mode_reg);
1509
1510         /* Enable broadcast and individual address */
1511 /* TBS: enabling runts as some misbehaved nics will send runts */
1512 out_be32((void *)EMAC_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
1513
1514 /* we probably need to set the tx mode1 reg? maybe at tx time */
1515
1516 /* set transmit request threshold register */
1517 out_be32((void *)EMAC_TRTR + hw_p->hw_addr, 0x18000000); /* 256 byte threshold */
1518
1519 /* set receive low/high water mark register */
1520 #if defined(CONFIG_440)
1521         /* 440s have a 64-byte burst length */
1522 out_be32((void *)EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
1523 #else
1524 /* 405s have a 16 byte burst length */
1525 out_be32((void *)EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
1526 #endif /* defined(CONFIG_440) */
1527 out_be32((void *)EMAC_TXM1 + hw_p->hw_addr, 0xf8640000);
1528
1529 /* Set fifo limit entry in tx mode 0 */
1530 out_be32((void *)EMAC_TXM0 + hw_p->hw_addr, 0x00000003);
1531 /* Frame gap set */
1532 out_be32((void *)EMAC_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
1533
1534 /* Set EMAC IER */
1535 hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
1536 if (speed == _100BASET)
1537 hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
1538
1539 out_be32((void *)EMAC_ISR + hw_p->hw_addr, 0xffffffff); /* clear pending interrupts */
1540 out_be32((void *)EMAC_IER + hw_p->hw_addr, hw_p->emac_ier);
1541
1542 if (hw_p->first_init == 0) {
1543 /*
1544 * Connect interrupt service routines
1545 */
1546 irq_install_handler(ETH_IRQ_NUM(hw_p->devnum),
1547 (interrupt_handler_t *) enetInt, dev);
1548 }
1549
1550 mtmsr (msr); /* enable interrupts again */
1551
1552 hw_p->bis = bis;
1553 hw_p->first_init = 1;
1554
1555 return 0;
1556 }
1557
1558
1559 static int ppc_4xx_eth_send (struct eth_device *dev, volatile void *ptr,
1560 int len)
1561 {
1562 struct enet_frame *ef_ptr;
1563 ulong time_start, time_now;
1564 unsigned long temp_txm0;
1565 EMAC_4XX_HW_PST hw_p = dev->priv;
1566
1567 ef_ptr = (struct enet_frame *) ptr;
1568
1569 /*-----------------------------------------------------------------------+
1570 * Copy in our address into the frame.
1571 *-----------------------------------------------------------------------*/
1572 (void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
1573
1574 /*-----------------------------------------------------------------------+
1575 * If frame is too long or too short, modify length.
1576 *-----------------------------------------------------------------------*/
1577 /* TBS: where does the fragment go???? */
1578 if (len > ENET_MAX_MTU)
1579 len = ENET_MAX_MTU;
1580
1581 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
1582 memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
1583 flush_dcache_range((u32)hw_p->txbuf_ptr, (u32)hw_p->txbuf_ptr + len);
1584
1585 /*-----------------------------------------------------------------------+
1586 * set TX Buffer busy, and send it
1587 *-----------------------------------------------------------------------*/
1588 hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
1589 EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
1590 ~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
1591 if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
1592 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
1593
1594 hw_p->tx[hw_p->tx_slot].data_len = (short) len;
1595 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
1596
1597 sync();
1598
1599 out_be32((void *)EMAC_TXM0 + hw_p->hw_addr,
1600 in_be32((void *)EMAC_TXM0 + hw_p->hw_addr) | EMAC_TXM0_GNP0);
1601 #ifdef INFO_4XX_ENET
1602 hw_p->stats.pkts_tx++;
1603 #endif
1604
1605 /*-----------------------------------------------------------------------+
1606          * poll until the packet is sent and then make sure it is OK
1607 *-----------------------------------------------------------------------*/
1608 time_start = get_timer (0);
1609 while (1) {
1610 temp_txm0 = in_be32((void *)EMAC_TXM0 + hw_p->hw_addr);
1611                 /* loop until GNP0 is cleared (packet accepted by the EMAC) or 3 seconds elapse */
1612                 if ((temp_txm0 & EMAC_TXM0_GNP0) != 0) {
1613                         /* transmit still pending, check for a timeout.
1614                          * If there is an error, an interrupt should
1615                          * happen when we return
1616                          */
1617 time_now = get_timer (0);
1618 if ((time_now - time_start) > 3000) {
1619 return (-1);
1620 }
1621 } else {
1622 return (len);
1623 }
1624 }
1625 }
1626
1627 int enetInt (struct eth_device *dev)
1628 {
1629 int serviced;
1630 int rc = -1; /* default to not us */
1631 u32 mal_isr;
1632 u32 emac_isr = 0;
1633 u32 mal_eob;
1634 u32 uic_mal;
1635 u32 uic_mal_err;
1636 u32 uic_emac;
1637 u32 uic_emac_b;
1638 EMAC_4XX_HW_PST hw_p;
1639
1640 /*
1641 * Because the mal is generic, we need to get the current
1642 * eth device
1643 */
1644 dev = eth_get_dev();
1645
1646 hw_p = dev->priv;
1647
1648 /* enter loop that stays in interrupt code until nothing to service */
1649 do {
1650 serviced = 0;
1651
1652 uic_mal = mfdcr(UIC_BASE_MAL + UIC_MSR);
1653 uic_mal_err = mfdcr(UIC_BASE_MAL_ERR + UIC_MSR);
1654 uic_emac = mfdcr(UIC_BASE_EMAC + UIC_MSR);
1655 uic_emac_b = mfdcr(UIC_BASE_EMAC_B + UIC_MSR);
1656
1657 if (!(uic_mal & (UIC_MAL_RXEOB | UIC_MAL_TXEOB))
1658 && !(uic_mal_err & (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE))
1659 && !(uic_emac & UIC_ETHx) && !(uic_emac_b & UIC_ETHxB)) {
1660 /* not for us */
1661 return (rc);
1662 }
1663
1664 /* get and clear controller status interrupts */
1665 /* look at MAL and EMAC error interrupts */
1666 if (uic_mal_err & (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)) {
1667 /* we have a MAL error interrupt */
1668 mal_isr = mfdcr(MAL0_ESR);
1669 mal_err(dev, mal_isr, uic_mal_err,
1670 MAL_UIC_DEF, MAL_UIC_ERR);
1671
1672 /* clear MAL error interrupt status bits */
1673 mtdcr(UIC_BASE_MAL_ERR + UIC_SR,
1674 UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE);
1675
1676 return -1;
1677 }
1678
1679 /* look for EMAC errors */
1680 if ((uic_emac & UIC_ETHx) || (uic_emac_b & UIC_ETHxB)) {
1681 emac_isr = in_be32((void *)EMAC_ISR + hw_p->hw_addr);
1682 emac_err(dev, emac_isr);
1683
1684 /* clear EMAC error interrupt status bits */
1685 mtdcr(UIC_BASE_EMAC + UIC_SR, UIC_ETHx);
1686 mtdcr(UIC_BASE_EMAC_B + UIC_SR, UIC_ETHxB);
1687
1688 return -1;
1689 }
1690
1691 /* handle MAL TX EOB interrupt from a tx */
1692 if (uic_mal & UIC_MAL_TXEOB) {
1693 /* clear MAL interrupt status bits */
1694 mal_eob = mfdcr(MAL0_TXEOBISR);
1695 mtdcr(MAL0_TXEOBISR, mal_eob);
1696 mtdcr(UIC_BASE_MAL + UIC_SR, UIC_MAL_TXEOB);
1697
1698 /* indicate that we serviced an interrupt */
1699 serviced = 1;
1700 rc = 0;
1701 }
1702
1703 /* handle MAL RX EOB interrupt from a receive */
1704 /* check for EOB on valid channels */
1705 if (uic_mal & UIC_MAL_RXEOB) {
1706 mal_eob = mfdcr(MAL0_RXEOBISR);
1707 if (mal_eob &
1708 (0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL))) {
1709 /* push packet to upper layer */
1710 enet_rcv(dev, emac_isr);
1711
1712 /* clear MAL interrupt status bits */
1713 mtdcr(UIC_BASE_MAL + UIC_SR, UIC_MAL_RXEOB);
1714
1715 /* indicate that we serviced an interrupt */
1716 serviced = 1;
1717 rc = 0;
1718 }
1719 }
1720 #if defined(CONFIG_405EZ)
1721 /*
1722 * On 405EZ the RX-/TX-interrupts are coalesced into
1723 * one IRQ bit in the UIC. We need to acknowledge the
1724 * RX-/TX-interrupts in the SDR0_ICINTSTAT reg as well.
1725 */
1726 mtsdr(SDR0_ICINTSTAT,
1727 SDR_ICRX_STAT | SDR_ICTX0_STAT | SDR_ICTX1_STAT);
1728 #endif /* defined(CONFIG_405EZ) */
1729 } while (serviced);
1730
1731 return (rc);
1732 }
1733
1734 /*-----------------------------------------------------------------------------+
1735 * MAL Error Routine
1736 *-----------------------------------------------------------------------------*/
1737 static void mal_err (struct eth_device *dev, unsigned long isr,
1738 unsigned long uic, unsigned long maldef,
1739 unsigned long mal_errr)
1740 {
1741 EMAC_4XX_HW_PST hw_p = dev->priv;
1742
1743 mtdcr (MAL0_ESR, isr); /* clear interrupt */
1744
1745 /* clear DE interrupt */
1746 mtdcr (MAL0_TXDEIR, 0xC0000000);
1747 mtdcr (MAL0_RXDEIR, 0x80000000);
1748
1749 #ifdef INFO_4XX_ENET
1750 printf ("\nMAL error occured.... ISR = %lx UIC = = %lx MAL_DEF = %lx MAL_ERR= %lx \n", isr, uic, maldef, mal_errr);
1751 #endif
1752
1753 eth_init (hw_p->bis); /* start again... */
1754 }
1755
1756 /*-----------------------------------------------------------------------------+
1757 * EMAC Error Routine
1758 *-----------------------------------------------------------------------------*/
1759 static void emac_err (struct eth_device *dev, unsigned long isr)
1760 {
1761 EMAC_4XX_HW_PST hw_p = dev->priv;
1762
1763 printf ("EMAC%d error occured.... ISR = %lx\n", hw_p->devnum, isr);
1764 out_be32((void *)EMAC_ISR + hw_p->hw_addr, isr);
1765 }
1766
1767 /*-----------------------------------------------------------------------------+
1768 * enet_rcv() handles the ethernet receive data
1769 *-----------------------------------------------------------------------------*/
1770 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1771 {
1772 struct enet_frame *ef_ptr;
1773 unsigned long data_len;
1774 unsigned long rx_eob_isr;
1775 EMAC_4XX_HW_PST hw_p = dev->priv;
1776
1777 int handled = 0;
1778 int i;
1779 int loop_count = 0;
1780
1781 rx_eob_isr = mfdcr (MAL0_RXEOBISR);
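/*
 * Each EMAC owns one MAL RX channel; the channel for device devnum is
 * reported at bit (devnum * MAL_RX_CHAN_MUL), counted from the MSB of
 * RXEOBISR, hence the shift below.
 */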
1782 if ((0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL)) & rx_eob_isr) {
1783 /* clear EOB */
1784 mtdcr (MAL0_RXEOBISR, rx_eob_isr);
1785
1786 /* EMAC RX done */
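/*
 * Walk the RX ring until we hit a descriptor that is still empty or
 * until NUM_RX_BUFF descriptors have been processed in this pass.
 */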
1787 while (1) { /* do all */
1788 i = hw_p->rx_slot;
1789
1790 if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1791 || (loop_count >= NUM_RX_BUFF))
1792 break;
1793
1794 loop_count++;
1795 handled++;
1796 data_len = (unsigned long) hw_p->rx[i].data_len & 0x0fff; /* Get len */
1797 if (data_len) {
1798 if (data_len > ENET_MAX_MTU) /* Check len */
1799 data_len = 0;
1800 else {
1801 if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) { /* Check Errors */
1802 data_len = 0;
1803 hw_p->stats.rx_err_log[hw_p->
1804 rx_err_index]
1805 = hw_p->rx[i].ctrl;
1806 hw_p->rx_err_index++;
1807 if (hw_p->rx_err_index ==
1808 MAX_ERR_LOG)
1809 hw_p->rx_err_index =
1810 0;
1811 } /* emac_errors */
1812 } /* data_len < max mtu */
1813 } /* if data_len */
1814 if (!data_len) { /* no data */
1815 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY; /* Free Recv Buffer */
1816
1817 hw_p->stats.data_len_err++; /* Error at Rx */
1818 }
1819
1820 /* AS.HARNOIS
1821 * Check whether the user has already consumed this
1822 * buffer; if not, the receive ring is full => error.
1823 */
1824 else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1825 if (hw_p->is_receiving)
1826 printf ("ERROR : Receive buffers are full!\n");
1827 break;
1828 } else {
1829 hw_p->stats.rx_frames++;
1830 hw_p->stats.rx += data_len;
1831 ef_ptr = (struct enet_frame *) hw_p->rx[i].
1832 data_ptr;
1833 #ifdef INFO_4XX_ENET
1834 hw_p->stats.pkts_rx++;
1835 #endif
1836 /* AS.HARNOIS
1837 * use ring buffer
1838 */
1839 hw_p->rx_ready[hw_p->rx_i_index] = i;
1840 hw_p->rx_i_index++;
1841 if (NUM_RX_BUFF == hw_p->rx_i_index)
1842 hw_p->rx_i_index = 0;
1843
1844 hw_p->rx_slot++;
1845 if (NUM_RX_BUFF == hw_p->rx_slot)
1846 hw_p->rx_slot = 0;
1847
1848 /* AS.HARNOIS
1849 * free receive buffer only when
1850 * buffer has been handled (eth_rx)
1851 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1852 */
1853 } /* if data_len */
1854 } /* while */
1855 } /* if EMAC RX channel EOB */
1856 }
1857
1858
1859 static int ppc_4xx_eth_rx (struct eth_device *dev)
1860 {
1861 int length;
1862 int user_index;
1863 unsigned long msr;
1864 EMAC_4XX_HW_PST hw_p = dev->priv;
1865
1866 hw_p->is_receiving = 1; /* tell driver */
1867
1868 for (;;) {
1869 /* AS.HARNOIS
1870 * use ring buffer and
1871 * get index from rx buffer descriptor queue
1872 */
1873 user_index = hw_p->rx_ready[hw_p->rx_u_index];
1874 if (user_index == -1) {
1875 length = -1;
1876 break; /* nothing received - leave for() loop */
1877 }
1878
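/*
 * Mask external interrupts (MSR[EE]) while the ring is updated so
 * that enetInt() cannot modify it underneath us.
 */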
1879 msr = mfmsr ();
1880 mtmsr (msr & ~(MSR_EE));
1881
1882 length = hw_p->rx[user_index].data_len & 0x0fff;
1883
1884 /* Pass the packet up to the protocol layers. */
1885 /* NetReceive(NetRxPackets[rxIdx], length - 4); */
1886 /* NetReceive(NetRxPackets[i], length); */
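/*
 * Invalidate the cache lines covering the DMA'ed data before the CPU
 * reads it; the trailing 4 bytes (the frame checksum) are not passed up.
 */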
1887 invalidate_dcache_range((u32)hw_p->rx[user_index].data_ptr,
1888 (u32)hw_p->rx[user_index].data_ptr +
1889 length - 4);
1890 NetReceive (NetRxPackets[user_index], length - 4);
1891 /* Free Recv Buffer */
1892 hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1893 /* Free rx buffer descriptor queue */
1894 hw_p->rx_ready[hw_p->rx_u_index] = -1;
1895 hw_p->rx_u_index++;
1896 if (NUM_RX_BUFF == hw_p->rx_u_index)
1897 hw_p->rx_u_index = 0;
1898
1899 #ifdef INFO_4XX_ENET
1900 hw_p->stats.pkts_handled++;
1901 #endif
1902
1903 mtmsr (msr); /* restore MSR: re-enable external interrupts */
1904 }
1905
1906 hw_p->is_receiving = 0; /* tell driver */
1907
1908 return length;
1909 }
1910
1911 int ppc_4xx_eth_initialize (bd_t * bis)
1912 {
1913 static int virgin = 0;
1914 struct eth_device *dev;
1915 int eth_num = 0;
1916 EMAC_4XX_HW_PST hw = NULL;
1917 u8 ethaddr[4 + CONFIG_EMAC_NR_START][6];
1918 u32 hw_addr[4];
1919 u32 mal_ier;
1920
1921 #if defined(CONFIG_440GX)
1922 unsigned long pfc1;
1923
1924 mfsdr (SDR0_PFC1, pfc1);
1925 pfc1 &= ~(0x01e00000);
1926 pfc1 |= 0x01200000;
1927 mtsdr (SDR0_PFC1, pfc1);
1928 #endif
1929
1930 /* first clear all mac-addresses */
1931 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++)
1932 memcpy(ethaddr[eth_num], "\0\0\0\0\0\0", 6);
1933
1934 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1935 int ethaddr_idx = eth_num + CONFIG_EMAC_NR_START;
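/*
 * hw_addr[] holds the register offset of each EMAC relative to the
 * first one; the MAC address is taken from the matching ethaddr /
 * ethNaddr environment variable.
 */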
1936 switch (eth_num) {
1937 default: /* fall through */
1938 case 0:
1939 eth_getenv_enetaddr("ethaddr", ethaddr[ethaddr_idx]);
1940 hw_addr[eth_num] = 0x0;
1941 break;
1942 #ifdef CONFIG_HAS_ETH1
1943 case 1:
1944 eth_getenv_enetaddr("eth1addr", ethaddr[ethaddr_idx]);
1945 hw_addr[eth_num] = 0x100;
1946 break;
1947 #endif
1948 #ifdef CONFIG_HAS_ETH2
1949 case 2:
1950 eth_getenv_enetaddr("eth2addr", ethaddr[ethaddr_idx]);
1951 #if defined(CONFIG_460GT)
1952 hw_addr[eth_num] = 0x300;
1953 #else
1954 hw_addr[eth_num] = 0x400;
1955 #endif
1956 break;
1957 #endif
1958 #ifdef CONFIG_HAS_ETH3
1959 case 3:
1960 eth_getenv_enetaddr("eth3addr", ethaddr[ethaddr_idx]);
1961 #if defined(CONFIG_460GT)
1962 hw_addr[eth_num] = 0x400;
1963 #else
1964 hw_addr[eth_num] = 0x600;
1965 #endif
1966 break;
1967 #endif
1968 }
1969 }
1970
1971 /* set phy num and mode */
1972 bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1973 bis->bi_phymode[0] = 0;
1974
1975 #if defined(CONFIG_PHY1_ADDR)
1976 bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1977 bis->bi_phymode[1] = 0;
1978 #endif
1979 #if defined(CONFIG_440GX)
1980 bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1981 bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
1982 bis->bi_phymode[2] = 2;
1983 bis->bi_phymode[3] = 2;
1984 #endif
1985
1986 #if defined(CONFIG_440GX) || \
1987 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1988 defined(CONFIG_405EX)
1989 ppc_4xx_eth_setup_bridge(0, bis);
1990 #endif
1991
1992 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1993 /*
1994 * See if we can actually bring up the interface,
1995 * otherwise, skip it
1996 */
1997 if (memcmp (ethaddr[eth_num], "\0\0\0\0\0\0", 6) == 0) {
1998 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1999 continue;
2000 }
2001
2002 /* Allocate device structure */
2003 dev = (struct eth_device *) malloc (sizeof (*dev));
2004 if (dev == NULL) {
2005 printf ("ppc_4xx_eth_initialize: "
2006 "Cannot allocate eth_device %d\n", eth_num);
2007 return (-1);
2008 }
2009 memset(dev, 0, sizeof(*dev));
2010
2011 /* Allocate our private use data */
2012 hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
2013 if (hw == NULL) {
2014 printf ("ppc_4xx_eth_initialize: "
2015 "Cannot allocate private hw data for eth_device %d",
2016 eth_num);
2017 free (dev);
2018 return (-1);
2019 }
2020 memset(hw, 0, sizeof(*hw));
2021
2022 hw->hw_addr = hw_addr[eth_num];
2023 memcpy (dev->enetaddr, ethaddr[eth_num], 6);
2024 hw->devnum = eth_num;
2025 hw->print_speed = 1;
2026
2027 sprintf (dev->name, "ppc_4xx_eth%d", eth_num - CONFIG_EMAC_NR_START);
2028 dev->priv = (void *) hw;
2029 dev->init = ppc_4xx_eth_init;
2030 dev->halt = ppc_4xx_eth_halt;
2031 dev->send = ppc_4xx_eth_send;
2032 dev->recv = ppc_4xx_eth_rx;
2033
2034 if (0 == virgin) {
2035 /* set the MAL IER ??? names may change with new spec ??? */
2036 #if defined(CONFIG_440SPE) || \
2037 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
2038 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
2039 defined(CONFIG_405EX)
2040 mal_ier =
2041 MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE |
2042 MAL_IER_DE | MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE ;
2043 #else
2044 mal_ier =
2045 MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
2046 MAL_IER_OPBE | MAL_IER_PLBE;
2047 #endif
2048 mtdcr (MAL0_ESR, 0xffffffff); /* clear pending interrupts */
2049 mtdcr (MAL0_TXDEIR, 0xffffffff); /* clear pending interrupts */
2050 mtdcr (MAL0_RXDEIR, 0xffffffff); /* clear pending interrupts */
2051 mtdcr (MAL0_IER, mal_ier);
2052
2053 /* install MAL interrupt handler */
2054 irq_install_handler (VECNUM_MAL_SERR,
2055 (interrupt_handler_t *) enetInt,
2056 dev);
2057 irq_install_handler (VECNUM_MAL_TXEOB,
2058 (interrupt_handler_t *) enetInt,
2059 dev);
2060 irq_install_handler (VECNUM_MAL_RXEOB,
2061 (interrupt_handler_t *) enetInt,
2062 dev);
2063 irq_install_handler (VECNUM_MAL_TXDE,
2064 (interrupt_handler_t *) enetInt,
2065 dev);
2066 irq_install_handler (VECNUM_MAL_RXDE,
2067 (interrupt_handler_t *) enetInt,
2068 dev);
2069 virgin = 1;
2070 }
2071
2072 eth_register (dev);
2073
2074 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
2075 miiphy_register (dev->name,
2076 emac4xx_miiphy_read, emac4xx_miiphy_write);
2077 #endif
2078 } /* end for each supported device */
2079
2080 return 0;
2081 }