drivers/net/4xx_enet.c
1 /*-----------------------------------------------------------------------------+
2 * This source code is dual-licensed. You may use it under the terms of the
3 * GNU General Public License version 2, or under the license below.
4 *
5 * This source code has been made available to you by IBM on an AS-IS
6 * basis. Anyone receiving this source is licensed under IBM
7 * copyrights to use it in any way he or she deems fit, including
8 * copying it, modifying it, compiling it, and redistributing it either
9 * with or without modifications. No license under IBM patents or
10 * patent applications is to be implied by the copyright license.
11 *
12 * Any user of this software should understand that IBM cannot provide
13 * technical support for this software and will not be responsible for
14 * any consequences resulting from the use of this software.
15 *
16 * Any person who transfers this source code or any derivative work
17 * must include the IBM copyright notice, this paragraph, and the
18 * preceding two paragraphs in the transferred software.
19 *
20 * COPYRIGHT I B M CORPORATION 1995
21 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
22 *-----------------------------------------------------------------------------*/
23 /*-----------------------------------------------------------------------------+
24 *
25 * File Name: enetemac.c
26 *
27 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
28 *
29 * Author: Mark Wisner
30 *
31 * Change Activity-
32 *
33 * Date Description of Change BY
34 * --------- --------------------- ---
35 * 05-May-99 Created MKW
36 * 27-Jun-99 Clean up JWB
37 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
38 * 29-Jul-99 Added Full duplex support MKW
39 * 06-Aug-99 Changed names for Mal CR reg MKW
40 * 23-Aug-99 Turned off SYE when running at 10Mbs MKW
41 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
42 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
43 * to avoid chaining maximum sized packets. Push starting
44 * RX descriptor address up to the next cache line boundary.
45 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
46 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
47 * EMAC0_RXM register. JWB
48 * 12-Mar-01 anne-sophie.harnois@nextream.fr
49 * - Variables are compatible with those already defined in
50 * include/net.h
51 * - Receive buffer descriptor ring is used to send buffers
52 * to the user
53 * - Info print about send/received/handled packet number if
54 * INFO_405_ENET is set
55 * 17-Apr-01 stefan.roese@esd-electronics.com
56 * - MAL reset in "eth_halt" included
57 * - Enet speed and duplex output now in one line
58 * 08-May-01 stefan.roese@esd-electronics.com
59 * - MAL error handling added (eth_init called again)
60 * 13-Nov-01 stefan.roese@esd-electronics.com
61 * - Set IST bit in EMAC0_MR1 reg upon 100MBit or full duplex
62 * 04-Jan-02 stefan.roese@esd-electronics.com
63 * - Wait for PHY auto negotiation to complete added
64 * 06-Feb-02 stefan.roese@esd-electronics.com
65 * - Bug fixed in waiting for auto negotiation to complete
66 * 26-Feb-02 stefan.roese@esd-electronics.com
67 * - rx and tx buffer descriptors now allocated (no fixed address
68 * used anymore)
69 * 17-Jun-02 stefan.roese@esd-electronics.com
70 * - MAL error debug printf 'M' removed (rx de interrupt may
71 * occur upon many incoming packets with only 4 rx buffers).
72 *-----------------------------------------------------------------------------*
73 * 17-Nov-03 travis.sawyer@sandburst.com
 74  *	       - ported from 405gp_enet.c to utilize up to 4 EMAC ports
75 * in the 440GX. This port should work with the 440GP
76 * (2 EMACs) also
77 * 15-Aug-05 sr@denx.de
78 * - merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
 79  *	       now handling all 4xx CPUs.
80 *-----------------------------------------------------------------------------*/
81
82 #include <config.h>
83 #include <common.h>
84 #include <net.h>
85 #include <asm/processor.h>
86 #include <asm/io.h>
87 #include <asm/cache.h>
88 #include <asm/mmu.h>
89 #include <commproc.h>
90 #include <asm/ppc4xx.h>
91 #include <asm/ppc4xx-emac.h>
92 #include <asm/ppc4xx-mal.h>
93 #include <miiphy.h>
94 #include <malloc.h>
95
96 #if !(defined(CONFIG_MII) || defined(CONFIG_CMD_MII))
97 #error "CONFIG_MII has to be defined!"
98 #endif
99
100 #if defined(CONFIG_NETCONSOLE) && !defined(CONFIG_NET_MULTI)
101 #error "CONFIG_NET_MULTI has to be defined for NetConsole"
102 #endif
103
104 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
105 #define PHY_AUTONEGOTIATE_TIMEOUT 5000 /* 5000 ms autonegotiate timeout */
106
107 /* Ethernet Transmit and Receive Buffers */
108 /* AS.HARNOIS
109 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
110 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
111 */
112 #define ENET_MAX_MTU PKTSIZE
113 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
114
115 /*-----------------------------------------------------------------------------+
116 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
117 * Interrupt Controller).
118 *-----------------------------------------------------------------------------*/
119 #define ETH_IRQ_NUM(dev) (VECNUM_ETH0 + ((dev) * VECNUM_ETH1_OFFS))
120
121 #if defined(CONFIG_HAS_ETH3)
122 #if !defined(CONFIG_440GX)
 123 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)) | \
 124 			 UIC_MASK(ETH_IRQ_NUM(2)) | UIC_MASK(ETH_IRQ_NUM(3)))
 125 #else
 126 /* Unfortunately 440GX spreads EMAC interrupts on multiple UICs */
 127 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)))
 128 #define UIC_ETHxB	(UIC_MASK(ETH_IRQ_NUM(2)) | UIC_MASK(ETH_IRQ_NUM(3)))
 129 #endif /* !defined(CONFIG_440GX) */
 130 #elif defined(CONFIG_HAS_ETH2)
 131 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)) | \
 132 			 UIC_MASK(ETH_IRQ_NUM(2)))
 133 #elif defined(CONFIG_HAS_ETH1)
 134 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)))
135 #else
136 #define UIC_ETHx UIC_MASK(ETH_IRQ_NUM(0))
137 #endif
138
139 /*
140 * Define a default version for UIC_ETHxB for non 440GX so that we can
141 * use common code for all 4xx variants
142 */
143 #if !defined(UIC_ETHxB)
144 #define UIC_ETHxB 0
145 #endif
146
147 #define UIC_MAL_SERR UIC_MASK(VECNUM_MAL_SERR)
148 #define UIC_MAL_TXDE UIC_MASK(VECNUM_MAL_TXDE)
149 #define UIC_MAL_RXDE UIC_MASK(VECNUM_MAL_RXDE)
150 #define UIC_MAL_TXEOB UIC_MASK(VECNUM_MAL_TXEOB)
151 #define UIC_MAL_RXEOB UIC_MASK(VECNUM_MAL_RXEOB)
152
153 #define MAL_UIC_ERR (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
154 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
155
156 /*
157 * We have 3 different interrupt types:
158 * - MAL interrupts indicating successful transfer
159 * - MAL error interrupts indicating MAL related errors
160 * - EMAC interrupts indicating EMAC related errors
161 *
 162  * All of these interrupts can be on different UICs, but for now
 163  * at least all interrupts of one type are on the same UIC.
 164  * The only exception is the 440GX, where the EMAC interrupts are
 165  * spread over two UICs!
166 */
167 #if defined(CONFIG_440GX)
168 #define UIC_BASE_MAL UIC1_DCR_BASE
169 #define UIC_BASE_MAL_ERR UIC2_DCR_BASE
170 #define UIC_BASE_EMAC UIC2_DCR_BASE
171 #define UIC_BASE_EMAC_B UIC3_DCR_BASE
172 #else
173 #define UIC_BASE_MAL (UIC0_DCR_BASE + (UIC_NR(VECNUM_MAL_TXEOB) * 0x10))
174 #define UIC_BASE_MAL_ERR (UIC0_DCR_BASE + (UIC_NR(VECNUM_MAL_SERR) * 0x10))
175 #define UIC_BASE_EMAC (UIC0_DCR_BASE + (UIC_NR(ETH_IRQ_NUM(0)) * 0x10))
176 #define UIC_BASE_EMAC_B (UIC0_DCR_BASE + (UIC_NR(ETH_IRQ_NUM(0)) * 0x10))
177 #endif
178
179 #undef INFO_4XX_ENET
180
181 #define BI_PHYMODE_NONE 0
182 #define BI_PHYMODE_ZMII 1
183 #define BI_PHYMODE_RGMII 2
184 #define BI_PHYMODE_GMII 3
185 #define BI_PHYMODE_RTBI 4
186 #define BI_PHYMODE_TBI 5
187 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
188 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
189 defined(CONFIG_405EX)
190 #define BI_PHYMODE_SMII 6
191 #define BI_PHYMODE_MII 7
192 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
193 #define BI_PHYMODE_RMII 8
194 #endif
195 #endif
196 #define BI_PHYMODE_SGMII 9
197
198 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
199 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
200 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
201 defined(CONFIG_405EX)
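/*
 * Clock select bit for EMAC 'n'. These SoCs have at most two EMACs, so
 * with n limited to 0 or 1 the division below yields 0x08000000 (n = 0)
 * or 0x04000000 (n = 1).
 */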
202 #define SDR0_MFR_ETH_CLK_SEL_V(n) ((0x01<<27) / (n+1))
203 #endif
204
205 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
206 #define SDR0_ETH_CFG_CLK_SEL_V(n) (0x01 << (8 + n))
207 #endif
208
209 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
210 #define MAL_RX_CHAN_MUL 8 /* 460EX/GT uses MAL channel 8 for EMAC1 */
211 #else
212 #define MAL_RX_CHAN_MUL 1
213 #endif
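/*
 * MAL_RX_CHAN_MUL maps an EMAC number to its MAL RX channel; enetInt()
 * uses it when testing the per-channel end-of-buffer bit in
 * MAL0_RXEOBISR: (0x80000000 >> (devnum * MAL_RX_CHAN_MUL)).
 */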
214
215 /*--------------------------------------------------------------------+
216 * Fixed PHY (PHY-less) support for Ethernet Ports.
217 *--------------------------------------------------------------------*/
218
219 /*
220 * Some boards do not have a PHY for each ethernet port. These ports
221 * are known as Fixed PHY (or PHY-less) ports. For such ports, set
222 * the appropriate CONFIG_PHY_ADDR equal to CONFIG_FIXED_PHY and
223 * then define CONFIG_SYS_FIXED_PHY_PORTS to define what the speed and
224 * duplex should be for these ports in the board configuration
225 * file.
226 *
227 * For Example:
228 * #define CONFIG_FIXED_PHY 0xFFFFFFFF
229 *
230 * #define CONFIG_PHY_ADDR CONFIG_FIXED_PHY
231 * #define CONFIG_PHY1_ADDR 1
232 * #define CONFIG_PHY2_ADDR CONFIG_FIXED_PHY
233 * #define CONFIG_PHY3_ADDR 3
234 *
235 * #define CONFIG_SYS_FIXED_PHY_PORT(devnum,speed,duplex) \
236 * {devnum, speed, duplex},
237 *
238 * #define CONFIG_SYS_FIXED_PHY_PORTS \
239 * CONFIG_SYS_FIXED_PHY_PORT(0,1000,FULL) \
240 * CONFIG_SYS_FIXED_PHY_PORT(2,100,HALF)
241 */
242
243 #ifndef CONFIG_FIXED_PHY
244 #define CONFIG_FIXED_PHY 0xFFFFFFFF /* Fixed PHY (PHY-less) */
245 #endif
246
247 #ifndef CONFIG_SYS_FIXED_PHY_PORTS
248 #define CONFIG_SYS_FIXED_PHY_PORTS /* default is an empty array */
249 #endif
250
251 struct fixed_phy_port {
252 unsigned int devnum; /* ethernet port */
253 unsigned int speed; /* specified speed 10,100 or 1000 */
254 unsigned int duplex; /* specified duplex FULL or HALF */
255 };
256
257 static const struct fixed_phy_port fixed_phy_port[] = {
258 CONFIG_SYS_FIXED_PHY_PORTS /* defined in board configuration file */
259 };
260
261 /*-----------------------------------------------------------------------------+
262 * Global variables. TX and RX descriptors and buffers.
263 *-----------------------------------------------------------------------------*/
264
265 /*
266 * Get count of EMAC devices (doesn't have to be the max. possible number
267 * supported by the cpu)
268 *
 269  * CONFIG_BOARD_EMAC_COUNT was added so that the EMAC count can be
 270  * configured "dynamically" at run-time. This is needed for the
 271  * Kilauea/Haleakala 405EX/405EXr eval boards, which use the same binary.
272 */
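/*
 * Illustrative sketch only (not part of this driver): with
 * CONFIG_BOARD_EMAC_COUNT defined, the board code supplies
 * board_emac_count(), roughly along these lines (the 405EXr has one
 * EMAC, the 405EX two). The is_405exr() helper is hypothetical and
 * stands in for whatever run-time CPU/strapping detection the board
 * actually performs:
 *
 *	int board_emac_count(void)
 *	{
 *		return is_405exr() ? 1 : 2;
 *	}
 */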
273 #if defined(CONFIG_BOARD_EMAC_COUNT)
274 #define LAST_EMAC_NUM board_emac_count()
275 #else /* CONFIG_BOARD_EMAC_COUNT */
276 #if defined(CONFIG_HAS_ETH3)
277 #define LAST_EMAC_NUM 4
278 #elif defined(CONFIG_HAS_ETH2)
279 #define LAST_EMAC_NUM 3
280 #elif defined(CONFIG_HAS_ETH1)
281 #define LAST_EMAC_NUM 2
282 #else
283 #define LAST_EMAC_NUM 1
284 #endif
285 #endif /* CONFIG_BOARD_EMAC_COUNT */
286
287 /* normal boards start with EMAC0 */
288 #if !defined(CONFIG_EMAC_NR_START)
289 #define CONFIG_EMAC_NR_START 0
290 #endif
291
292 #define MAL_RX_DESC_SIZE 2048
293 #define MAL_TX_DESC_SIZE 2048
294 #define MAL_ALLOC_SIZE (MAL_TX_DESC_SIZE + MAL_RX_DESC_SIZE)
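/* i.e. one 4 KiB block: 2 KiB of TX plus 2 KiB of RX descriptors,
 * allocated 4 KiB-aligned in ppc_4xx_eth_init() via
 * malloc_aligned(MAL_ALLOC_SIZE, 4096) */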
295
296 /*-----------------------------------------------------------------------------+
297 * Prototypes and externals.
298 *-----------------------------------------------------------------------------*/
299 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
300
301 int enetInt (struct eth_device *dev);
302 static void mal_err (struct eth_device *dev, unsigned long isr,
303 unsigned long uic, unsigned long maldef,
304 unsigned long mal_errr);
305 static void emac_err (struct eth_device *dev, unsigned long isr);
306
307 extern int phy_setup_aneg (char *devname, unsigned char addr);
308 extern int emac4xx_miiphy_read (const char *devname, unsigned char addr,
309 unsigned char reg, unsigned short *value);
310 extern int emac4xx_miiphy_write (const char *devname, unsigned char addr,
311 unsigned char reg, unsigned short value);
312
313 int board_emac_count(void);
314
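/*
 * Set the ethernet clock select bit (SDR0_MFR or SDR0_ETH_CFG) for this
 * EMAC. The callers use this pair of helpers to provide clocks for the
 * EMAC internal loopback path around the soft reset, which presumably
 * allows EMAC_MR0_SRST to complete even without an external RX clock.
 */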
315 static void emac_loopback_enable(EMAC_4XX_HW_PST hw_p)
316 {
317 #if defined(CONFIG_440SPE) || \
318 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
319 defined(CONFIG_405EX)
320 u32 val;
321
322 mfsdr(SDR0_MFR, val);
323 val |= SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
324 mtsdr(SDR0_MFR, val);
325 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
326 u32 val;
327
328 mfsdr(SDR0_ETH_CFG, val);
329 val |= SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
330 mtsdr(SDR0_ETH_CFG, val);
331 #endif
332 }
333
334 static void emac_loopback_disable(EMAC_4XX_HW_PST hw_p)
335 {
336 #if defined(CONFIG_440SPE) || \
337 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
338 defined(CONFIG_405EX)
339 u32 val;
340
341 mfsdr(SDR0_MFR, val);
342 val &= ~SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
343 mtsdr(SDR0_MFR, val);
344 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
345 u32 val;
346
347 mfsdr(SDR0_ETH_CFG, val);
348 val &= ~SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
349 mtsdr(SDR0_ETH_CFG, val);
350 #endif
351 }
352
353 /*-----------------------------------------------------------------------------+
354 | ppc_4xx_eth_halt
355 | Disable MAL channel, and EMACn
356 +-----------------------------------------------------------------------------*/
357 static void ppc_4xx_eth_halt (struct eth_device *dev)
358 {
359 EMAC_4XX_HW_PST hw_p = dev->priv;
360 u32 val = 10000;
361
362 out_be32((void *)EMAC0_IER + hw_p->hw_addr, 0x00000000); /* disable emac interrupts */
363
364 /* 1st reset MAL channel */
365 /* Note: writing a 0 to a channel has no effect */
366 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
367 mtdcr (MAL0_TXCARR, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
368 #else
369 mtdcr (MAL0_TXCARR, (MAL_CR_MMSR >> hw_p->devnum));
370 #endif
371 mtdcr (MAL0_RXCARR, (MAL_CR_MMSR >> hw_p->devnum));
372
373 /* wait for reset */
374 while (mfdcr (MAL0_RXCASR) & (MAL_CR_MMSR >> hw_p->devnum)) {
375 udelay (1000); /* Delay 1 MS so as not to hammer the register */
376 val--;
377 if (val == 0)
378 break;
379 }
380
381 /* provide clocks for EMAC internal loopback */
382 emac_loopback_enable(hw_p);
383
384 /* EMAC RESET */
385 out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_SRST);
386
387 /* remove clocks for EMAC internal loopback */
388 emac_loopback_disable(hw_p);
389
390 #ifndef CONFIG_NETCONSOLE
391 hw_p->print_speed = 1; /* print speed message again next time */
392 #endif
393
394 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
395 /* don't bypass the TAHOE0/TAHOE1 cores for Linux */
396 mfsdr(SDR0_ETH_CFG, val);
397 val &= ~(SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
398 mtsdr(SDR0_ETH_CFG, val);
399 #endif
400
401 return;
402 }
403
404 #if defined (CONFIG_440GX)
405 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
406 {
407 unsigned long pfc1;
408 unsigned long zmiifer;
409 unsigned long rmiifer;
410
411 mfsdr(SDR0_PFC1, pfc1);
412 pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
413
414 zmiifer = 0;
415 rmiifer = 0;
416
417 switch (pfc1) {
418 case 1:
419 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
420 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
421 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
422 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
423 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
424 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
425 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
426 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
427 break;
428 case 2:
429 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
430 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
431 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
432 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
433 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
434 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
435 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
436 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
437 break;
438 case 3:
439 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
440 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
441 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
442 bis->bi_phymode[1] = BI_PHYMODE_NONE;
443 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
444 bis->bi_phymode[3] = BI_PHYMODE_NONE;
445 break;
446 case 4:
447 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
448 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
449 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
450 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
451 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
452 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
453 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
454 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
455 break;
456 case 5:
457 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
458 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
459 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
460 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
461 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
462 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
463 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
464 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
465 break;
466 case 6:
467 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
468 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
469 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
470 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
471 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
472 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
473 break;
474 case 0:
475 default:
476 zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
477 rmiifer = 0x0;
478 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
479 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
480 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
481 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
482 break;
483 }
484
485 /* Ensure we setup mdio for this devnum and ONLY this devnum */
486 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
487
488 out_be32((void *)ZMII0_FER, zmiifer);
489 out_be32((void *)RGMII_FER, rmiifer);
490
491 return ((int)pfc1);
492 }
 493 #endif /* CONFIG_440GX */
494
495 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
496 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
497 {
498 unsigned long zmiifer=0x0;
499 unsigned long pfc1;
500
501 mfsdr(SDR0_PFC1, pfc1);
502 pfc1 &= SDR0_PFC1_SELECT_MASK;
503
504 switch (pfc1) {
505 case SDR0_PFC1_SELECT_CONFIG_2:
506 /* 1 x GMII port */
507 out_be32((void *)ZMII0_FER, 0x00);
508 out_be32((void *)RGMII_FER, 0x00000037);
509 bis->bi_phymode[0] = BI_PHYMODE_GMII;
510 bis->bi_phymode[1] = BI_PHYMODE_NONE;
511 break;
512 case SDR0_PFC1_SELECT_CONFIG_4:
513 /* 2 x RGMII ports */
514 out_be32((void *)ZMII0_FER, 0x00);
515 out_be32((void *)RGMII_FER, 0x00000055);
516 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
517 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
518 break;
519 case SDR0_PFC1_SELECT_CONFIG_6:
520 /* 2 x SMII ports */
521 out_be32((void *)ZMII0_FER,
522 ((ZMII_FER_SMII) << ZMII_FER_V(0)) |
523 ((ZMII_FER_SMII) << ZMII_FER_V(1)));
524 out_be32((void *)RGMII_FER, 0x00000000);
525 bis->bi_phymode[0] = BI_PHYMODE_SMII;
526 bis->bi_phymode[1] = BI_PHYMODE_SMII;
527 break;
528 case SDR0_PFC1_SELECT_CONFIG_1_2:
529 /* only 1 x MII supported */
530 out_be32((void *)ZMII0_FER, (ZMII_FER_MII) << ZMII_FER_V(0));
531 out_be32((void *)RGMII_FER, 0x00000000);
532 bis->bi_phymode[0] = BI_PHYMODE_MII;
533 bis->bi_phymode[1] = BI_PHYMODE_NONE;
534 break;
535 default:
536 break;
537 }
538
539 /* Ensure we setup mdio for this devnum and ONLY this devnum */
540 zmiifer = in_be32((void *)ZMII0_FER);
541 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
542 out_be32((void *)ZMII0_FER, zmiifer);
543
544 return ((int)0x0);
545 }
 546 #endif /* CONFIG_440EPX || CONFIG_440GRX */
547
548 #if defined(CONFIG_405EX)
549 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
550 {
551 u32 rgmiifer = 0;
552
553 /*
554 * The 405EX(r)'s RGMII bridge can operate in one of several
555 * modes, only one of which (2 x RGMII) allows the
556 * simultaneous use of both EMACs on the 405EX.
557 */
558
559 switch (CONFIG_EMAC_PHY_MODE) {
560
561 case EMAC_PHY_MODE_NONE:
562 /* No ports */
563 rgmiifer |= RGMII_FER_DIS << 0;
564 rgmiifer |= RGMII_FER_DIS << 4;
565 out_be32((void *)RGMII_FER, rgmiifer);
566 bis->bi_phymode[0] = BI_PHYMODE_NONE;
567 bis->bi_phymode[1] = BI_PHYMODE_NONE;
568 break;
569 case EMAC_PHY_MODE_NONE_RGMII:
570 /* 1 x RGMII port on channel 0 */
571 rgmiifer |= RGMII_FER_RGMII << 0;
572 rgmiifer |= RGMII_FER_DIS << 4;
573 out_be32((void *)RGMII_FER, rgmiifer);
574 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
575 bis->bi_phymode[1] = BI_PHYMODE_NONE;
576 break;
577 case EMAC_PHY_MODE_RGMII_NONE:
578 /* 1 x RGMII port on channel 1 */
579 rgmiifer |= RGMII_FER_DIS << 0;
580 rgmiifer |= RGMII_FER_RGMII << 4;
581 out_be32((void *)RGMII_FER, rgmiifer);
582 bis->bi_phymode[0] = BI_PHYMODE_NONE;
583 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
584 break;
585 case EMAC_PHY_MODE_RGMII_RGMII:
586 /* 2 x RGMII ports */
587 rgmiifer |= RGMII_FER_RGMII << 0;
588 rgmiifer |= RGMII_FER_RGMII << 4;
589 out_be32((void *)RGMII_FER, rgmiifer);
590 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
591 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
592 break;
593 case EMAC_PHY_MODE_NONE_GMII:
594 /* 1 x GMII port on channel 0 */
595 rgmiifer |= RGMII_FER_GMII << 0;
596 rgmiifer |= RGMII_FER_DIS << 4;
597 out_be32((void *)RGMII_FER, rgmiifer);
598 bis->bi_phymode[0] = BI_PHYMODE_GMII;
599 bis->bi_phymode[1] = BI_PHYMODE_NONE;
600 break;
601 case EMAC_PHY_MODE_NONE_MII:
602 /* 1 x MII port on channel 0 */
603 rgmiifer |= RGMII_FER_MII << 0;
604 rgmiifer |= RGMII_FER_DIS << 4;
605 out_be32((void *)RGMII_FER, rgmiifer);
606 bis->bi_phymode[0] = BI_PHYMODE_MII;
607 bis->bi_phymode[1] = BI_PHYMODE_NONE;
608 break;
609 case EMAC_PHY_MODE_GMII_NONE:
610 /* 1 x GMII port on channel 1 */
611 rgmiifer |= RGMII_FER_DIS << 0;
612 rgmiifer |= RGMII_FER_GMII << 4;
613 out_be32((void *)RGMII_FER, rgmiifer);
614 bis->bi_phymode[0] = BI_PHYMODE_NONE;
615 bis->bi_phymode[1] = BI_PHYMODE_GMII;
616 break;
617 case EMAC_PHY_MODE_MII_NONE:
618 /* 1 x MII port on channel 1 */
619 rgmiifer |= RGMII_FER_DIS << 0;
620 rgmiifer |= RGMII_FER_MII << 4;
621 out_be32((void *)RGMII_FER, rgmiifer);
622 bis->bi_phymode[0] = BI_PHYMODE_NONE;
623 bis->bi_phymode[1] = BI_PHYMODE_MII;
624 break;
625 default:
626 break;
627 }
628
629 /* Ensure we setup mdio for this devnum and ONLY this devnum */
630 rgmiifer = in_be32((void *)RGMII_FER);
631 rgmiifer |= (1 << (19-devnum));
632 out_be32((void *)RGMII_FER, rgmiifer);
633
634 return ((int)0x0);
635 }
636 #endif /* CONFIG_405EX */
637
638 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
639 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
640 {
641 u32 eth_cfg;
642 u32 zmiifer; /* ZMII0_FER reg. */
643 u32 rmiifer; /* RGMII0_FER reg. Bridge 0 */
644 u32 rmiifer1; /* RGMII0_FER reg. Bridge 1 */
645 int mode;
646
647 zmiifer = 0;
648 rmiifer = 0;
649 rmiifer1 = 0;
650
651 #if defined(CONFIG_460EX)
652 mode = 9;
653 mfsdr(SDR0_ETH_CFG, eth_cfg);
654 if (((eth_cfg & SDR0_ETH_CFG_SGMII0_ENABLE) > 0) &&
655 ((eth_cfg & SDR0_ETH_CFG_SGMII1_ENABLE) > 0))
656 mode = 11; /* config SGMII */
657 #else
658 mode = 10;
659 mfsdr(SDR0_ETH_CFG, eth_cfg);
660 if (((eth_cfg & SDR0_ETH_CFG_SGMII0_ENABLE) > 0) &&
661 ((eth_cfg & SDR0_ETH_CFG_SGMII1_ENABLE) > 0) &&
662 ((eth_cfg & SDR0_ETH_CFG_SGMII2_ENABLE) > 0))
663 mode = 12; /* config SGMII */
664 #endif
665
666 /* TODO:
667 * NOTE: 460GT has 2 RGMII bridge cores:
668 * emac0 ------ RGMII0_BASE
669 * |
670 * emac1 -----+
671 *
672 * emac2 ------ RGMII1_BASE
673 * |
674 * emac3 -----+
675 *
676 * 460EX has 1 RGMII bridge core:
677 * and RGMII1_BASE is disabled
678 * emac0 ------ RGMII0_BASE
679 * |
680 * emac1 -----+
681 */
682
683 /*
684 * Right now only 2*RGMII is supported. Please extend when needed.
685 * sr - 2008-02-19
686 * Add SGMII support.
687 * vg - 2008-07-28
688 */
689 switch (mode) {
690 case 1:
691 /* 1 MII - 460EX */
692 /* GMC0 EMAC4_0, ZMII Bridge */
693 zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
694 bis->bi_phymode[0] = BI_PHYMODE_MII;
695 bis->bi_phymode[1] = BI_PHYMODE_NONE;
696 bis->bi_phymode[2] = BI_PHYMODE_NONE;
697 bis->bi_phymode[3] = BI_PHYMODE_NONE;
698 break;
699 case 2:
700 /* 2 MII - 460GT */
701 /* GMC0 EMAC4_0, GMC1 EMAC4_2, ZMII Bridge */
702 zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
703 zmiifer |= ZMII_FER_MII << ZMII_FER_V(2);
704 bis->bi_phymode[0] = BI_PHYMODE_MII;
705 bis->bi_phymode[1] = BI_PHYMODE_NONE;
706 bis->bi_phymode[2] = BI_PHYMODE_MII;
707 bis->bi_phymode[3] = BI_PHYMODE_NONE;
708 break;
709 case 3:
710 /* 2 RMII - 460EX */
711 /* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
712 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
713 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
714 bis->bi_phymode[0] = BI_PHYMODE_RMII;
715 bis->bi_phymode[1] = BI_PHYMODE_RMII;
716 bis->bi_phymode[2] = BI_PHYMODE_NONE;
717 bis->bi_phymode[3] = BI_PHYMODE_NONE;
718 break;
719 case 4:
720 /* 4 RMII - 460GT */
 721 		/* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC1 EMAC4_2, GMC1 EMAC4_3 */
722 /* ZMII Bridge */
723 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
724 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
725 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
726 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
727 bis->bi_phymode[0] = BI_PHYMODE_RMII;
728 bis->bi_phymode[1] = BI_PHYMODE_RMII;
729 bis->bi_phymode[2] = BI_PHYMODE_RMII;
730 bis->bi_phymode[3] = BI_PHYMODE_RMII;
731 break;
732 case 5:
733 /* 2 SMII - 460EX */
734 /* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
735 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
736 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
737 bis->bi_phymode[0] = BI_PHYMODE_SMII;
738 bis->bi_phymode[1] = BI_PHYMODE_SMII;
739 bis->bi_phymode[2] = BI_PHYMODE_NONE;
740 bis->bi_phymode[3] = BI_PHYMODE_NONE;
741 break;
742 case 6:
743 /* 4 SMII - 460GT */
 744 		/* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC1 EMAC4_2, GMC1 EMAC4_3 */
745 /* ZMII Bridge */
746 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
747 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
748 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
749 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
750 bis->bi_phymode[0] = BI_PHYMODE_SMII;
751 bis->bi_phymode[1] = BI_PHYMODE_SMII;
752 bis->bi_phymode[2] = BI_PHYMODE_SMII;
753 bis->bi_phymode[3] = BI_PHYMODE_SMII;
754 break;
755 case 7:
756 /* This is the default mode that we want for board bringup - Maple */
757 /* 1 GMII - 460EX */
758 /* GMC0 EMAC4_0, RGMII Bridge 0 */
759 rmiifer |= RGMII_FER_MDIO(0);
760
761 if (devnum == 0) {
762 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
763 bis->bi_phymode[0] = BI_PHYMODE_GMII;
764 bis->bi_phymode[1] = BI_PHYMODE_NONE;
765 bis->bi_phymode[2] = BI_PHYMODE_NONE;
766 bis->bi_phymode[3] = BI_PHYMODE_NONE;
767 } else {
768 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(3); /* CH1CFG - EMAC1 */
769 bis->bi_phymode[0] = BI_PHYMODE_NONE;
770 bis->bi_phymode[1] = BI_PHYMODE_GMII;
771 bis->bi_phymode[2] = BI_PHYMODE_NONE;
772 bis->bi_phymode[3] = BI_PHYMODE_NONE;
773 }
774 break;
775 case 8:
776 /* 2 GMII - 460GT */
777 /* GMC0 EMAC4_0, RGMII Bridge 0 */
778 /* GMC1 EMAC4_2, RGMII Bridge 1 */
779 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
780 rmiifer1 |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC2 */
781 rmiifer |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC0 */
782 rmiifer1 |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC2 */
783
784 bis->bi_phymode[0] = BI_PHYMODE_GMII;
785 bis->bi_phymode[1] = BI_PHYMODE_NONE;
786 bis->bi_phymode[2] = BI_PHYMODE_GMII;
787 bis->bi_phymode[3] = BI_PHYMODE_NONE;
788 break;
789 case 9:
790 /* 2 RGMII - 460EX */
791 /* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
792 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
793 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
794 rmiifer |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC0 */
795
796 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
797 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
798 bis->bi_phymode[2] = BI_PHYMODE_NONE;
799 bis->bi_phymode[3] = BI_PHYMODE_NONE;
800 break;
801 case 10:
802 /* 4 RGMII - 460GT */
803 /* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
804 /* GMC1 EMAC4_2, GMC1 EMAC4_3, RGMII Bridge 1 */
805 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
806 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
807 rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(2);
808 rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(3);
809 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
810 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
811 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
812 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
813 break;
814 case 11:
815 /* 2 SGMII - 460EX */
816 bis->bi_phymode[0] = BI_PHYMODE_SGMII;
817 bis->bi_phymode[1] = BI_PHYMODE_SGMII;
818 bis->bi_phymode[2] = BI_PHYMODE_NONE;
819 bis->bi_phymode[3] = BI_PHYMODE_NONE;
820 break;
821 case 12:
822 /* 3 SGMII - 460GT */
823 bis->bi_phymode[0] = BI_PHYMODE_SGMII;
824 bis->bi_phymode[1] = BI_PHYMODE_SGMII;
825 bis->bi_phymode[2] = BI_PHYMODE_SGMII;
826 bis->bi_phymode[3] = BI_PHYMODE_NONE;
827 break;
828 default:
829 break;
830 }
831
832 /* Set EMAC for MDIO */
833 mfsdr(SDR0_ETH_CFG, eth_cfg);
834 eth_cfg |= SDR0_ETH_CFG_MDIO_SEL_EMAC0;
835 mtsdr(SDR0_ETH_CFG, eth_cfg);
836
837 out_be32((void *)RGMII_FER, rmiifer);
838 #if defined(CONFIG_460GT)
839 out_be32((void *)RGMII_FER + RGMII1_BASE_OFFSET, rmiifer1);
840 #endif
841
842 /* bypass the TAHOE0/TAHOE1 cores for U-Boot */
843 mfsdr(SDR0_ETH_CFG, eth_cfg);
844 eth_cfg |= (SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
845 mtsdr(SDR0_ETH_CFG, eth_cfg);
846
847 return 0;
848 }
849 #endif /* CONFIG_460EX || CONFIG_460GT */
850
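/*
 * Allocate a buffer and round the returned pointer up to 'align' bytes
 * (align must be a power of two). The unaligned head of the allocation
 * is simply wasted and the original malloc() pointer is not kept, so
 * this memory can never be freed -- acceptable here, since the
 * descriptor and buffer areas live for the lifetime of U-Boot.
 */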
851 static inline void *malloc_aligned(u32 size, u32 align)
852 {
853 return (void *)(((u32)malloc(size + align) + align - 1) &
854 ~(align - 1));
855 }
856
857 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
858 {
859 int i;
860 unsigned long reg = 0;
861 unsigned long msr;
862 unsigned long speed;
863 unsigned long duplex;
864 unsigned long failsafe;
865 unsigned mode_reg;
866 unsigned short devnum;
867 unsigned short reg_short;
868 #if defined(CONFIG_440GX) || \
869 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
870 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
871 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
872 defined(CONFIG_405EX)
873 u32 opbfreq;
874 sys_info_t sysinfo;
875 #if defined(CONFIG_440GX) || \
876 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
877 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
878 defined(CONFIG_405EX)
879 int ethgroup = -1;
880 #endif
881 #endif
882 u32 bd_cached;
883 u32 bd_uncached = 0;
884 #ifdef CONFIG_4xx_DCACHE
885 static u32 last_used_ea = 0;
886 #endif
887 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
888 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
889 defined(CONFIG_405EX)
890 int rgmii_channel;
891 #endif
892
893 EMAC_4XX_HW_PST hw_p = dev->priv;
894
895 /* before doing anything, figure out if we have a MAC address */
896 /* if not, bail */
897 if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
898 printf("ERROR: ethaddr not set!\n");
899 return -1;
900 }
901
902 #if defined(CONFIG_440GX) || \
903 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
904 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
905 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
906 defined(CONFIG_405EX)
907 /* Need to get the OPB frequency so we can access the PHY */
908 get_sys_info (&sysinfo);
909 #endif
910
911 msr = mfmsr ();
912 mtmsr (msr & ~(MSR_EE)); /* disable interrupts */
913
914 devnum = hw_p->devnum;
915
916 #ifdef INFO_4XX_ENET
917 /* AS.HARNOIS
918 * We should have :
919 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
 920 	 * In most cases hw_p->stats.pkts_handled = hw_p->stats.pkts_rx, but it
 921 	 * is possible that new packets (unrelated to the
 922 	 * current transfer) have had time to arrive before
 923 	 * NetLoop calls eth_halt
924 */
 925 	printf ("About preceding transfer (eth%d):\n"
926 "- Sent packet number %d\n"
927 "- Received packet number %d\n"
928 "- Handled packet number %d\n",
929 hw_p->devnum,
930 hw_p->stats.pkts_tx,
931 hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
932
933 hw_p->stats.pkts_tx = 0;
934 hw_p->stats.pkts_rx = 0;
935 hw_p->stats.pkts_handled = 0;
936 hw_p->print_speed = 1; /* print speed message again next time */
937 #endif
938
939 hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
940 hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
941
942 hw_p->rx_slot = 0; /* MAL Receive Slot */
943 hw_p->rx_i_index = 0; /* Receive Interrupt Queue Index */
944 hw_p->rx_u_index = 0; /* Receive User Queue Index */
945
946 hw_p->tx_slot = 0; /* MAL Transmit Slot */
947 hw_p->tx_i_index = 0; /* Transmit Interrupt Queue Index */
948 hw_p->tx_u_index = 0; /* Transmit User Queue Index */
949
950 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
951 /* set RMII mode */
952 /* NOTE: 440GX spec states that mode is mutually exclusive */
953 /* NOTE: Therefore, disable all other EMACS, since we handle */
954 /* NOTE: only one emac at a time */
955 reg = 0;
956 out_be32((void *)ZMII0_FER, 0);
957 udelay (100);
958
959 #if defined(CONFIG_440GP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
960 out_be32((void *)ZMII0_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
961 #elif defined(CONFIG_440GX) || \
962 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
963 defined(CONFIG_460EX) || defined(CONFIG_460GT)
964 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
965 #endif
966
967 out_be32((void *)ZMII0_SSR, ZMII0_SSR_SP << ZMII0_SSR_V(devnum));
968 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
969 #if defined(CONFIG_405EX)
970 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
971 #endif
972
973 sync();
974
975 /* provide clocks for EMAC internal loopback */
976 emac_loopback_enable(hw_p);
977
978 /* EMAC RESET */
979 out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_SRST);
980
981 /* remove clocks for EMAC internal loopback */
982 emac_loopback_disable(hw_p);
983
984 failsafe = 1000;
985 while ((in_be32((void *)EMAC0_MR0 + hw_p->hw_addr) & (EMAC_MR0_SRST)) && failsafe) {
986 udelay (1000);
987 failsafe--;
988 }
989 if (failsafe <= 0)
990 printf("\nProblem resetting EMAC!\n");
991
992 #if defined(CONFIG_440GX) || \
993 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
994 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
995 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
996 defined(CONFIG_405EX)
 997 	/* Whack the MR1 register */
998 mode_reg = 0x0;
999 mode_reg &= ~0x00000038;
1000 opbfreq = sysinfo.freqOPB / 1000000;
 1001 	if (opbfreq <= 50);	/* <= 50 MHz: leave the OBCI bits at zero */
1002 else if (opbfreq <= 66)
1003 mode_reg |= EMAC_MR1_OBCI_66;
1004 else if (opbfreq <= 83)
1005 mode_reg |= EMAC_MR1_OBCI_83;
1006 else if (opbfreq <= 100)
1007 mode_reg |= EMAC_MR1_OBCI_100;
1008 else
1009 mode_reg |= EMAC_MR1_OBCI_GT100;
1010
1011 out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
1012 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
1013
1014 #if defined(CONFIG_GPCS_PHY_ADDR) || defined(CONFIG_GPCS_PHY1_ADDR) || \
1015 defined(CONFIG_GPCS_PHY2_ADDR) || defined(CONFIG_GPCS_PHY3_ADDR)
1016 if (bis->bi_phymode[devnum] == BI_PHYMODE_SGMII) {
1017 /*
1018 * In SGMII mode, GPCS access is needed for
1019 * communication with the internal SGMII SerDes.
1020 */
1021 switch (devnum) {
1022 #if defined(CONFIG_GPCS_PHY_ADDR)
1023 case 0:
1024 reg = CONFIG_GPCS_PHY_ADDR;
1025 break;
1026 #endif
1027 #if defined(CONFIG_GPCS_PHY1_ADDR)
1028 case 1:
1029 reg = CONFIG_GPCS_PHY1_ADDR;
1030 break;
1031 #endif
1032 #if defined(CONFIG_GPCS_PHY2_ADDR)
1033 case 2:
1034 reg = CONFIG_GPCS_PHY2_ADDR;
1035 break;
1036 #endif
1037 #if defined(CONFIG_GPCS_PHY3_ADDR)
1038 case 3:
1039 reg = CONFIG_GPCS_PHY3_ADDR;
1040 break;
1041 #endif
1042 }
1043
1044 mode_reg = in_be32((void *)EMAC0_MR1 + hw_p->hw_addr);
1045 mode_reg |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_IPPA_SET(reg);
1046 out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
1047
1048 /* Configure GPCS interface to recommended setting for SGMII */
1049 miiphy_reset(dev->name, reg);
1050 miiphy_write(dev->name, reg, 0x04, 0x8120); /* AsymPause, FDX */
1051 miiphy_write(dev->name, reg, 0x07, 0x2801); /* msg_pg, toggle */
1052 miiphy_write(dev->name, reg, 0x00, 0x0140); /* 1Gbps, FDX */
1053 }
1054 #endif /* defined(CONFIG_GPCS_PHY_ADDR) */
1055
1056 /* wait for PHY to complete auto negotiation */
1057 reg_short = 0;
1058 switch (devnum) {
1059 case 0:
1060 reg = CONFIG_PHY_ADDR;
1061 break;
1062 #if defined (CONFIG_PHY1_ADDR)
1063 case 1:
1064 reg = CONFIG_PHY1_ADDR;
1065 break;
1066 #endif
1067 #if defined (CONFIG_PHY2_ADDR)
1068 case 2:
1069 reg = CONFIG_PHY2_ADDR;
1070 break;
1071 #endif
1072 #if defined (CONFIG_PHY3_ADDR)
1073 case 3:
1074 reg = CONFIG_PHY3_ADDR;
1075 break;
1076 #endif
1077 default:
1078 reg = CONFIG_PHY_ADDR;
1079 break;
1080 }
1081
1082 bis->bi_phynum[devnum] = reg;
1083
1084 if (reg == CONFIG_FIXED_PHY)
1085 goto get_speed;
1086
1087 #if defined(CONFIG_PHY_RESET)
1088 /*
 1089 	 * Reset the PHY, but only the first time through;
 1090 	 * otherwise, just check the speeds & feeds
1091 */
1092 if (hw_p->first_init == 0) {
1093 #if defined(CONFIG_M88E1111_PHY)
1094 miiphy_write (dev->name, reg, 0x14, 0x0ce3);
1095 miiphy_write (dev->name, reg, 0x18, 0x4101);
1096 miiphy_write (dev->name, reg, 0x09, 0x0e00);
1097 miiphy_write (dev->name, reg, 0x04, 0x01e1);
1098 #if defined(CONFIG_M88E1111_DISABLE_FIBER)
1099 miiphy_read(dev->name, reg, 0x1b, &reg_short);
1100 reg_short |= 0x8000;
1101 miiphy_write(dev->name, reg, 0x1b, reg_short);
1102 #endif
1103 #endif
1104 #if defined(CONFIG_M88E1112_PHY)
1105 if (bis->bi_phymode[devnum] == BI_PHYMODE_SGMII) {
1106 /*
1107 * Marvell 88E1112 PHY needs to have the SGMII MAC
 1108 	 * interface (page 2) properly configured to
1109 * communicate with the 460EX/GT GPCS interface.
1110 */
1111
1112 /* Set access to Page 2 */
1113 miiphy_write(dev->name, reg, 0x16, 0x0002);
1114
1115 miiphy_write(dev->name, reg, 0x00, 0x0040); /* 1Gbps */
1116 miiphy_read(dev->name, reg, 0x1a, &reg_short);
1117 reg_short |= 0x8000; /* bypass Auto-Negotiation */
1118 miiphy_write(dev->name, reg, 0x1a, reg_short);
1119 miiphy_reset(dev->name, reg); /* reset MAC interface */
1120
1121 /* Reset access to Page 0 */
1122 miiphy_write(dev->name, reg, 0x16, 0x0000);
1123 }
1124 #endif /* defined(CONFIG_M88E1112_PHY) */
1125 miiphy_reset (dev->name, reg);
1126
1127 #if defined(CONFIG_440GX) || \
1128 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1129 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1130 defined(CONFIG_405EX)
1131
1132 #if defined(CONFIG_CIS8201_PHY)
1133 /*
1134 * Cicada 8201 PHY needs to have an extended register whacked
1135 * for RGMII mode.
1136 */
1137 if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
1138 #if defined(CONFIG_CIS8201_SHORT_ETCH)
1139 miiphy_write (dev->name, reg, 23, 0x1300);
1140 #else
1141 miiphy_write (dev->name, reg, 23, 0x1000);
1142 #endif
1143 /*
1144 * Vitesse VSC8201/Cicada CIS8201 errata:
1145 * Interoperability problem with Intel 82547EI phys
1146 * This work around (provided by Vitesse) changes
1147 * the default timer convergence from 8ms to 12ms
1148 */
1149 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
1150 miiphy_write (dev->name, reg, 0x08, 0x0200);
1151 miiphy_write (dev->name, reg, 0x1f, 0x52b5);
1152 miiphy_write (dev->name, reg, 0x02, 0x0004);
1153 miiphy_write (dev->name, reg, 0x01, 0x0671);
1154 miiphy_write (dev->name, reg, 0x00, 0x8fae);
1155 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
1156 miiphy_write (dev->name, reg, 0x08, 0x0000);
1157 miiphy_write (dev->name, reg, 0x1f, 0x0000);
1158 /* end Vitesse/Cicada errata */
1159 }
1160 #endif /* defined(CONFIG_CIS8201_PHY) */
1161
1162 #if defined(CONFIG_ET1011C_PHY)
1163 /*
1164 * Agere ET1011c PHY needs to have an extended register whacked
1165 * for RGMII mode.
1166 */
1167 if (((devnum == 2) || (devnum ==3)) && (4 == ethgroup)) {
1168 miiphy_read (dev->name, reg, 0x16, &reg_short);
1169 reg_short &= ~(0x7);
1170 reg_short |= 0x6; /* RGMII DLL Delay*/
1171 miiphy_write (dev->name, reg, 0x16, reg_short);
1172
1173 miiphy_read (dev->name, reg, 0x17, &reg_short);
1174 reg_short &= ~(0x40);
1175 miiphy_write (dev->name, reg, 0x17, reg_short);
1176
1177 miiphy_write(dev->name, reg, 0x1c, 0x74f0);
1178 }
1179 #endif /* defined(CONFIG_ET1011C_PHY) */
1180
1181 #endif /* defined(CONFIG_440GX) ... */
1182 /* Start/Restart autonegotiation */
1183 phy_setup_aneg (dev->name, reg);
1184 udelay (1000);
1185 }
1186 #endif /* defined(CONFIG_PHY_RESET) */
1187
1188 miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
1189
1190 /*
1191 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
1192 */
1193 if ((reg_short & BMSR_ANEGCAPABLE)
1194 && !(reg_short & BMSR_ANEGCOMPLETE)) {
1195 puts ("Waiting for PHY auto negotiation to complete");
1196 i = 0;
1197 while (!(reg_short & BMSR_ANEGCOMPLETE)) {
1198 /*
1199 * Timeout reached ?
1200 */
1201 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
1202 puts (" TIMEOUT !\n");
1203 break;
1204 }
1205
1206 if ((i++ % 1000) == 0) {
1207 putc ('.');
1208 }
1209 udelay (1000); /* 1 ms */
1210 miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
1211 }
1212 puts (" done\n");
1213 udelay (500000); /* another 500 ms (results in faster booting) */
1214 }
1215
1216 get_speed:
1217 if (reg == CONFIG_FIXED_PHY) {
1218 for (i = 0; i < ARRAY_SIZE(fixed_phy_port); i++) {
1219 if (devnum == fixed_phy_port[i].devnum) {
1220 speed = fixed_phy_port[i].speed;
1221 duplex = fixed_phy_port[i].duplex;
1222 break;
1223 }
1224 }
1225
1226 if (i == ARRAY_SIZE(fixed_phy_port)) {
1227 printf("ERROR: PHY (%s) not configured correctly!\n",
1228 dev->name);
1229 return -1;
1230 }
1231 } else {
1232 speed = miiphy_speed(dev->name, reg);
1233 duplex = miiphy_duplex(dev->name, reg);
1234 }
1235
1236 if (hw_p->print_speed) {
1237 hw_p->print_speed = 0;
1238 printf ("ENET Speed is %d Mbps - %s duplex connection (EMAC%d)\n",
1239 (int) speed, (duplex == HALF) ? "HALF" : "FULL",
1240 hw_p->devnum);
1241 }
1242
1243 #if defined(CONFIG_440) && \
1244 !defined(CONFIG_440SP) && !defined(CONFIG_440SPE) && \
1245 !defined(CONFIG_440EPX) && !defined(CONFIG_440GRX) && \
1246 !defined(CONFIG_460EX) && !defined(CONFIG_460GT)
1247 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
1248 mfsdr(SDR0_MFR, reg);
1249 if (speed == 100) {
1250 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
1251 } else {
1252 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
1253 }
1254 mtsdr(SDR0_MFR, reg);
1255 #endif
1256
1257 /* Set ZMII/RGMII speed according to the phy link speed */
1258 reg = in_be32((void *)ZMII0_SSR);
1259 if ( (speed == 100) || (speed == 1000) )
1260 out_be32((void *)ZMII0_SSR, reg | (ZMII0_SSR_SP << ZMII0_SSR_V (devnum)));
1261 else
1262 out_be32((void *)ZMII0_SSR, reg & (~(ZMII0_SSR_SP << ZMII0_SSR_V (devnum))));
1263
1264 if ((devnum == 2) || (devnum == 3)) {
1265 if (speed == 1000)
1266 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
1267 else if (speed == 100)
1268 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
1269 else if (speed == 10)
1270 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
1271 else {
1272 printf("Error in RGMII Speed\n");
1273 return -1;
1274 }
1275 out_be32((void *)RGMII_SSR, reg);
1276 }
1277 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
1278
1279 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1280 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1281 defined(CONFIG_405EX)
1282 if (devnum >= 2)
1283 rgmii_channel = devnum - 2;
1284 else
1285 rgmii_channel = devnum;
1286
1287 if (speed == 1000)
1288 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V(rgmii_channel));
1289 else if (speed == 100)
1290 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V(rgmii_channel));
1291 else if (speed == 10)
1292 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V(rgmii_channel));
1293 else {
1294 printf("Error in RGMII Speed\n");
1295 return -1;
1296 }
1297 out_be32((void *)RGMII_SSR, reg);
1298 #if defined(CONFIG_460GT)
1299 if ((devnum == 2) || (devnum == 3))
1300 out_be32((void *)RGMII_SSR + RGMII1_BASE_OFFSET, reg);
1301 #endif
1302 #endif
1303
1304 /* set the Mal configuration reg */
1305 #if defined(CONFIG_440GX) || \
1306 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1307 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
1308 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1309 defined(CONFIG_405EX)
1310 mtdcr (MAL0_CFG, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
1311 MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
1312 #else
1313 mtdcr (MAL0_CFG, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
1314 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
1315 if (get_pvr() == PVR_440GP_RB) {
1316 mtdcr (MAL0_CFG, mfdcr(MAL0_CFG) & ~MAL_CR_PLBB);
1317 }
1318 #endif
1319
1320 /*
 1321 	 * Malloc MAL buffer descriptors, make sure they are
 1322 	 * aligned on cache line boundary size
 1323 	 * (401/403/IOP480 = 16, 405 = 32)
 1324 	 * and don't cross cache block boundaries.
1325 */
1326 if (hw_p->first_init == 0) {
1327 debug("*** Allocating descriptor memory ***\n");
1328
1329 bd_cached = (u32)malloc_aligned(MAL_ALLOC_SIZE, 4096);
1330 if (!bd_cached) {
1331 printf("%s: Error allocating MAL descriptor buffers!\n", __func__);
1332 return -1;
1333 }
1334
1335 #ifdef CONFIG_4xx_DCACHE
1336 flush_dcache_range(bd_cached, bd_cached + MAL_ALLOC_SIZE);
1337 if (!last_used_ea)
1338 #if defined(CONFIG_SYS_MEM_TOP_HIDE)
1339 bd_uncached = bis->bi_memsize + CONFIG_SYS_MEM_TOP_HIDE;
1340 #else
1341 bd_uncached = bis->bi_memsize;
1342 #endif
1343 else
1344 bd_uncached = last_used_ea + MAL_ALLOC_SIZE;
1345
1346 last_used_ea = bd_uncached;
1347 program_tlb(bd_cached, bd_uncached, MAL_ALLOC_SIZE,
1348 TLB_WORD2_I_ENABLE);
1349 #else
1350 bd_uncached = bd_cached;
1351 #endif
1352 hw_p->tx_phys = bd_cached;
1353 hw_p->rx_phys = bd_cached + MAL_TX_DESC_SIZE;
1354 hw_p->tx = (mal_desc_t *)(bd_uncached);
1355 hw_p->rx = (mal_desc_t *)(bd_uncached + MAL_TX_DESC_SIZE);
1356 debug("hw_p->tx=%08x, hw_p->rx=%08x\n", hw_p->tx, hw_p->rx);
1357 }
1358
1359 for (i = 0; i < NUM_TX_BUFF; i++) {
1360 hw_p->tx[i].ctrl = 0;
1361 hw_p->tx[i].data_len = 0;
1362 if (hw_p->first_init == 0)
1363 hw_p->txbuf_ptr = malloc_aligned(MAL_ALLOC_SIZE,
1364 L1_CACHE_BYTES);
1365 hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
1366 if ((NUM_TX_BUFF - 1) == i)
1367 hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
1368 hw_p->tx_run[i] = -1;
1369 debug("TX_BUFF %d @ 0x%08lx\n", i, (u32)hw_p->tx[i].data_ptr);
1370 }
1371
1372 for (i = 0; i < NUM_RX_BUFF; i++) {
1373 hw_p->rx[i].ctrl = 0;
1374 hw_p->rx[i].data_len = 0;
1375 hw_p->rx[i].data_ptr = (char *)NetRxPackets[i];
1376 if ((NUM_RX_BUFF - 1) == i)
1377 hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
1378 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
1379 hw_p->rx_ready[i] = -1;
1380 debug("RX_BUFF %d @ 0x%08lx\n", i, (u32)hw_p->rx[i].data_ptr);
1381 }
1382
1383 reg = 0x00000000;
1384
1385 reg |= dev->enetaddr[0]; /* set high address */
1386 reg = reg << 8;
1387 reg |= dev->enetaddr[1];
1388
1389 out_be32((void *)EMAC0_IAH + hw_p->hw_addr, reg);
1390
1391 reg = 0x00000000;
1392 reg |= dev->enetaddr[2]; /* set low address */
1393 reg = reg << 8;
1394 reg |= dev->enetaddr[3];
1395 reg = reg << 8;
1396 reg |= dev->enetaddr[4];
1397 reg = reg << 8;
1398 reg |= dev->enetaddr[5];
1399
1400 out_be32((void *)EMAC0_IAL + hw_p->hw_addr, reg);
1401
1402 switch (devnum) {
1403 case 1:
1404 /* setup MAL tx & rx channel pointers */
1405 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
1406 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1407 #else
1408 mtdcr (MAL0_TXCTP1R, hw_p->tx_phys);
1409 #endif
1410 #if defined(CONFIG_440)
1411 mtdcr (MAL0_TXBADDR, 0x0);
1412 mtdcr (MAL0_RXBADDR, 0x0);
1413 #endif
1414
1415 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
1416 mtdcr (MAL0_RXCTP8R, hw_p->rx_phys);
1417 /* set RX buffer size */
1418 mtdcr (MAL0_RCBS8, ENET_MAX_MTU_ALIGNED / 16);
1419 #else
1420 mtdcr (MAL0_RXCTP1R, hw_p->rx_phys);
1421 /* set RX buffer size */
1422 mtdcr (MAL0_RCBS1, ENET_MAX_MTU_ALIGNED / 16);
1423 #endif
1424 break;
1425 #if defined (CONFIG_440GX)
1426 case 2:
1427 /* setup MAL tx & rx channel pointers */
1428 mtdcr (MAL0_TXBADDR, 0x0);
1429 mtdcr (MAL0_RXBADDR, 0x0);
1430 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1431 mtdcr (MAL0_RXCTP2R, hw_p->rx_phys);
1432 /* set RX buffer size */
1433 mtdcr (MAL0_RCBS2, ENET_MAX_MTU_ALIGNED / 16);
1434 break;
1435 case 3:
1436 /* setup MAL tx & rx channel pointers */
1437 mtdcr (MAL0_TXBADDR, 0x0);
1438 mtdcr (MAL0_TXCTP3R, hw_p->tx_phys);
1439 mtdcr (MAL0_RXBADDR, 0x0);
1440 mtdcr (MAL0_RXCTP3R, hw_p->rx_phys);
1441 /* set RX buffer size */
1442 mtdcr (MAL0_RCBS3, ENET_MAX_MTU_ALIGNED / 16);
1443 break;
1444 #endif /* CONFIG_440GX */
1445 #if defined (CONFIG_460GT)
1446 case 2:
1447 /* setup MAL tx & rx channel pointers */
1448 mtdcr (MAL0_TXBADDR, 0x0);
1449 mtdcr (MAL0_RXBADDR, 0x0);
1450 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1451 mtdcr (MAL0_RXCTP16R, hw_p->rx_phys);
1452 /* set RX buffer size */
1453 mtdcr (MAL0_RCBS16, ENET_MAX_MTU_ALIGNED / 16);
1454 break;
1455 case 3:
1456 /* setup MAL tx & rx channel pointers */
1457 mtdcr (MAL0_TXBADDR, 0x0);
1458 mtdcr (MAL0_RXBADDR, 0x0);
1459 mtdcr (MAL0_TXCTP3R, hw_p->tx_phys);
1460 mtdcr (MAL0_RXCTP24R, hw_p->rx_phys);
1461 /* set RX buffer size */
1462 mtdcr (MAL0_RCBS24, ENET_MAX_MTU_ALIGNED / 16);
1463 break;
1464 #endif /* CONFIG_460GT */
1465 case 0:
1466 default:
1467 /* setup MAL tx & rx channel pointers */
1468 #if defined(CONFIG_440)
1469 mtdcr (MAL0_TXBADDR, 0x0);
1470 mtdcr (MAL0_RXBADDR, 0x0);
1471 #endif
1472 mtdcr (MAL0_TXCTP0R, hw_p->tx_phys);
1473 mtdcr (MAL0_RXCTP0R, hw_p->rx_phys);
1474 /* set RX buffer size */
1475 mtdcr (MAL0_RCBS0, ENET_MAX_MTU_ALIGNED / 16);
1476 break;
1477 }
1478
1479 /* Enable MAL transmit and receive channels */
1480 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
1481 mtdcr (MAL0_TXCASR, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
1482 #else
1483 mtdcr (MAL0_TXCASR, (MAL_TXRX_CASR >> hw_p->devnum));
1484 #endif
1485 mtdcr (MAL0_RXCASR, (MAL_TXRX_CASR >> hw_p->devnum));
1486
1487 /* set transmit enable & receive enable */
1488 out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_TXE | EMAC_MR0_RXE);
1489
1490 mode_reg = in_be32((void *)EMAC0_MR1 + hw_p->hw_addr);
1491
1492 /* set rx-/tx-fifo size */
1493 mode_reg = (mode_reg & ~EMAC_MR1_FIFO_MASK) | EMAC_MR1_FIFO_SIZE;
1494
1495 /* set speed */
1496 if (speed == _1000BASET) {
1497 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
1498 unsigned long pfc1;
1499
1500 mfsdr (SDR0_PFC1, pfc1);
1501 pfc1 |= SDR0_PFC1_EM_1000;
1502 mtsdr (SDR0_PFC1, pfc1);
1503 #endif
1504 mode_reg = mode_reg | EMAC_MR1_MF_1000MBPS | EMAC_MR1_IST;
1505 } else if (speed == _100BASET)
1506 mode_reg = mode_reg | EMAC_MR1_MF_100MBPS | EMAC_MR1_IST;
1507 else
1508 mode_reg = mode_reg & ~0x00C00000; /* 10 MBPS */
1509 if (duplex == FULL)
1510 mode_reg = mode_reg | 0x80000000 | EMAC_MR1_IST;
1511
1512 out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
1513
 1514 	/* Enable broadcast and individual address */
1515 /* TBS: enabling runts as some misbehaved nics will send runts */
1516 out_be32((void *)EMAC0_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
1517
1518 /* we probably need to set the tx mode1 reg? maybe at tx time */
1519
1520 /* set transmit request threshold register */
1521 out_be32((void *)EMAC0_TRTR + hw_p->hw_addr, 0x18000000); /* 256 byte threshold */
1522
1523 /* set receive low/high water mark register */
1524 #if defined(CONFIG_440)
 1525 	/* 440s have a 64 byte burst length */
1526 out_be32((void *)EMAC0_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
1527 #else
1528 /* 405s have a 16 byte burst length */
1529 out_be32((void *)EMAC0_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
1530 #endif /* defined(CONFIG_440) */
1531 out_be32((void *)EMAC0_TMR1 + hw_p->hw_addr, 0xf8640000);
1532
1533 /* Set fifo limit entry in tx mode 0 */
1534 out_be32((void *)EMAC0_TMR0 + hw_p->hw_addr, 0x00000003);
1535 /* Frame gap set */
1536 out_be32((void *)EMAC0_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
1537
1538 /* Set EMAC IER */
1539 hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
1540 if (speed == _100BASET)
1541 hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
1542
1543 out_be32((void *)EMAC0_ISR + hw_p->hw_addr, 0xffffffff); /* clear pending interrupts */
1544 out_be32((void *)EMAC0_IER + hw_p->hw_addr, hw_p->emac_ier);
1545
1546 if (hw_p->first_init == 0) {
1547 /*
1548 * Connect interrupt service routines
1549 */
1550 irq_install_handler(ETH_IRQ_NUM(hw_p->devnum),
1551 (interrupt_handler_t *) enetInt, dev);
1552 }
1553
1554 mtmsr (msr); /* enable interrupts again */
1555
1556 hw_p->bis = bis;
1557 hw_p->first_init = 1;
1558
1559 return 0;
1560 }
1561
1562
1563 static int ppc_4xx_eth_send (struct eth_device *dev, volatile void *ptr,
1564 int len)
1565 {
1566 struct enet_frame *ef_ptr;
1567 ulong time_start, time_now;
1568 unsigned long temp_txm0;
1569 EMAC_4XX_HW_PST hw_p = dev->priv;
1570
1571 ef_ptr = (struct enet_frame *) ptr;
1572
1573 /*-----------------------------------------------------------------------+
1574 * Copy in our address into the frame.
1575 *-----------------------------------------------------------------------*/
1576 (void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
1577
1578 /*-----------------------------------------------------------------------+
1579 * If frame is too long or too short, modify length.
1580 *-----------------------------------------------------------------------*/
1581 /* TBS: where does the fragment go???? */
1582 if (len > ENET_MAX_MTU)
1583 len = ENET_MAX_MTU;
1584
1585 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
1586 memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
1587 flush_dcache_range((u32)hw_p->txbuf_ptr, (u32)hw_p->txbuf_ptr + len);
1588
1589 /*-----------------------------------------------------------------------+
1590 * set TX Buffer busy, and send it
1591 *-----------------------------------------------------------------------*/
1592 hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
1593 EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
1594 ~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
1595 if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
1596 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
1597
1598 hw_p->tx[hw_p->tx_slot].data_len = (short) len;
1599 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
1600
1601 sync();
1602
1603 out_be32((void *)EMAC0_TMR0 + hw_p->hw_addr,
1604 in_be32((void *)EMAC0_TMR0 + hw_p->hw_addr) | EMAC_TMR0_GNP0);
1605 #ifdef INFO_4XX_ENET
1606 hw_p->stats.pkts_tx++;
1607 #endif
1608
1609 /*-----------------------------------------------------------------------+
 1610 	 * poll until the packet is sent and then make sure it is OK
1611 *-----------------------------------------------------------------------*/
1612 time_start = get_timer (0);
1613 while (1) {
1614 temp_txm0 = in_be32((void *)EMAC0_TMR0 + hw_p->hw_addr);
1615 /* loop until either TINT turns on or 3 seconds elapse */
1616 if ((temp_txm0 & EMAC_TMR0_GNP0) != 0) {
 1617 			/* transmit not complete yet (GNP0 still set);
 1618 			 * give up if more than 3 seconds have elapsed,
 1619 			 * otherwise keep polling
1620 */
1621 time_now = get_timer (0);
1622 if ((time_now - time_start) > 3000) {
1623 return (-1);
1624 }
1625 } else {
1626 return (len);
1627 }
1628 }
1629 }
1630
1631 int enetInt (struct eth_device *dev)
1632 {
1633 int serviced;
1634 int rc = -1; /* default to not us */
1635 u32 mal_isr;
1636 u32 emac_isr = 0;
1637 u32 mal_eob;
1638 u32 uic_mal;
1639 u32 uic_mal_err;
1640 u32 uic_emac;
1641 u32 uic_emac_b;
1642 EMAC_4XX_HW_PST hw_p;
1643
1644 /*
1645 * Because the mal is generic, we need to get the current
1646 * eth device
1647 */
1648 dev = eth_get_dev();
1649
1650 hw_p = dev->priv;
1651
1652 /* enter loop that stays in interrupt code until nothing to service */
1653 do {
1654 serviced = 0;
1655
1656 uic_mal = mfdcr(UIC_BASE_MAL + UIC_MSR);
1657 uic_mal_err = mfdcr(UIC_BASE_MAL_ERR + UIC_MSR);
1658 uic_emac = mfdcr(UIC_BASE_EMAC + UIC_MSR);
1659 uic_emac_b = mfdcr(UIC_BASE_EMAC_B + UIC_MSR);
1660
1661 if (!(uic_mal & (UIC_MAL_RXEOB | UIC_MAL_TXEOB))
1662 && !(uic_mal_err & (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE))
1663 && !(uic_emac & UIC_ETHx) && !(uic_emac_b & UIC_ETHxB)) {
1664 /* not for us */
1665 return (rc);
1666 }
1667
1668 /* get and clear controller status interrupts */
1669 /* look at MAL and EMAC error interrupts */
1670 if (uic_mal_err & (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)) {
1671 /* we have a MAL error interrupt */
1672 mal_isr = mfdcr(MAL0_ESR);
1673 mal_err(dev, mal_isr, uic_mal_err,
1674 MAL_UIC_DEF, MAL_UIC_ERR);
1675
1676 /* clear MAL error interrupt status bits */
1677 mtdcr(UIC_BASE_MAL_ERR + UIC_SR,
1678 UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE);
1679
1680 return -1;
1681 }
1682
1683 /* look for EMAC errors */
1684 if ((uic_emac & UIC_ETHx) || (uic_emac_b & UIC_ETHxB)) {
1685 emac_isr = in_be32((void *)EMAC0_ISR + hw_p->hw_addr);
1686 emac_err(dev, emac_isr);
1687
1688 /* clear EMAC error interrupt status bits */
1689 mtdcr(UIC_BASE_EMAC + UIC_SR, UIC_ETHx);
1690 mtdcr(UIC_BASE_EMAC_B + UIC_SR, UIC_ETHxB);
1691
1692 return -1;
1693 }
1694
1695 /* handle MAL TX EOB interrupt from a tx */
1696 if (uic_mal & UIC_MAL_TXEOB) {
1697 /* clear MAL interrupt status bits */
1698 mal_eob = mfdcr(MAL0_TXEOBISR);
1699 mtdcr(MAL0_TXEOBISR, mal_eob);
1700 mtdcr(UIC_BASE_MAL + UIC_SR, UIC_MAL_TXEOB);
1701
1702 /* indicate that we serviced an interrupt */
1703 serviced = 1;
1704 rc = 0;
1705 }
1706
1707 /* handle MAL RX EOB interrupt from a receive */
1708 /* check for EOB on valid channels */
1709 if (uic_mal & UIC_MAL_RXEOB) {
1710 mal_eob = mfdcr(MAL0_RXEOBISR);
1711 if (mal_eob &
1712 (0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL))) {
1713 /* push packet to upper layer */
1714 enet_rcv(dev, emac_isr);
1715
1716 /* clear MAL interrupt status bits */
1717 mtdcr(UIC_BASE_MAL + UIC_SR, UIC_MAL_RXEOB);
1718
1719 /* indicate that we serviced an interrupt */
1720 serviced = 1;
1721 rc = 0;
1722 }
1723 }
1724 #if defined(CONFIG_405EZ)
1725 /*
1726 * On 405EZ the RX-/TX-interrupts are coalesced into
1727 * one IRQ bit in the UIC. We need to acknowledge the
1728 * RX-/TX-interrupts in the SDR0_ICINTSTAT reg as well.
1729 */
1730 mtsdr(SDR0_ICINTSTAT,
1731 SDR_ICRX_STAT | SDR_ICTX0_STAT | SDR_ICTX1_STAT);
1732 #endif /* defined(CONFIG_405EZ) */
1733 } while (serviced);
1734
1735 return (rc);
1736 }
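/*
 * enetInt() is installed (see ppc_4xx_eth_initialize below) as the handler
 * for all five MAL interrupt vectors (SERR, TXEOB, RXEOB, TXDE, RXDE).  It
 * keeps looping while it finds work to do, returns 0 once at least one
 * event was serviced, and -1 if the interrupt was not meant for this driver.
 */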
1737
1738 /*-----------------------------------------------------------------------------+
1739 * MAL Error Routine
1740 *-----------------------------------------------------------------------------*/
1741 static void mal_err (struct eth_device *dev, unsigned long isr,
1742 unsigned long uic, unsigned long maldef,
1743 unsigned long mal_errr)
1744 {
1745 EMAC_4XX_HW_PST hw_p = dev->priv;
1746
1747 mtdcr (MAL0_ESR, isr); /* clear interrupt */
1748
1749 /* clear DE interrupt */
1750 mtdcr (MAL0_TXDEIR, 0xC0000000);
1751 mtdcr (MAL0_RXDEIR, 0x80000000);
1752
1753 #ifdef INFO_4XX_ENET
1754 printf ("\nMAL error occurred... ISR = %lx UIC = %lx MAL_DEF = %lx MAL_ERR = %lx\n", isr, uic, maldef, mal_errr);
1755 #endif
1756
1757 eth_init (hw_p->bis); /* start again... */
1758 }
1759
1760 /*-----------------------------------------------------------------------------+
1761 * EMAC Error Routine
1762 *-----------------------------------------------------------------------------*/
1763 static void emac_err (struct eth_device *dev, unsigned long isr)
1764 {
1765 EMAC_4XX_HW_PST hw_p = dev->priv;
1766
1767 printf ("EMAC%d error occurred... ISR = %lx\n", hw_p->devnum, isr);
1768 out_be32((void *)EMAC0_ISR + hw_p->hw_addr, isr);
1769 }
1770
1771 /*-----------------------------------------------------------------------------+
1772 * enet_rcv() handles the ethernet receive data
1773 *-----------------------------------------------------------------------------*/
1774 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1775 {
1776 struct enet_frame *ef_ptr;
1777 unsigned long data_len;
1778 unsigned long rx_eob_isr;
1779 EMAC_4XX_HW_PST hw_p = dev->priv;
1780
1781 int handled = 0;
1782 int i;
1783 int loop_count = 0;
1784
1785 rx_eob_isr = mfdcr (MAL0_RXEOBISR);
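/*
 * Each EMAC owns MAL_RX_CHAN_MUL receive channels, and channel 0 is
 * reported in the most-significant bit of MAL0_RXEOBISR, so the EOB bit
 * for this device is 0x80000000 >> (devnum * MAL_RX_CHAN_MUL).
 */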
1786 if ((0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL)) & rx_eob_isr) {
1787 /* clear EOB */
1788 mtdcr (MAL0_RXEOBISR, rx_eob_isr);
1789
1790 /* EMAC RX done */
1791 while (1) { /* do all */
1792 i = hw_p->rx_slot;
1793
1794 if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1795 || (loop_count >= NUM_RX_BUFF))
1796 break;
1797
1798 loop_count++;
1799 handled++;
1800 data_len = (unsigned long) hw_p->rx[i].data_len & 0x0fff; /* Get len */
1801 if (data_len) {
1802 if (data_len > ENET_MAX_MTU) /* Check len */
1803 data_len = 0;
1804 else {
1805 if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) { /* Check Errors */
1806 data_len = 0;
1807 hw_p->stats.rx_err_log[hw_p->rx_err_index] =
1808 hw_p->rx[i].ctrl;
1809 hw_p->rx_err_index++;
1810 if (hw_p->rx_err_index ==
1811 MAX_ERR_LOG)
1812 hw_p->rx_err_index = 0;
1815 } /* emac_errors */
1816 } /* data_len < max mtu */
1817 } /* if data_len */
1818 if (!data_len) { /* no data */
1819 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY; /* Free Recv Buffer */
1820
1821 hw_p->stats.data_len_err++; /* Error at Rx */
1822 }
1823
1824 /* !data_len */
1825 /* AS.HARNOIS */
1826 /* Check if user has already eaten buffer */
1827 /* if not => ERROR */
1828 else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1829 if (hw_p->is_receiving)
1830 printf ("ERROR : Receive buffers are full!\n");
1831 break;
1832 } else {
1833 hw_p->stats.rx_frames++;
1834 hw_p->stats.rx += data_len;
1835 ef_ptr = (struct enet_frame *)
1836 hw_p->rx[i].data_ptr;
1837 #ifdef INFO_4XX_ENET
1838 hw_p->stats.pkts_rx++;
1839 #endif
1840 /* AS.HARNOIS
1841 * use ring buffer
1842 */
1843 hw_p->rx_ready[hw_p->rx_i_index] = i;
1844 hw_p->rx_i_index++;
1845 if (NUM_RX_BUFF == hw_p->rx_i_index)
1846 hw_p->rx_i_index = 0;
1847
1848 hw_p->rx_slot++;
1849 if (NUM_RX_BUFF == hw_p->rx_slot)
1850 hw_p->rx_slot = 0;
1851
1852 /* AS.HARNOIS
1853 * free receive buffer only when
1854 * buffer has been handled (eth_rx)
1855 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1856 */
1857 } /* if data_len */
1858 } /* while */
1859 } /* if RX EOB for this EMAC's MAL channel */
1860 }
1861
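/*
 * Receive hand-off: enet_rcv() (interrupt context) records the index of
 * each filled RX descriptor in the rx_ready[] ring at rx_i_index, while
 * ppc_4xx_eth_rx() below drains the ring at rx_u_index, hands the data to
 * the network stack and only then marks the descriptor EMPTY again.  A
 * slot value of -1 means "nothing pending".
 */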
1862
1863 static int ppc_4xx_eth_rx (struct eth_device *dev)
1864 {
1865 int length;
1866 int user_index;
1867 unsigned long msr;
1868 EMAC_4XX_HW_PST hw_p = dev->priv;
1869
1870 hw_p->is_receiving = 1; /* tell driver */
1871
1872 for (;;) {
1873 /* AS.HARNOIS
1874 * use ring buffer and
1875 * get index from rx buffer descriptor queue
1876 */
1877 user_index = hw_p->rx_ready[hw_p->rx_u_index];
1878 if (user_index == -1) {
1879 length = -1;
1880 break; /* nothing received - leave for() loop */
1881 }
1882
1883 msr = mfmsr ();
1884 mtmsr (msr & ~(MSR_EE));
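/*
 * External interrupts are masked here so that the ring indices and
 * descriptor flags below are not modified concurrently by enetInt() /
 * enet_rcv(); the saved MSR is restored at the end of the loop body.
 */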
1885
1886 length = hw_p->rx[user_index].data_len & 0x0fff;
1887
1888 /* Pass the packet up to the protocol layers. */
1889 /* NetReceive(NetRxPackets[rxIdx], length - 4); */
1890 /* NetReceive(NetRxPackets[i], length); */
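/*
 * The buffer was written by the MAL via DMA, so the cached copy must be
 * invalidated before it is read.  Only length - 4 bytes are passed up;
 * the trailing four bytes are presumably the frame's FCS and are dropped.
 */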
1891 invalidate_dcache_range((u32)hw_p->rx[user_index].data_ptr,
1892 (u32)hw_p->rx[user_index].data_ptr +
1893 length - 4);
1894 NetReceive (NetRxPackets[user_index], length - 4);
1895 /* Free Recv Buffer */
1896 hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1897 /* Free rx buffer descriptor queue */
1898 hw_p->rx_ready[hw_p->rx_u_index] = -1;
1899 hw_p->rx_u_index++;
1900 if (NUM_RX_BUFF == hw_p->rx_u_index)
1901 hw_p->rx_u_index = 0;
1902
1903 #ifdef INFO_4XX_ENET
1904 hw_p->stats.pkts_handled++;
1905 #endif
1906
1907 mtmsr (msr); /* Enable IRQ's */
1908 }
1909
1910 hw_p->is_receiving = 0; /* tell driver */
1911
1912 return length;
1913 }
1914
1915 int ppc_4xx_eth_initialize (bd_t * bis)
1916 {
1917 static int virgin = 0;
1918 struct eth_device *dev;
1919 int eth_num = 0;
1920 EMAC_4XX_HW_PST hw = NULL;
1921 u8 ethaddr[4 + CONFIG_EMAC_NR_START][6];
1922 u32 hw_addr[4];
1923 u32 mal_ier;
1924
1925 #if defined(CONFIG_440GX)
1926 unsigned long pfc1;
1927
1928 mfsdr (SDR0_PFC1, pfc1);
1929 pfc1 &= ~(0x01e00000);
1930 pfc1 |= 0x01200000;
1931 mtsdr (SDR0_PFC1, pfc1);
1932 #endif
1933
1934 /* first clear all mac-addresses */
1935 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++)
1936 memcpy(ethaddr[eth_num], "\0\0\0\0\0\0", 6);
1937
1938 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1939 int ethaddr_idx = eth_num + CONFIG_EMAC_NR_START;
1940 switch (eth_num) {
1941 default: /* fall through */
1942 case 0:
1943 eth_getenv_enetaddr("ethaddr", ethaddr[ethaddr_idx]);
1944 hw_addr[eth_num] = 0x0;
1945 break;
1946 #ifdef CONFIG_HAS_ETH1
1947 case 1:
1948 eth_getenv_enetaddr("eth1addr", ethaddr[ethaddr_idx]);
1949 hw_addr[eth_num] = 0x100;
1950 break;
1951 #endif
1952 #ifdef CONFIG_HAS_ETH2
1953 case 2:
1954 eth_getenv_enetaddr("eth2addr", ethaddr[ethaddr_idx]);
1955 #if defined(CONFIG_460GT)
1956 hw_addr[eth_num] = 0x300;
1957 #else
1958 hw_addr[eth_num] = 0x400;
1959 #endif
1960 break;
1961 #endif
1962 #ifdef CONFIG_HAS_ETH3
1963 case 3:
1964 eth_getenv_enetaddr("eth3addr", ethaddr[ethaddr_idx]);
1965 #if defined(CONFIG_460GT)
1966 hw_addr[eth_num] = 0x400;
1967 #else
1968 hw_addr[eth_num] = 0x600;
1969 #endif
1970 break;
1971 #endif
1972 }
1973 }
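/*
 * hw_addr[] holds each EMAC's register offset from the EMAC0 base; the
 * rest of the driver adds it to the EMAC0_xxx register addresses (e.g.
 * EMAC0_TMR0 + hw_p->hw_addr) to reach the right controller.
 */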
1974
1975 /* set phy num and mode */
1976 bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1977 bis->bi_phymode[0] = 0;
1978
1979 #if defined(CONFIG_PHY1_ADDR)
1980 bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1981 bis->bi_phymode[1] = 0;
1982 #endif
1983 #if defined(CONFIG_440GX)
1984 bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1985 bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
1986 bis->bi_phymode[2] = 2;
1987 bis->bi_phymode[3] = 2;
1988 #endif
1989
1990 #if defined(CONFIG_440GX) || \
1991 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1992 defined(CONFIG_405EX)
1993 ppc_4xx_eth_setup_bridge(0, bis);
1994 #endif
1995
1996 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1997 /*
1998 * See if we can actually bring up the interface,
1999 * otherwise, skip it
2000 */
2001 if (memcmp (ethaddr[eth_num], "\0\0\0\0\0\0", 6) == 0) {
2002 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
2003 continue;
2004 }
2005
2006 /* Allocate device structure */
2007 dev = (struct eth_device *) malloc (sizeof (*dev));
2008 if (dev == NULL) {
2009 printf ("ppc_4xx_eth_initialize: "
2010 "Cannot allocate eth_device %d\n", eth_num);
2011 return (-1);
2012 }
2013 memset(dev, 0, sizeof(*dev));
2014
2015 /* Allocate our private use data */
2016 hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
2017 if (hw == NULL) {
2018 printf ("ppc_4xx_eth_initialize: "
2019 "Cannot allocate private hw data for eth_device %d",
2020 eth_num);
2021 free (dev);
2022 return (-1);
2023 }
2024 memset(hw, 0, sizeof(*hw));
2025
2026 hw->hw_addr = hw_addr[eth_num];
2027 memcpy (dev->enetaddr, ethaddr[eth_num], 6);
2028 hw->devnum = eth_num;
2029 hw->print_speed = 1;
2030
2031 sprintf (dev->name, "ppc_4xx_eth%d", eth_num - CONFIG_EMAC_NR_START);
2032 dev->priv = (void *) hw;
2033 dev->init = ppc_4xx_eth_init;
2034 dev->halt = ppc_4xx_eth_halt;
2035 dev->send = ppc_4xx_eth_send;
2036 dev->recv = ppc_4xx_eth_rx;
2037
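/*
 * The MAL and its UIC vectors are shared by all EMACs, so the MAL
 * interrupt enables and the enetInt() handler are set up only once,
 * on the first pass through this loop (the "virgin" flag).
 */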
2038 if (0 == virgin) {
2039 /* set the MAL IER ??? names may change with new spec ??? */
2040 #if defined(CONFIG_440SPE) || \
2041 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
2042 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
2043 defined(CONFIG_405EX)
2044 mal_ier =
2045 MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE |
2046 MAL_IER_DE | MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE ;
2047 #else
2048 mal_ier =
2049 MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
2050 MAL_IER_OPBE | MAL_IER_PLBE;
2051 #endif
2052 mtdcr (MAL0_ESR, 0xffffffff); /* clear pending interrupts */
2053 mtdcr (MAL0_TXDEIR, 0xffffffff); /* clear pending interrupts */
2054 mtdcr (MAL0_RXDEIR, 0xffffffff); /* clear pending interrupts */
2055 mtdcr (MAL0_IER, mal_ier);
2056
2057 /* install MAL interrupt handler */
2058 irq_install_handler (VECNUM_MAL_SERR,
2059 (interrupt_handler_t *) enetInt,
2060 dev);
2061 irq_install_handler (VECNUM_MAL_TXEOB,
2062 (interrupt_handler_t *) enetInt,
2063 dev);
2064 irq_install_handler (VECNUM_MAL_RXEOB,
2065 (interrupt_handler_t *) enetInt,
2066 dev);
2067 irq_install_handler (VECNUM_MAL_TXDE,
2068 (interrupt_handler_t *) enetInt,
2069 dev);
2070 irq_install_handler (VECNUM_MAL_RXDE,
2071 (interrupt_handler_t *) enetInt,
2072 dev);
2073 virgin = 1;
2074 }
2075
2076 eth_register (dev);
2077
2078 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
2079 miiphy_register (dev->name,
2080 emac4xx_miiphy_read, emac4xx_miiphy_write);
2081 #endif
2082 } /* end for each supported device */
2083
2084 return 0;
2085 }
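/*
 * Illustrative sketch (not part of the driver): how a platform would
 * typically hook this driver into U-Boot's legacy network stack.  The
 * cpu_eth_init() hook and its exact usage are assumptions here and may
 * differ between U-Boot versions and boards.
 */
#if 0
int cpu_eth_init(bd_t *bis)
{
	/* register one eth_device per usable EMAC with the net core */
	ppc_4xx_eth_initialize(bis);
	return 0;
}
#endif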