1 /*
2 * SPDX-License-Identifier: GPL-2.0 IBM-pibs
3 */
4 /*-----------------------------------------------------------------------------+
5 *
6 * File Name: enetemac.c
7 *
8 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
9 *
10 * Author: Mark Wisner
11 *
12 * Change Activity-
13 *
14 * Date Description of Change BY
15 * --------- --------------------- ---
16 * 05-May-99 Created MKW
17 * 27-Jun-99 Clean up JWB
18 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
19 * 29-Jul-99 Added Full duplex support MKW
20 * 06-Aug-99 Changed names for Mal CR reg MKW
21 * 23-Aug-99 Turned off SYE when running at 10Mbs MKW
22 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
23 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
24 * to avoid chaining maximum sized packets. Push starting
25 * RX descriptor address up to the next cache line boundary.
26 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
27 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
28 * EMAC0_RXM register. JWB
29 * 12-Mar-01 anne-sophie.harnois@nextream.fr
30 * - Variables are compatible with those already defined in
31 * include/net.h
32 * - Receive buffer descriptor ring is used to send buffers
33 * to the user
34  *           - Info print about sent/received/handled packet number if
35 * INFO_405_ENET is set
36 * 17-Apr-01 stefan.roese@esd-electronics.com
37 * - MAL reset in "eth_halt" included
38 * - Enet speed and duplex output now in one line
39 * 08-May-01 stefan.roese@esd-electronics.com
40 * - MAL error handling added (eth_init called again)
41 * 13-Nov-01 stefan.roese@esd-electronics.com
42 * - Set IST bit in EMAC0_MR1 reg upon 100MBit or full duplex
43 * 04-Jan-02 stefan.roese@esd-electronics.com
44 * - Wait for PHY auto negotiation to complete added
45 * 06-Feb-02 stefan.roese@esd-electronics.com
46 * - Bug fixed in waiting for auto negotiation to complete
47 * 26-Feb-02 stefan.roese@esd-electronics.com
48 * - rx and tx buffer descriptors now allocated (no fixed address
49 * used anymore)
50 * 17-Jun-02 stefan.roese@esd-electronics.com
51 * - MAL error debug printf 'M' removed (rx de interrupt may
52 * occur upon many incoming packets with only 4 rx buffers).
53 *-----------------------------------------------------------------------------*
54 * 17-Nov-03 travis.sawyer@sandburst.com
55  *           - ported from 405gp_enet.c to utilize up to 4 EMAC ports
56 * in the 440GX. This port should work with the 440GP
57 * (2 EMACs) also
58 * 15-Aug-05 sr@denx.de
59 * - merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
60  *             now handling all 4xx CPUs.
61 *-----------------------------------------------------------------------------*/
62
63 #include <config.h>
64 #include <common.h>
65 #include <net.h>
66 #include <asm/processor.h>
67 #include <asm/io.h>
68 #include <asm/cache.h>
69 #include <asm/mmu.h>
70 #include <asm/ppc4xx.h>
71 #include <asm/ppc4xx-emac.h>
72 #include <asm/ppc4xx-mal.h>
73 #include <miiphy.h>
74 #include <malloc.h>
75 #include <linux/compiler.h>
76
77 #if !(defined(CONFIG_MII) || defined(CONFIG_CMD_MII))
78 #error "CONFIG_MII has to be defined!"
79 #endif
80
81 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
82 #define PHY_AUTONEGOTIATE_TIMEOUT 5000 /* 5000 ms autonegotiate timeout */
83
84 /* Ethernet Transmit and Receive Buffers */
85 /* AS.HARNOIS
86 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
87 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
88 */
89 #define ENET_MAX_MTU PKTSIZE
90 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
91
92 /*-----------------------------------------------------------------------------+
93 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
94 * Interrupt Controller).
95 *-----------------------------------------------------------------------------*/
96 #define ETH_IRQ_NUM(dev) (VECNUM_ETH0 + ((dev) * VECNUM_ETH1_OFFS))
97
98 #if defined(CONFIG_HAS_ETH3)
99 #if !defined(CONFIG_440GX)
100 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)) | \
101 			 UIC_MASK(ETH_IRQ_NUM(2)) | UIC_MASK(ETH_IRQ_NUM(3)))
102 #else
103 /* Unfortunately 440GX spreads EMAC interrupts on multiple UICs */
104 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)))
105 #define UIC_ETHxB	(UIC_MASK(ETH_IRQ_NUM(2)) | UIC_MASK(ETH_IRQ_NUM(3)))
106 #endif /* !defined(CONFIG_440GX) */
107 #elif defined(CONFIG_HAS_ETH2)
108 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)) | \
109 			 UIC_MASK(ETH_IRQ_NUM(2)))
110 #elif defined(CONFIG_HAS_ETH1)
111 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)))
112 #else
113 #define UIC_ETHx UIC_MASK(ETH_IRQ_NUM(0))
114 #endif
115
116 /*
117 * Define a default version for UIC_ETHxB for non 440GX so that we can
118 * use common code for all 4xx variants
119 */
120 #if !defined(UIC_ETHxB)
121 #define UIC_ETHxB 0
122 #endif
123
124 #define UIC_MAL_SERR UIC_MASK(VECNUM_MAL_SERR)
125 #define UIC_MAL_TXDE UIC_MASK(VECNUM_MAL_TXDE)
126 #define UIC_MAL_RXDE UIC_MASK(VECNUM_MAL_RXDE)
127 #define UIC_MAL_TXEOB UIC_MASK(VECNUM_MAL_TXEOB)
128 #define UIC_MAL_RXEOB UIC_MASK(VECNUM_MAL_RXEOB)
129
130 #define MAL_UIC_ERR (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
131 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
132
133 /*
134 * We have 3 different interrupt types:
135 * - MAL interrupts indicating successful transfer
136 * - MAL error interrupts indicating MAL related errors
137 * - EMAC interrupts indicating EMAC related errors
138 *
139  * All those interrupts can be on different UICs, but for now at
140  * least all interrupts of one type are on the same UIC. The only
141  * exception is the 440GX, where the EMAC interrupts are
142  * spread over two UICs!
143 */
144 #if defined(CONFIG_440GX)
145 #define UIC_BASE_MAL UIC1_DCR_BASE
146 #define UIC_BASE_MAL_ERR UIC2_DCR_BASE
147 #define UIC_BASE_EMAC UIC2_DCR_BASE
148 #define UIC_BASE_EMAC_B UIC3_DCR_BASE
149 #else
150 #define UIC_BASE_MAL (UIC0_DCR_BASE + (UIC_NR(VECNUM_MAL_TXEOB) * 0x10))
151 #define UIC_BASE_MAL_ERR (UIC0_DCR_BASE + (UIC_NR(VECNUM_MAL_SERR) * 0x10))
152 #define UIC_BASE_EMAC (UIC0_DCR_BASE + (UIC_NR(ETH_IRQ_NUM(0)) * 0x10))
153 #define UIC_BASE_EMAC_B (UIC0_DCR_BASE + (UIC_NR(ETH_IRQ_NUM(0)) * 0x10))
154 #endif
155
156 #undef INFO_4XX_ENET
157
158 #define BI_PHYMODE_NONE 0
159 #define BI_PHYMODE_ZMII 1
160 #define BI_PHYMODE_RGMII 2
161 #define BI_PHYMODE_GMII 3
162 #define BI_PHYMODE_RTBI 4
163 #define BI_PHYMODE_TBI 5
164 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
165 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
166 defined(CONFIG_405EX)
167 #define BI_PHYMODE_SMII 6
168 #define BI_PHYMODE_MII 7
169 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
170 #define BI_PHYMODE_RMII 8
171 #endif
172 #endif
173 #define BI_PHYMODE_SGMII 9
174
175 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
176 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
177 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
178 defined(CONFIG_405EX)
179 #define SDR0_MFR_ETH_CLK_SEL_V(n) ((0x01<<27) / (n+1))
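/*
 * (0x01 << 27) / (n + 1) evaluates to 0x08000000 for n = 0 and to
 * 0x04000000 for n = 1; only these two results are single-bit masks,
 * so this macro is only meaningful for the first two EMACs.
 */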
180 #endif
181
182 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
183 #define SDR0_ETH_CFG_CLK_SEL_V(n) (0x01 << (8 + n))
184 #endif
185
186 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
187 #define MAL_RX_CHAN_MUL 8 /* 460EX/GT uses MAL channel 8 for EMAC1 */
188 #else
189 #define MAL_RX_CHAN_MUL 1
190 #endif
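/*
 * MAL_RX_CHAN_MUL is used below when checking MAL0_RXEOBISR: the RX
 * end-of-buffer bit for a given EMAC is
 * (0x80000000 >> (devnum * MAL_RX_CHAN_MUL)), so EMAC1 maps to RX
 * channel 8 on 460EX/GT and to RX channel 1 elsewhere.
 */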
191
192 /*--------------------------------------------------------------------+
193 * Fixed PHY (PHY-less) support for Ethernet Ports.
194 *--------------------------------------------------------------------*/
195
196 /*
197 * Some boards do not have a PHY for each ethernet port. These ports
198 * are known as Fixed PHY (or PHY-less) ports. For such ports, set
199 * the appropriate CONFIG_PHY_ADDR equal to CONFIG_FIXED_PHY and
200  * then define CONFIG_SYS_FIXED_PHY_PORTS in the board configuration
201  * file to specify what the speed and duplex of these ports should
202  * be.
203 *
204 * For Example:
205 * #define CONFIG_FIXED_PHY 0xFFFFFFFF
206 *
207 * #define CONFIG_PHY_ADDR CONFIG_FIXED_PHY
208 * #define CONFIG_PHY1_ADDR 1
209 * #define CONFIG_PHY2_ADDR CONFIG_FIXED_PHY
210 * #define CONFIG_PHY3_ADDR 3
211 *
212 * #define CONFIG_SYS_FIXED_PHY_PORT(devnum,speed,duplex) \
213 * {devnum, speed, duplex},
214 *
215 * #define CONFIG_SYS_FIXED_PHY_PORTS \
216 * CONFIG_SYS_FIXED_PHY_PORT(0,1000,FULL) \
217 * CONFIG_SYS_FIXED_PHY_PORT(2,100,HALF)
218 */
219
220 #ifndef CONFIG_FIXED_PHY
221 #define CONFIG_FIXED_PHY 0xFFFFFFFF /* Fixed PHY (PHY-less) */
222 #endif
223
224 #ifndef CONFIG_SYS_FIXED_PHY_PORTS
225 #define CONFIG_SYS_FIXED_PHY_PORTS /* default is an empty array */
226 #endif
227
228 struct fixed_phy_port {
229 unsigned int devnum; /* ethernet port */
230 unsigned int speed; /* specified speed 10,100 or 1000 */
231 unsigned int duplex; /* specified duplex FULL or HALF */
232 };
233
234 static const struct fixed_phy_port fixed_phy_port[] = {
235 CONFIG_SYS_FIXED_PHY_PORTS /* defined in board configuration file */
236 };
237
238 /*-----------------------------------------------------------------------------+
239 * Global variables. TX and RX descriptors and buffers.
240 *-----------------------------------------------------------------------------*/
241
242 /*
243  * Get the count of EMAC devices (doesn't have to be the max. possible
244  * number supported by the cpu).
245  *
246  * CONFIG_BOARD_EMAC_COUNT provides a "dynamic" way to configure the
247  * EMAC count. This is needed for the Kilauea/Haleakala 405EX/405EXr
248  * eval boards, which use the same binary.
249 */
250 #if defined(CONFIG_BOARD_EMAC_COUNT)
251 #define LAST_EMAC_NUM board_emac_count()
252 #else /* CONFIG_BOARD_EMAC_COUNT */
253 #if defined(CONFIG_HAS_ETH3)
254 #define LAST_EMAC_NUM 4
255 #elif defined(CONFIG_HAS_ETH2)
256 #define LAST_EMAC_NUM 3
257 #elif defined(CONFIG_HAS_ETH1)
258 #define LAST_EMAC_NUM 2
259 #else
260 #define LAST_EMAC_NUM 1
261 #endif
262 #endif /* CONFIG_BOARD_EMAC_COUNT */
263
264 /* normal boards start with EMAC0 */
265 #if !defined(CONFIG_EMAC_NR_START)
266 #define CONFIG_EMAC_NR_START 0
267 #endif
268
269 #define MAL_RX_DESC_SIZE 2048
270 #define MAL_TX_DESC_SIZE 2048
271 #define MAL_ALLOC_SIZE (MAL_TX_DESC_SIZE + MAL_RX_DESC_SIZE)
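/*
 * One MAL_ALLOC_SIZE block is allocated per EMAC in ppc_4xx_eth_init():
 * the first MAL_TX_DESC_SIZE bytes hold the TX descriptors, the
 * remaining MAL_RX_DESC_SIZE bytes hold the RX descriptors.
 */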
272
273 /*-----------------------------------------------------------------------------+
274 * Prototypes and externals.
275 *-----------------------------------------------------------------------------*/
276 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
277
278 int enetInt (struct eth_device *dev);
279 static void mal_err (struct eth_device *dev, unsigned long isr,
280 unsigned long uic, unsigned long maldef,
281 unsigned long mal_errr);
282 static void emac_err (struct eth_device *dev, unsigned long isr);
283
284 extern int phy_setup_aneg (char *devname, unsigned char addr);
285 int emac4xx_miiphy_read(struct mii_dev *bus, int addr, int devad, int reg);
286 int emac4xx_miiphy_write(struct mii_dev *bus, int addr, int devad, int reg,
287 u16 value);
288
289 int board_emac_count(void);
290
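/*
 * These two helpers select/deselect the internally generated (loopback)
 * clock for an EMAC via the SDR clock-select bits. They are used around
 * the EMAC soft reset in ppc_4xx_eth_halt() and ppc_4xx_eth_init(), so
 * that the reset can complete even when no RX clock is coming in from
 * the PHY.
 */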
291 static void emac_loopback_enable(EMAC_4XX_HW_PST hw_p)
292 {
293 #if defined(CONFIG_440SPE) || \
294 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
295 defined(CONFIG_405EX)
296 u32 val;
297
298 mfsdr(SDR0_MFR, val);
299 val |= SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
300 mtsdr(SDR0_MFR, val);
301 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
302 u32 val;
303
304 mfsdr(SDR0_ETH_CFG, val);
305 val |= SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
306 mtsdr(SDR0_ETH_CFG, val);
307 #endif
308 }
309
310 static void emac_loopback_disable(EMAC_4XX_HW_PST hw_p)
311 {
312 #if defined(CONFIG_440SPE) || \
313 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
314 defined(CONFIG_405EX)
315 u32 val;
316
317 mfsdr(SDR0_MFR, val);
318 val &= ~SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
319 mtsdr(SDR0_MFR, val);
320 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
321 u32 val;
322
323 mfsdr(SDR0_ETH_CFG, val);
324 val &= ~SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
325 mtsdr(SDR0_ETH_CFG, val);
326 #endif
327 }
328
329 /*-----------------------------------------------------------------------------+
330 | ppc_4xx_eth_halt
331 | Disable MAL channel, and EMACn
332 +-----------------------------------------------------------------------------*/
333 static void ppc_4xx_eth_halt (struct eth_device *dev)
334 {
335 EMAC_4XX_HW_PST hw_p = dev->priv;
336 u32 val = 10000;
337
338 out_be32((void *)EMAC0_IER + hw_p->hw_addr, 0x00000000); /* disable emac interrupts */
339
340 /* 1st reset MAL channel */
341 /* Note: writing a 0 to a channel has no effect */
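/*
 * On 405EP/440EP/440GR the MAL TX channels are numbered two per EMAC
 * (devnum 1 uses TX channel 2, see the MAL0_TXCTP2R use further down),
 * hence the devnum * 2 shift on those parts.
 */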
342 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
343 mtdcr (MAL0_TXCARR, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
344 #else
345 mtdcr (MAL0_TXCARR, (MAL_CR_MMSR >> hw_p->devnum));
346 #endif
347 mtdcr (MAL0_RXCARR, (MAL_CR_MMSR >> hw_p->devnum));
348
349 /* wait for reset */
350 while (mfdcr (MAL0_RXCASR) & (MAL_CR_MMSR >> hw_p->devnum)) {
351 udelay (1000); /* Delay 1 MS so as not to hammer the register */
352 val--;
353 if (val == 0)
354 break;
355 }
356
357 /* provide clocks for EMAC internal loopback */
358 emac_loopback_enable(hw_p);
359
360 /* EMAC RESET */
361 out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_SRST);
362
363 /* remove clocks for EMAC internal loopback */
364 emac_loopback_disable(hw_p);
365
366 #ifndef CONFIG_NETCONSOLE
367 hw_p->print_speed = 1; /* print speed message again next time */
368 #endif
369
370 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
371 /* don't bypass the TAHOE0/TAHOE1 cores for Linux */
372 mfsdr(SDR0_ETH_CFG, val);
373 val &= ~(SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
374 mtsdr(SDR0_ETH_CFG, val);
375 #endif
376
377 return;
378 }
379
380 #if defined (CONFIG_440GX)
381 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
382 {
383 unsigned long pfc1;
384 unsigned long zmiifer;
385 unsigned long rmiifer;
386
387 mfsdr(SDR0_PFC1, pfc1);
388 pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
389
390 zmiifer = 0;
391 rmiifer = 0;
392
393 switch (pfc1) {
394 case 1:
395 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
396 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
397 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
398 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
399 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
400 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
401 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
402 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
403 break;
404 case 2:
405 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
406 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
407 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
408 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
409 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
410 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
411 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
412 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
413 break;
414 case 3:
415 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
416 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
417 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
418 bis->bi_phymode[1] = BI_PHYMODE_NONE;
419 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
420 bis->bi_phymode[3] = BI_PHYMODE_NONE;
421 break;
422 case 4:
423 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
424 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
425 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
426 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
427 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
428 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
429 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
430 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
431 break;
432 case 5:
433 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
434 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
435 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
436 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
437 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
438 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
439 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
440 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
441 break;
442 case 6:
443 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
444 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
445 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
446 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
447 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
448 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
449 break;
450 case 0:
451 default:
452 zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
453 rmiifer = 0x0;
454 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
455 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
456 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
457 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
458 break;
459 }
460
461 /* Ensure we setup mdio for this devnum and ONLY this devnum */
462 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
463
464 out_be32((void *)ZMII0_FER, zmiifer);
465 out_be32((void *)RGMII_FER, rmiifer);
466
467 return ((int)pfc1);
468 }
469 #endif /* CONFIG_440GX */
470
471 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
472 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
473 {
474 unsigned long zmiifer=0x0;
475 unsigned long pfc1;
476
477 mfsdr(SDR0_PFC1, pfc1);
478 pfc1 &= SDR0_PFC1_SELECT_MASK;
479
480 switch (pfc1) {
481 case SDR0_PFC1_SELECT_CONFIG_2:
482 /* 1 x GMII port */
483 out_be32((void *)ZMII0_FER, 0x00);
484 out_be32((void *)RGMII_FER, 0x00000037);
485 bis->bi_phymode[0] = BI_PHYMODE_GMII;
486 bis->bi_phymode[1] = BI_PHYMODE_NONE;
487 break;
488 case SDR0_PFC1_SELECT_CONFIG_4:
489 /* 2 x RGMII ports */
490 out_be32((void *)ZMII0_FER, 0x00);
491 out_be32((void *)RGMII_FER, 0x00000055);
492 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
493 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
494 break;
495 case SDR0_PFC1_SELECT_CONFIG_6:
496 /* 2 x SMII ports */
497 out_be32((void *)ZMII0_FER,
498 ((ZMII_FER_SMII) << ZMII_FER_V(0)) |
499 ((ZMII_FER_SMII) << ZMII_FER_V(1)));
500 out_be32((void *)RGMII_FER, 0x00000000);
501 bis->bi_phymode[0] = BI_PHYMODE_SMII;
502 bis->bi_phymode[1] = BI_PHYMODE_SMII;
503 break;
504 case SDR0_PFC1_SELECT_CONFIG_1_2:
505 /* only 1 x MII supported */
506 out_be32((void *)ZMII0_FER, (ZMII_FER_MII) << ZMII_FER_V(0));
507 out_be32((void *)RGMII_FER, 0x00000000);
508 bis->bi_phymode[0] = BI_PHYMODE_MII;
509 bis->bi_phymode[1] = BI_PHYMODE_NONE;
510 break;
511 default:
512 break;
513 }
514
515 /* Ensure we setup mdio for this devnum and ONLY this devnum */
516 zmiifer = in_be32((void *)ZMII0_FER);
517 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
518 out_be32((void *)ZMII0_FER, zmiifer);
519
520 return ((int)0x0);
521 }
522 #endif /* CONFIG_440EPX || CONFIG_440GRX */
523
524 #if defined(CONFIG_405EX)
525 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
526 {
527 u32 rgmiifer = 0;
528
529 /*
530 * The 405EX(r)'s RGMII bridge can operate in one of several
531 * modes, only one of which (2 x RGMII) allows the
532 * simultaneous use of both EMACs on the 405EX.
533 */
534
535 switch (CONFIG_EMAC_PHY_MODE) {
536
537 case EMAC_PHY_MODE_NONE:
538 /* No ports */
539 rgmiifer |= RGMII_FER_DIS << 0;
540 rgmiifer |= RGMII_FER_DIS << 4;
541 out_be32((void *)RGMII_FER, rgmiifer);
542 bis->bi_phymode[0] = BI_PHYMODE_NONE;
543 bis->bi_phymode[1] = BI_PHYMODE_NONE;
544 break;
545 case EMAC_PHY_MODE_NONE_RGMII:
546 /* 1 x RGMII port on channel 0 */
547 rgmiifer |= RGMII_FER_RGMII << 0;
548 rgmiifer |= RGMII_FER_DIS << 4;
549 out_be32((void *)RGMII_FER, rgmiifer);
550 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
551 bis->bi_phymode[1] = BI_PHYMODE_NONE;
552 break;
553 case EMAC_PHY_MODE_RGMII_NONE:
554 /* 1 x RGMII port on channel 1 */
555 rgmiifer |= RGMII_FER_DIS << 0;
556 rgmiifer |= RGMII_FER_RGMII << 4;
557 out_be32((void *)RGMII_FER, rgmiifer);
558 bis->bi_phymode[0] = BI_PHYMODE_NONE;
559 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
560 break;
561 case EMAC_PHY_MODE_RGMII_RGMII:
562 /* 2 x RGMII ports */
563 rgmiifer |= RGMII_FER_RGMII << 0;
564 rgmiifer |= RGMII_FER_RGMII << 4;
565 out_be32((void *)RGMII_FER, rgmiifer);
566 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
567 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
568 break;
569 case EMAC_PHY_MODE_NONE_GMII:
570 /* 1 x GMII port on channel 0 */
571 rgmiifer |= RGMII_FER_GMII << 0;
572 rgmiifer |= RGMII_FER_DIS << 4;
573 out_be32((void *)RGMII_FER, rgmiifer);
574 bis->bi_phymode[0] = BI_PHYMODE_GMII;
575 bis->bi_phymode[1] = BI_PHYMODE_NONE;
576 break;
577 case EMAC_PHY_MODE_NONE_MII:
578 /* 1 x MII port on channel 0 */
579 rgmiifer |= RGMII_FER_MII << 0;
580 rgmiifer |= RGMII_FER_DIS << 4;
581 out_be32((void *)RGMII_FER, rgmiifer);
582 bis->bi_phymode[0] = BI_PHYMODE_MII;
583 bis->bi_phymode[1] = BI_PHYMODE_NONE;
584 break;
585 case EMAC_PHY_MODE_GMII_NONE:
586 /* 1 x GMII port on channel 1 */
587 rgmiifer |= RGMII_FER_DIS << 0;
588 rgmiifer |= RGMII_FER_GMII << 4;
589 out_be32((void *)RGMII_FER, rgmiifer);
590 bis->bi_phymode[0] = BI_PHYMODE_NONE;
591 bis->bi_phymode[1] = BI_PHYMODE_GMII;
592 break;
593 case EMAC_PHY_MODE_MII_NONE:
594 /* 1 x MII port on channel 1 */
595 rgmiifer |= RGMII_FER_DIS << 0;
596 rgmiifer |= RGMII_FER_MII << 4;
597 out_be32((void *)RGMII_FER, rgmiifer);
598 bis->bi_phymode[0] = BI_PHYMODE_NONE;
599 bis->bi_phymode[1] = BI_PHYMODE_MII;
600 break;
601 default:
602 break;
603 }
604
605 /* Ensure we setup mdio for this devnum and ONLY this devnum */
606 rgmiifer = in_be32((void *)RGMII_FER);
607 rgmiifer |= (1 << (19-devnum));
608 out_be32((void *)RGMII_FER, rgmiifer);
609
610 return ((int)0x0);
611 }
612 #endif /* CONFIG_405EX */
613
614 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
615 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
616 {
617 u32 eth_cfg;
618 u32 zmiifer; /* ZMII0_FER reg. */
619 u32 rmiifer; /* RGMII0_FER reg. Bridge 0 */
620 u32 rmiifer1; /* RGMII0_FER reg. Bridge 1 */
621 int mode;
622
623 zmiifer = 0;
624 rmiifer = 0;
625 rmiifer1 = 0;
626
627 #if defined(CONFIG_460EX)
628 mode = 9;
629 mfsdr(SDR0_ETH_CFG, eth_cfg);
630 if (((eth_cfg & SDR0_ETH_CFG_SGMII0_ENABLE) > 0) &&
631 ((eth_cfg & SDR0_ETH_CFG_SGMII1_ENABLE) > 0))
632 mode = 11; /* config SGMII */
633 #else
634 mode = 10;
635 mfsdr(SDR0_ETH_CFG, eth_cfg);
636 if (((eth_cfg & SDR0_ETH_CFG_SGMII0_ENABLE) > 0) &&
637 ((eth_cfg & SDR0_ETH_CFG_SGMII1_ENABLE) > 0) &&
638 ((eth_cfg & SDR0_ETH_CFG_SGMII2_ENABLE) > 0))
639 mode = 12; /* config SGMII */
640 #endif
641
642 /* TODO:
643 * NOTE: 460GT has 2 RGMII bridge cores:
644 * emac0 ------ RGMII0_BASE
645 * |
646 * emac1 -----+
647 *
648 * emac2 ------ RGMII1_BASE
649 * |
650 * emac3 -----+
651 *
652  * 460EX has 1 RGMII bridge core
653  * (RGMII1_BASE is disabled):
654 * emac0 ------ RGMII0_BASE
655 * |
656 * emac1 -----+
657 */
658
659 /*
660 * Right now only 2*RGMII is supported. Please extend when needed.
661 * sr - 2008-02-19
662 * Add SGMII support.
663 * vg - 2008-07-28
664 */
665 switch (mode) {
666 case 1:
667 /* 1 MII - 460EX */
668 /* GMC0 EMAC4_0, ZMII Bridge */
669 zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
670 bis->bi_phymode[0] = BI_PHYMODE_MII;
671 bis->bi_phymode[1] = BI_PHYMODE_NONE;
672 bis->bi_phymode[2] = BI_PHYMODE_NONE;
673 bis->bi_phymode[3] = BI_PHYMODE_NONE;
674 break;
675 case 2:
676 /* 2 MII - 460GT */
677 /* GMC0 EMAC4_0, GMC1 EMAC4_2, ZMII Bridge */
678 zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
679 zmiifer |= ZMII_FER_MII << ZMII_FER_V(2);
680 bis->bi_phymode[0] = BI_PHYMODE_MII;
681 bis->bi_phymode[1] = BI_PHYMODE_NONE;
682 bis->bi_phymode[2] = BI_PHYMODE_MII;
683 bis->bi_phymode[3] = BI_PHYMODE_NONE;
684 break;
685 case 3:
686 /* 2 RMII - 460EX */
687 /* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
688 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
689 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
690 bis->bi_phymode[0] = BI_PHYMODE_RMII;
691 bis->bi_phymode[1] = BI_PHYMODE_RMII;
692 bis->bi_phymode[2] = BI_PHYMODE_NONE;
693 bis->bi_phymode[3] = BI_PHYMODE_NONE;
694 break;
695 case 4:
696 /* 4 RMII - 460GT */
697 /* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC1 EMAC4_2, GMC1 EMAC4_3 */
698 /* ZMII Bridge */
699 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
700 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
701 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
702 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
703 bis->bi_phymode[0] = BI_PHYMODE_RMII;
704 bis->bi_phymode[1] = BI_PHYMODE_RMII;
705 bis->bi_phymode[2] = BI_PHYMODE_RMII;
706 bis->bi_phymode[3] = BI_PHYMODE_RMII;
707 break;
708 case 5:
709 /* 2 SMII - 460EX */
710 /* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
711 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
712 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
713 bis->bi_phymode[0] = BI_PHYMODE_SMII;
714 bis->bi_phymode[1] = BI_PHYMODE_SMII;
715 bis->bi_phymode[2] = BI_PHYMODE_NONE;
716 bis->bi_phymode[3] = BI_PHYMODE_NONE;
717 break;
718 case 6:
719 /* 4 SMII - 460GT */
720 /* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC1 EMAC4_2, GMC1 EMAC4_3 */
721 /* ZMII Bridge */
722 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
723 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
724 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
725 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
726 bis->bi_phymode[0] = BI_PHYMODE_SMII;
727 bis->bi_phymode[1] = BI_PHYMODE_SMII;
728 bis->bi_phymode[2] = BI_PHYMODE_SMII;
729 bis->bi_phymode[3] = BI_PHYMODE_SMII;
730 break;
731 case 7:
732 /* This is the default mode that we want for board bringup - Maple */
733 /* 1 GMII - 460EX */
734 /* GMC0 EMAC4_0, RGMII Bridge 0 */
735 rmiifer |= RGMII_FER_MDIO(0);
736
737 if (devnum == 0) {
738 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
739 bis->bi_phymode[0] = BI_PHYMODE_GMII;
740 bis->bi_phymode[1] = BI_PHYMODE_NONE;
741 bis->bi_phymode[2] = BI_PHYMODE_NONE;
742 bis->bi_phymode[3] = BI_PHYMODE_NONE;
743 } else {
744 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(3); /* CH1CFG - EMAC1 */
745 bis->bi_phymode[0] = BI_PHYMODE_NONE;
746 bis->bi_phymode[1] = BI_PHYMODE_GMII;
747 bis->bi_phymode[2] = BI_PHYMODE_NONE;
748 bis->bi_phymode[3] = BI_PHYMODE_NONE;
749 }
750 break;
751 case 8:
752 /* 2 GMII - 460GT */
753 /* GMC0 EMAC4_0, RGMII Bridge 0 */
754 /* GMC1 EMAC4_2, RGMII Bridge 1 */
755 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
756 rmiifer1 |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC2 */
757 rmiifer |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC0 */
758 rmiifer1 |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC2 */
759
760 bis->bi_phymode[0] = BI_PHYMODE_GMII;
761 bis->bi_phymode[1] = BI_PHYMODE_NONE;
762 bis->bi_phymode[2] = BI_PHYMODE_GMII;
763 bis->bi_phymode[3] = BI_PHYMODE_NONE;
764 break;
765 case 9:
766 /* 2 RGMII - 460EX */
767 /* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
768 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
769 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
770 rmiifer |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC0 */
771
772 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
773 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
774 bis->bi_phymode[2] = BI_PHYMODE_NONE;
775 bis->bi_phymode[3] = BI_PHYMODE_NONE;
776 break;
777 case 10:
778 /* 4 RGMII - 460GT */
779 /* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
780 /* GMC1 EMAC4_2, GMC1 EMAC4_3, RGMII Bridge 1 */
781 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
782 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
783 rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(2);
784 rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(3);
785 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
786 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
787 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
788 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
789 break;
790 case 11:
791 /* 2 SGMII - 460EX */
792 bis->bi_phymode[0] = BI_PHYMODE_SGMII;
793 bis->bi_phymode[1] = BI_PHYMODE_SGMII;
794 bis->bi_phymode[2] = BI_PHYMODE_NONE;
795 bis->bi_phymode[3] = BI_PHYMODE_NONE;
796 break;
797 case 12:
798 /* 3 SGMII - 460GT */
799 bis->bi_phymode[0] = BI_PHYMODE_SGMII;
800 bis->bi_phymode[1] = BI_PHYMODE_SGMII;
801 bis->bi_phymode[2] = BI_PHYMODE_SGMII;
802 bis->bi_phymode[3] = BI_PHYMODE_NONE;
803 break;
804 default:
805 break;
806 }
807
808 /* Set EMAC for MDIO */
809 mfsdr(SDR0_ETH_CFG, eth_cfg);
810 eth_cfg |= SDR0_ETH_CFG_MDIO_SEL_EMAC0;
811 mtsdr(SDR0_ETH_CFG, eth_cfg);
812
813 out_be32((void *)RGMII_FER, rmiifer);
814 #if defined(CONFIG_460GT)
815 out_be32((void *)RGMII_FER + RGMII1_BASE_OFFSET, rmiifer1);
816 #endif
817
818 /* bypass the TAHOE0/TAHOE1 cores for U-Boot */
819 mfsdr(SDR0_ETH_CFG, eth_cfg);
820 eth_cfg |= (SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
821 mtsdr(SDR0_ETH_CFG, eth_cfg);
822
823 return 0;
824 }
825 #endif /* CONFIG_460EX || CONFIG_460GT */
826
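/*
 * Over-allocate by 'align' bytes and round the returned pointer up to
 * the requested alignment. The original malloc() pointer is discarded,
 * so this memory can never be freed; that is acceptable here since the
 * descriptor and buffer areas are only allocated once (first_init) and
 * stay in use for the rest of the boot.
 */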
827 static inline void *malloc_aligned(u32 size, u32 align)
828 {
829 return (void *)(((u32)malloc(size + align) + align - 1) &
830 ~(align - 1));
831 }
832
833 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
834 {
835 int i;
836 unsigned long reg = 0;
837 unsigned long msr;
838 unsigned long speed;
839 unsigned long duplex;
840 unsigned long failsafe;
841 unsigned mode_reg;
842 unsigned short devnum;
843 unsigned short reg_short;
844 #if defined(CONFIG_440GX) || \
845 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
846 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
847 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
848 defined(CONFIG_405EX)
849 u32 opbfreq;
850 sys_info_t sysinfo;
851 #if defined(CONFIG_440GX) || \
852 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
853 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
854 defined(CONFIG_405EX)
855 __maybe_unused int ethgroup = -1;
856 #endif
857 #endif
858 u32 bd_cached;
859 u32 bd_uncached = 0;
860 #ifdef CONFIG_4xx_DCACHE
861 static u32 last_used_ea = 0;
862 #endif
863 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
864 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
865 defined(CONFIG_405EX)
866 int rgmii_channel;
867 #endif
868
869 EMAC_4XX_HW_PST hw_p = dev->priv;
870
871 /* before doing anything, figure out if we have a MAC address */
872 /* if not, bail */
873 if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
874 printf("ERROR: ethaddr not set!\n");
875 return -1;
876 }
877
878 #if defined(CONFIG_440GX) || \
879 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
880 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
881 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
882 defined(CONFIG_405EX)
883 /* Need to get the OPB frequency so we can access the PHY */
884 get_sys_info (&sysinfo);
885 #endif
886
887 msr = mfmsr ();
888 mtmsr (msr & ~(MSR_EE)); /* disable interrupts */
889
890 devnum = hw_p->devnum;
891
892 #ifdef INFO_4XX_ENET
893 /* AS.HARNOIS
894 * We should have :
895 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
896  * In most cases hw_p->stats.pkts_handled == hw_p->stats.pkts_rx, but it
897  * is possible that new packets (unrelated to the
898  * current transfer) have had time to arrive before
899  * netloop calls eth_halt.
900 */
901 printf ("About preceding transfer (eth%d):\n"
902 "- Sent packet number %d\n"
903 "- Received packet number %d\n"
904 "- Handled packet number %d\n",
905 hw_p->devnum,
906 hw_p->stats.pkts_tx,
907 hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
908
909 hw_p->stats.pkts_tx = 0;
910 hw_p->stats.pkts_rx = 0;
911 hw_p->stats.pkts_handled = 0;
912 hw_p->print_speed = 1; /* print speed message again next time */
913 #endif
914
915 hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
916 hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
917
918 hw_p->rx_slot = 0; /* MAL Receive Slot */
919 hw_p->rx_i_index = 0; /* Receive Interrupt Queue Index */
920 hw_p->rx_u_index = 0; /* Receive User Queue Index */
921
922 hw_p->tx_slot = 0; /* MAL Transmit Slot */
923 hw_p->tx_i_index = 0; /* Transmit Interrupt Queue Index */
924 hw_p->tx_u_index = 0; /* Transmit User Queue Index */
925
926 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
927 /* set RMII mode */
928 /* NOTE: 440GX spec states that mode is mutually exclusive */
929 /* NOTE: Therefore, disable all other EMACS, since we handle */
930 /* NOTE: only one emac at a time */
931 reg = 0;
932 out_be32((void *)ZMII0_FER, 0);
933 udelay (100);
934
935 #if defined(CONFIG_440GP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
936 out_be32((void *)ZMII0_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
937 #elif defined(CONFIG_440GX) || \
938 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
939 defined(CONFIG_460EX) || defined(CONFIG_460GT)
940 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
941 #endif
942
943 out_be32((void *)ZMII0_SSR, ZMII0_SSR_SP << ZMII0_SSR_V(devnum));
944 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
945 #if defined(CONFIG_405EX)
946 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
947 #endif
948
949 sync();
950
951 /* provide clocks for EMAC internal loopback */
952 emac_loopback_enable(hw_p);
953
954 /* EMAC RESET */
955 out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_SRST);
956
957 /* remove clocks for EMAC internal loopback */
958 emac_loopback_disable(hw_p);
959
960 failsafe = 1000;
961 while ((in_be32((void *)EMAC0_MR0 + hw_p->hw_addr) & (EMAC_MR0_SRST)) && failsafe) {
962 udelay (1000);
963 failsafe--;
964 }
965 if (failsafe <= 0)
966 printf("\nProblem resetting EMAC!\n");
967
968 #if defined(CONFIG_440GX) || \
969 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
970 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
971 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
972 defined(CONFIG_405EX)
973 /* Whack the M1 register */
974 mode_reg = 0x0;
975 mode_reg &= ~0x00000038;
976 opbfreq = sysinfo.freqOPB / 1000000;
977 	if (opbfreq <= 50); /* <= 50 MHz: nothing to set */
978 else if (opbfreq <= 66)
979 mode_reg |= EMAC_MR1_OBCI_66;
980 else if (opbfreq <= 83)
981 mode_reg |= EMAC_MR1_OBCI_83;
982 else if (opbfreq <= 100)
983 mode_reg |= EMAC_MR1_OBCI_100;
984 else
985 mode_reg |= EMAC_MR1_OBCI_GT100;
986
987 out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
988 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
989
990 #if defined(CONFIG_GPCS_PHY_ADDR) || defined(CONFIG_GPCS_PHY1_ADDR) || \
991 defined(CONFIG_GPCS_PHY2_ADDR) || defined(CONFIG_GPCS_PHY3_ADDR)
992 if (bis->bi_phymode[devnum] == BI_PHYMODE_SGMII) {
993 /*
994 * In SGMII mode, GPCS access is needed for
995 * communication with the internal SGMII SerDes.
996 */
997 switch (devnum) {
998 #if defined(CONFIG_GPCS_PHY_ADDR)
999 case 0:
1000 reg = CONFIG_GPCS_PHY_ADDR;
1001 break;
1002 #endif
1003 #if defined(CONFIG_GPCS_PHY1_ADDR)
1004 case 1:
1005 reg = CONFIG_GPCS_PHY1_ADDR;
1006 break;
1007 #endif
1008 #if defined(CONFIG_GPCS_PHY2_ADDR)
1009 case 2:
1010 reg = CONFIG_GPCS_PHY2_ADDR;
1011 break;
1012 #endif
1013 #if defined(CONFIG_GPCS_PHY3_ADDR)
1014 case 3:
1015 reg = CONFIG_GPCS_PHY3_ADDR;
1016 break;
1017 #endif
1018 }
1019
1020 mode_reg = in_be32((void *)EMAC0_MR1 + hw_p->hw_addr);
1021 mode_reg |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_IPPA_SET(reg);
1022 out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
1023
1024 /* Configure GPCS interface to recommended setting for SGMII */
1025 miiphy_reset(dev->name, reg);
1026 miiphy_write(dev->name, reg, 0x04, 0x8120); /* AsymPause, FDX */
1027 miiphy_write(dev->name, reg, 0x07, 0x2801); /* msg_pg, toggle */
1028 miiphy_write(dev->name, reg, 0x00, 0x0140); /* 1Gbps, FDX */
1029 }
1030 #endif /* defined(CONFIG_GPCS_PHY_ADDR) */
1031
1032 /* wait for PHY to complete auto negotiation */
1033 reg_short = 0;
1034 switch (devnum) {
1035 case 0:
1036 reg = CONFIG_PHY_ADDR;
1037 break;
1038 #if defined (CONFIG_PHY1_ADDR)
1039 case 1:
1040 reg = CONFIG_PHY1_ADDR;
1041 break;
1042 #endif
1043 #if defined (CONFIG_PHY2_ADDR)
1044 case 2:
1045 reg = CONFIG_PHY2_ADDR;
1046 break;
1047 #endif
1048 #if defined (CONFIG_PHY3_ADDR)
1049 case 3:
1050 reg = CONFIG_PHY3_ADDR;
1051 break;
1052 #endif
1053 default:
1054 reg = CONFIG_PHY_ADDR;
1055 break;
1056 }
1057
1058 bis->bi_phynum[devnum] = reg;
1059
1060 if (reg == CONFIG_FIXED_PHY)
1061 goto get_speed;
1062
1063 #if defined(CONFIG_PHY_RESET)
1064 /*
1065 	 * Reset the PHY, but only if it's the first time through;
1066 	 * otherwise just check the speeds & feeds
1067 */
1068 if (hw_p->first_init == 0) {
1069 #if defined(CONFIG_M88E1111_PHY)
1070 miiphy_write (dev->name, reg, 0x14, 0x0ce3);
1071 miiphy_write (dev->name, reg, 0x18, 0x4101);
1072 miiphy_write (dev->name, reg, 0x09, 0x0e00);
1073 miiphy_write (dev->name, reg, 0x04, 0x01e1);
1074 #if defined(CONFIG_M88E1111_DISABLE_FIBER)
1075 miiphy_read(dev->name, reg, 0x1b, &reg_short);
1076 reg_short |= 0x8000;
1077 miiphy_write(dev->name, reg, 0x1b, reg_short);
1078 #endif
1079 #endif
1080 #if defined(CONFIG_M88E1112_PHY)
1081 if (bis->bi_phymode[devnum] == BI_PHYMODE_SGMII) {
1082 /*
1083 * Marvell 88E1112 PHY needs to have the SGMII MAC
1084 			 * interface (page 2) properly configured to
1085 * communicate with the 460EX/GT GPCS interface.
1086 */
1087
1088 /* Set access to Page 2 */
1089 miiphy_write(dev->name, reg, 0x16, 0x0002);
1090
1091 miiphy_write(dev->name, reg, 0x00, 0x0040); /* 1Gbps */
1092 miiphy_read(dev->name, reg, 0x1a, &reg_short);
1093 reg_short |= 0x8000; /* bypass Auto-Negotiation */
1094 miiphy_write(dev->name, reg, 0x1a, reg_short);
1095 miiphy_reset(dev->name, reg); /* reset MAC interface */
1096
1097 /* Reset access to Page 0 */
1098 miiphy_write(dev->name, reg, 0x16, 0x0000);
1099 }
1100 #endif /* defined(CONFIG_M88E1112_PHY) */
1101 miiphy_reset (dev->name, reg);
1102
1103 #if defined(CONFIG_440GX) || \
1104 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1105 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1106 defined(CONFIG_405EX)
1107
1108 #if defined(CONFIG_CIS8201_PHY)
1109 /*
1110 * Cicada 8201 PHY needs to have an extended register whacked
1111 * for RGMII mode.
1112 */
1113 if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
1114 #if defined(CONFIG_CIS8201_SHORT_ETCH)
1115 miiphy_write (dev->name, reg, 23, 0x1300);
1116 #else
1117 miiphy_write (dev->name, reg, 23, 0x1000);
1118 #endif
1119 /*
1120 * Vitesse VSC8201/Cicada CIS8201 errata:
1121 * Interoperability problem with Intel 82547EI phys
1122 			 * This workaround (provided by Vitesse) changes
1123 * the default timer convergence from 8ms to 12ms
1124 */
1125 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
1126 miiphy_write (dev->name, reg, 0x08, 0x0200);
1127 miiphy_write (dev->name, reg, 0x1f, 0x52b5);
1128 miiphy_write (dev->name, reg, 0x02, 0x0004);
1129 miiphy_write (dev->name, reg, 0x01, 0x0671);
1130 miiphy_write (dev->name, reg, 0x00, 0x8fae);
1131 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
1132 miiphy_write (dev->name, reg, 0x08, 0x0000);
1133 miiphy_write (dev->name, reg, 0x1f, 0x0000);
1134 /* end Vitesse/Cicada errata */
1135 }
1136 #endif /* defined(CONFIG_CIS8201_PHY) */
1137
1138 #if defined(CONFIG_ET1011C_PHY)
1139 /*
1140 * Agere ET1011c PHY needs to have an extended register whacked
1141 * for RGMII mode.
1142 */
1143 if (((devnum == 2) || (devnum ==3)) && (4 == ethgroup)) {
1144 miiphy_read (dev->name, reg, 0x16, &reg_short);
1145 reg_short &= ~(0x7);
1146 reg_short |= 0x6; /* RGMII DLL Delay*/
1147 miiphy_write (dev->name, reg, 0x16, reg_short);
1148
1149 miiphy_read (dev->name, reg, 0x17, &reg_short);
1150 reg_short &= ~(0x40);
1151 miiphy_write (dev->name, reg, 0x17, reg_short);
1152
1153 miiphy_write(dev->name, reg, 0x1c, 0x74f0);
1154 }
1155 #endif /* defined(CONFIG_ET1011C_PHY) */
1156
1157 #endif /* defined(CONFIG_440GX) ... */
1158 /* Start/Restart autonegotiation */
1159 phy_setup_aneg (dev->name, reg);
1160 udelay (1000);
1161 }
1162 #endif /* defined(CONFIG_PHY_RESET) */
1163
1164 miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
1165
1166 /*
1167 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
1168 */
1169 if ((reg_short & BMSR_ANEGCAPABLE)
1170 && !(reg_short & BMSR_ANEGCOMPLETE)) {
1171 puts ("Waiting for PHY auto negotiation to complete");
1172 i = 0;
1173 while (!(reg_short & BMSR_ANEGCOMPLETE)) {
1174 /*
1175 * Timeout reached ?
1176 */
1177 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
1178 puts (" TIMEOUT !\n");
1179 break;
1180 }
1181
1182 if ((i++ % 1000) == 0) {
1183 putc ('.');
1184 }
1185 udelay (1000); /* 1 ms */
1186 miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
1187 }
1188 puts (" done\n");
1189 udelay (500000); /* another 500 ms (results in faster booting) */
1190 }
1191
1192 get_speed:
1193 if (reg == CONFIG_FIXED_PHY) {
1194 for (i = 0; i < ARRAY_SIZE(fixed_phy_port); i++) {
1195 if (devnum == fixed_phy_port[i].devnum) {
1196 speed = fixed_phy_port[i].speed;
1197 duplex = fixed_phy_port[i].duplex;
1198 break;
1199 }
1200 }
1201
1202 if (i == ARRAY_SIZE(fixed_phy_port)) {
1203 printf("ERROR: PHY (%s) not configured correctly!\n",
1204 dev->name);
1205 return -1;
1206 }
1207 } else {
1208 speed = miiphy_speed(dev->name, reg);
1209 duplex = miiphy_duplex(dev->name, reg);
1210 }
1211
1212 if (hw_p->print_speed) {
1213 hw_p->print_speed = 0;
1214 printf ("ENET Speed is %d Mbps - %s duplex connection (EMAC%d)\n",
1215 (int) speed, (duplex == HALF) ? "HALF" : "FULL",
1216 hw_p->devnum);
1217 }
1218
1219 #if defined(CONFIG_440) && \
1220 !defined(CONFIG_440SP) && !defined(CONFIG_440SPE) && \
1221 !defined(CONFIG_440EPX) && !defined(CONFIG_440GRX) && \
1222 !defined(CONFIG_460EX) && !defined(CONFIG_460GT)
1223 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
1224 mfsdr(SDR0_MFR, reg);
1225 if (speed == 100) {
1226 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
1227 } else {
1228 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
1229 }
1230 mtsdr(SDR0_MFR, reg);
1231 #endif
1232
1233 /* Set ZMII/RGMII speed according to the phy link speed */
1234 reg = in_be32((void *)ZMII0_SSR);
1235 if ( (speed == 100) || (speed == 1000) )
1236 out_be32((void *)ZMII0_SSR, reg | (ZMII0_SSR_SP << ZMII0_SSR_V (devnum)));
1237 else
1238 out_be32((void *)ZMII0_SSR, reg & (~(ZMII0_SSR_SP << ZMII0_SSR_V (devnum))));
1239
1240 if ((devnum == 2) || (devnum == 3)) {
1241 if (speed == 1000)
1242 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
1243 else if (speed == 100)
1244 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
1245 else if (speed == 10)
1246 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
1247 else {
1248 printf("Error in RGMII Speed\n");
1249 return -1;
1250 }
1251 out_be32((void *)RGMII_SSR, reg);
1252 }
1253 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
1254
1255 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1256 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1257 defined(CONFIG_405EX)
1258 if (devnum >= 2)
1259 rgmii_channel = devnum - 2;
1260 else
1261 rgmii_channel = devnum;
1262
1263 if (speed == 1000)
1264 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V(rgmii_channel));
1265 else if (speed == 100)
1266 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V(rgmii_channel));
1267 else if (speed == 10)
1268 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V(rgmii_channel));
1269 else {
1270 printf("Error in RGMII Speed\n");
1271 return -1;
1272 }
1273 out_be32((void *)RGMII_SSR, reg);
1274 #if defined(CONFIG_460GT)
1275 if ((devnum == 2) || (devnum == 3))
1276 out_be32((void *)RGMII_SSR + RGMII1_BASE_OFFSET, reg);
1277 #endif
1278 #endif
1279
1280 /* set the Mal configuration reg */
1281 #if defined(CONFIG_440GX) || \
1282 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1283 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
1284 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1285 defined(CONFIG_405EX)
1286 mtdcr (MAL0_CFG, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
1287 MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
1288 #else
1289 mtdcr (MAL0_CFG, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
1290 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
1291 if (get_pvr() == PVR_440GP_RB) {
1292 mtdcr (MAL0_CFG, mfdcr(MAL0_CFG) & ~MAL_CR_PLBB);
1293 }
1294 #endif
1295
1296 /*
1297 	 * Malloc MAL buffer descriptors, make sure they are
1298 	 * aligned on a cache line boundary
1299 	 * (401/403/IOP480 = 16, 405 = 32)
1300 	 * and don't cross cache block boundaries.
1301 */
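/*
 * With CONFIG_4xx_DCACHE the descriptors are additionally mapped via a
 * cache-inhibited TLB entry placed just above the usable RAM
 * (bi_memsize), so the CPU accesses them uncached (hw_p->tx/rx) while
 * the MAL is given the cached/physical address (hw_p->tx_phys/rx_phys).
 */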
1302 if (hw_p->first_init == 0) {
1303 debug("*** Allocating descriptor memory ***\n");
1304
1305 bd_cached = (u32)malloc_aligned(MAL_ALLOC_SIZE, 4096);
1306 if (!bd_cached) {
1307 printf("%s: Error allocating MAL descriptor buffers!\n", __func__);
1308 return -1;
1309 }
1310
1311 #ifdef CONFIG_4xx_DCACHE
1312 flush_dcache_range(bd_cached, bd_cached + MAL_ALLOC_SIZE);
1313 if (!last_used_ea)
1314 #if defined(CONFIG_SYS_MEM_TOP_HIDE)
1315 bd_uncached = bis->bi_memsize + CONFIG_SYS_MEM_TOP_HIDE;
1316 #else
1317 bd_uncached = bis->bi_memsize;
1318 #endif
1319 else
1320 bd_uncached = last_used_ea + MAL_ALLOC_SIZE;
1321
1322 last_used_ea = bd_uncached;
1323 program_tlb(bd_cached, bd_uncached, MAL_ALLOC_SIZE,
1324 TLB_WORD2_I_ENABLE);
1325 #else
1326 bd_uncached = bd_cached;
1327 #endif
1328 hw_p->tx_phys = bd_cached;
1329 hw_p->rx_phys = bd_cached + MAL_TX_DESC_SIZE;
1330 hw_p->tx = (mal_desc_t *)(bd_uncached);
1331 hw_p->rx = (mal_desc_t *)(bd_uncached + MAL_TX_DESC_SIZE);
1332 debug("hw_p->tx=%p, hw_p->rx=%p\n", hw_p->tx, hw_p->rx);
1333 }
1334
1335 for (i = 0; i < NUM_TX_BUFF; i++) {
1336 hw_p->tx[i].ctrl = 0;
1337 hw_p->tx[i].data_len = 0;
1338 if (hw_p->first_init == 0)
1339 hw_p->txbuf_ptr = malloc_aligned(MAL_ALLOC_SIZE,
1340 L1_CACHE_BYTES);
1341 hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
1342 if ((NUM_TX_BUFF - 1) == i)
1343 hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
1344 hw_p->tx_run[i] = -1;
1345 debug("TX_BUFF %d @ 0x%08x\n", i, (u32)hw_p->tx[i].data_ptr);
1346 }
1347
1348 for (i = 0; i < NUM_RX_BUFF; i++) {
1349 hw_p->rx[i].ctrl = 0;
1350 hw_p->rx[i].data_len = 0;
1351 hw_p->rx[i].data_ptr = (char *)net_rx_packets[i];
1352 if ((NUM_RX_BUFF - 1) == i)
1353 hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
1354 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
1355 hw_p->rx_ready[i] = -1;
1356 debug("RX_BUFF %d @ 0x%08x\n", i, (u32)hw_p->rx[i].data_ptr);
1357 }
1358
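/*
 * Program the individual (station) address: EMAC0_IAH takes the two
 * high-order bytes of the MAC address, EMAC0_IAL the remaining four.
 */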
1359 reg = 0x00000000;
1360
1361 reg |= dev->enetaddr[0]; /* set high address */
1362 reg = reg << 8;
1363 reg |= dev->enetaddr[1];
1364
1365 out_be32((void *)EMAC0_IAH + hw_p->hw_addr, reg);
1366
1367 reg = 0x00000000;
1368 reg |= dev->enetaddr[2]; /* set low address */
1369 reg = reg << 8;
1370 reg |= dev->enetaddr[3];
1371 reg = reg << 8;
1372 reg |= dev->enetaddr[4];
1373 reg = reg << 8;
1374 reg |= dev->enetaddr[5];
1375
1376 out_be32((void *)EMAC0_IAL + hw_p->hw_addr, reg);
1377
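/*
 * The MAL channel <-> EMAC mapping is SoC specific: 440GX uses TX/RX
 * channel N for EMAC N, 405EP/440EP/440GR use TX channel 2 for EMAC1,
 * and 460EX/GT route EMAC1/2/3 RX traffic through MAL RX channels
 * 8/16/24. The cases below set the channel table pointers and RX
 * buffer sizes accordingly.
 */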
1378 switch (devnum) {
1379 case 1:
1380 /* setup MAL tx & rx channel pointers */
1381 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
1382 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1383 #else
1384 mtdcr (MAL0_TXCTP1R, hw_p->tx_phys);
1385 #endif
1386 #if defined(CONFIG_440)
1387 mtdcr (MAL0_TXBADDR, 0x0);
1388 mtdcr (MAL0_RXBADDR, 0x0);
1389 #endif
1390
1391 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
1392 mtdcr (MAL0_RXCTP8R, hw_p->rx_phys);
1393 /* set RX buffer size */
1394 mtdcr (MAL0_RCBS8, ENET_MAX_MTU_ALIGNED / 16);
1395 #else
1396 mtdcr (MAL0_RXCTP1R, hw_p->rx_phys);
1397 /* set RX buffer size */
1398 mtdcr (MAL0_RCBS1, ENET_MAX_MTU_ALIGNED / 16);
1399 #endif
1400 break;
1401 #if defined (CONFIG_440GX)
1402 case 2:
1403 /* setup MAL tx & rx channel pointers */
1404 mtdcr (MAL0_TXBADDR, 0x0);
1405 mtdcr (MAL0_RXBADDR, 0x0);
1406 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1407 mtdcr (MAL0_RXCTP2R, hw_p->rx_phys);
1408 /* set RX buffer size */
1409 mtdcr (MAL0_RCBS2, ENET_MAX_MTU_ALIGNED / 16);
1410 break;
1411 case 3:
1412 /* setup MAL tx & rx channel pointers */
1413 mtdcr (MAL0_TXBADDR, 0x0);
1414 mtdcr (MAL0_TXCTP3R, hw_p->tx_phys);
1415 mtdcr (MAL0_RXBADDR, 0x0);
1416 mtdcr (MAL0_RXCTP3R, hw_p->rx_phys);
1417 /* set RX buffer size */
1418 mtdcr (MAL0_RCBS3, ENET_MAX_MTU_ALIGNED / 16);
1419 break;
1420 #endif /* CONFIG_440GX */
1421 #if defined (CONFIG_460GT)
1422 case 2:
1423 /* setup MAL tx & rx channel pointers */
1424 mtdcr (MAL0_TXBADDR, 0x0);
1425 mtdcr (MAL0_RXBADDR, 0x0);
1426 mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1427 mtdcr (MAL0_RXCTP16R, hw_p->rx_phys);
1428 /* set RX buffer size */
1429 mtdcr (MAL0_RCBS16, ENET_MAX_MTU_ALIGNED / 16);
1430 break;
1431 case 3:
1432 /* setup MAL tx & rx channel pointers */
1433 mtdcr (MAL0_TXBADDR, 0x0);
1434 mtdcr (MAL0_RXBADDR, 0x0);
1435 mtdcr (MAL0_TXCTP3R, hw_p->tx_phys);
1436 mtdcr (MAL0_RXCTP24R, hw_p->rx_phys);
1437 /* set RX buffer size */
1438 mtdcr (MAL0_RCBS24, ENET_MAX_MTU_ALIGNED / 16);
1439 break;
1440 #endif /* CONFIG_460GT */
1441 case 0:
1442 default:
1443 /* setup MAL tx & rx channel pointers */
1444 #if defined(CONFIG_440)
1445 mtdcr (MAL0_TXBADDR, 0x0);
1446 mtdcr (MAL0_RXBADDR, 0x0);
1447 #endif
1448 mtdcr (MAL0_TXCTP0R, hw_p->tx_phys);
1449 mtdcr (MAL0_RXCTP0R, hw_p->rx_phys);
1450 /* set RX buffer size */
1451 mtdcr (MAL0_RCBS0, ENET_MAX_MTU_ALIGNED / 16);
1452 break;
1453 }
1454
1455 /* Enable MAL transmit and receive channels */
1456 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
1457 mtdcr (MAL0_TXCASR, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
1458 #else
1459 mtdcr (MAL0_TXCASR, (MAL_TXRX_CASR >> hw_p->devnum));
1460 #endif
1461 mtdcr (MAL0_RXCASR, (MAL_TXRX_CASR >> hw_p->devnum));
1462
1463 /* set transmit enable & receive enable */
1464 out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_TXE | EMAC_MR0_RXE);
1465
1466 mode_reg = in_be32((void *)EMAC0_MR1 + hw_p->hw_addr);
1467
1468 /* set rx-/tx-fifo size */
1469 mode_reg = (mode_reg & ~EMAC_MR1_FIFO_MASK) | EMAC_MR1_FIFO_SIZE;
1470
1471 /* set speed */
1472 if (speed == _1000BASET) {
1473 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
1474 unsigned long pfc1;
1475
1476 mfsdr (SDR0_PFC1, pfc1);
1477 pfc1 |= SDR0_PFC1_EM_1000;
1478 mtsdr (SDR0_PFC1, pfc1);
1479 #endif
1480 mode_reg = mode_reg | EMAC_MR1_MF_1000MBPS | EMAC_MR1_IST;
1481 } else if (speed == _100BASET)
1482 mode_reg = mode_reg | EMAC_MR1_MF_100MBPS | EMAC_MR1_IST;
1483 else
1484 mode_reg = mode_reg & ~0x00C00000; /* 10 MBPS */
1485 if (duplex == FULL)
1486 mode_reg = mode_reg | 0x80000000 | EMAC_MR1_IST;
1487
1488 out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
1489
1490 	/* Enable broadcast and individual address */
1491 /* TBS: enabling runts as some misbehaved nics will send runts */
1492 out_be32((void *)EMAC0_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
1493
1494 /* we probably need to set the tx mode1 reg? maybe at tx time */
1495
1496 /* set transmit request threshold register */
1497 out_be32((void *)EMAC0_TRTR + hw_p->hw_addr, 0x18000000); /* 256 byte threshold */
1498
1499 /* set receive low/high water mark register */
1500 #if defined(CONFIG_440)
1501 /* 440s has a 64 byte burst length */
1502 out_be32((void *)EMAC0_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
1503 #else
1504 /* 405s have a 16 byte burst length */
1505 out_be32((void *)EMAC0_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
1506 #endif /* defined(CONFIG_440) */
1507 out_be32((void *)EMAC0_TMR1 + hw_p->hw_addr, 0xf8640000);
1508
1509 /* Set fifo limit entry in tx mode 0 */
1510 out_be32((void *)EMAC0_TMR0 + hw_p->hw_addr, 0x00000003);
1511 /* Frame gap set */
1512 out_be32((void *)EMAC0_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
1513
1514 /* Set EMAC IER */
1515 hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
1516 if (speed == _100BASET)
1517 hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
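/*
 * SYE is only enabled at 100 Mbps; see the 23-Aug-99 changelog entry
 * at the top of this file about turning it off when running at 10 Mbps.
 */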
1518
1519 out_be32((void *)EMAC0_ISR + hw_p->hw_addr, 0xffffffff); /* clear pending interrupts */
1520 out_be32((void *)EMAC0_IER + hw_p->hw_addr, hw_p->emac_ier);
1521
1522 if (hw_p->first_init == 0) {
1523 /*
1524 * Connect interrupt service routines
1525 */
1526 irq_install_handler(ETH_IRQ_NUM(hw_p->devnum),
1527 (interrupt_handler_t *) enetInt, dev);
1528 }
1529
1530 mtmsr (msr); /* enable interrupts again */
1531
1532 hw_p->bis = bis;
1533 hw_p->first_init = 1;
1534
1535 return 0;
1536 }
1537
1538
1539 static int ppc_4xx_eth_send(struct eth_device *dev, void *ptr, int len)
1540 {
1541 struct enet_frame *ef_ptr;
1542 ulong time_start, time_now;
1543 unsigned long temp_txm0;
1544 EMAC_4XX_HW_PST hw_p = dev->priv;
1545
1546 ef_ptr = (struct enet_frame *) ptr;
1547
1548 /*-----------------------------------------------------------------------+
1549 	 * Copy our address into the frame.
1550 *-----------------------------------------------------------------------*/
1551 (void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
1552
1553 /*-----------------------------------------------------------------------+
1554 	 * If the frame is too long, truncate it to the maximum MTU.
1555 *-----------------------------------------------------------------------*/
1556 /* TBS: where does the fragment go???? */
1557 if (len > ENET_MAX_MTU)
1558 len = ENET_MAX_MTU;
1559
1560 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
1561 memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
1562 flush_dcache_range((u32)hw_p->txbuf_ptr, (u32)hw_p->txbuf_ptr + len);
1563
1564 /*-----------------------------------------------------------------------+
1565 * set TX Buffer busy, and send it
1566 *-----------------------------------------------------------------------*/
1567 hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
1568 EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
1569 ~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
1570 if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
1571 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
1572
1573 hw_p->tx[hw_p->tx_slot].data_len = (short) len;
1574 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
1575
1576 sync();
1577
1578 out_be32((void *)EMAC0_TMR0 + hw_p->hw_addr,
1579 in_be32((void *)EMAC0_TMR0 + hw_p->hw_addr) | EMAC_TMR0_GNP0);
1580 #ifdef INFO_4XX_ENET
1581 hw_p->stats.pkts_tx++;
1582 #endif
1583
1584 /*-----------------------------------------------------------------------+
1585 	 * poll until the packet is sent and then make sure it is OK
1586 *-----------------------------------------------------------------------*/
1587 time_start = get_timer (0);
1588 while (1) {
1589 temp_txm0 = in_be32((void *)EMAC0_TMR0 + hw_p->hw_addr);
1590 		/* loop until GNP0 clears (packet taken by the EMAC) or 3 seconds elapse */
1591 		if ((temp_txm0 & EMAC_TMR0_GNP0) != 0) {
1592 			/* GNP0 is still set, so keep polling. If an
1593 			 * error occurred, an interrupt should
1594 			 * happen when we return
1595 			 */
1596 time_now = get_timer (0);
1597 if ((time_now - time_start) > 3000) {
1598 return (-1);
1599 }
1600 } else {
1601 return (len);
1602 }
1603 }
1604 }
1605
1606 int enetInt (struct eth_device *dev)
1607 {
1608 int serviced;
1609 int rc = -1; /* default to not us */
1610 u32 mal_isr;
1611 u32 emac_isr = 0;
1612 u32 mal_eob;
1613 u32 uic_mal;
1614 u32 uic_mal_err;
1615 u32 uic_emac;
1616 u32 uic_emac_b;
1617 EMAC_4XX_HW_PST hw_p;
1618
1619 /*
1620 * Because the mal is generic, we need to get the current
1621 * eth device
1622 */
1623 dev = eth_get_dev();
1624
1625 hw_p = dev->priv;
1626
1627 /* enter loop that stays in interrupt code until nothing to service */
1628 do {
1629 serviced = 0;
1630
1631 uic_mal = mfdcr(UIC_BASE_MAL + UIC_MSR);
1632 uic_mal_err = mfdcr(UIC_BASE_MAL_ERR + UIC_MSR);
1633 uic_emac = mfdcr(UIC_BASE_EMAC + UIC_MSR);
1634 uic_emac_b = mfdcr(UIC_BASE_EMAC_B + UIC_MSR);
1635
1636 if (!(uic_mal & (UIC_MAL_RXEOB | UIC_MAL_TXEOB))
1637 && !(uic_mal_err & (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE))
1638 && !(uic_emac & UIC_ETHx) && !(uic_emac_b & UIC_ETHxB)) {
1639 /* not for us */
1640 return (rc);
1641 }
1642
1643 /* get and clear controller status interrupts */
1644 /* look at MAL and EMAC error interrupts */
1645 if (uic_mal_err & (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)) {
1646 /* we have a MAL error interrupt */
1647 mal_isr = mfdcr(MAL0_ESR);
1648 mal_err(dev, mal_isr, uic_mal_err,
1649 MAL_UIC_DEF, MAL_UIC_ERR);
1650
1651 /* clear MAL error interrupt status bits */
1652 mtdcr(UIC_BASE_MAL_ERR + UIC_SR,
1653 UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE);
1654
1655 return -1;
1656 }
1657
1658 /* look for EMAC errors */
1659 if ((uic_emac & UIC_ETHx) || (uic_emac_b & UIC_ETHxB)) {
1660 emac_isr = in_be32((void *)EMAC0_ISR + hw_p->hw_addr);
1661 emac_err(dev, emac_isr);
1662
1663 /* clear EMAC error interrupt status bits */
1664 mtdcr(UIC_BASE_EMAC + UIC_SR, UIC_ETHx);
1665 mtdcr(UIC_BASE_EMAC_B + UIC_SR, UIC_ETHxB);
1666
1667 return -1;
1668 }
1669
1670 		/* handle MAL TX EOB interrupt from a tx */
1671 if (uic_mal & UIC_MAL_TXEOB) {
1672 /* clear MAL interrupt status bits */
1673 mal_eob = mfdcr(MAL0_TXEOBISR);
1674 mtdcr(MAL0_TXEOBISR, mal_eob);
1675 mtdcr(UIC_BASE_MAL + UIC_SR, UIC_MAL_TXEOB);
1676
1677 /* indicate that we serviced an interrupt */
1678 serviced = 1;
1679 rc = 0;
1680 }
1681
1682 /* handle MAL RX EOB interrupt from a receive */
1683 /* check for EOB on valid channels */
1684 if (uic_mal & UIC_MAL_RXEOB) {
1685 mal_eob = mfdcr(MAL0_RXEOBISR);
1686 if (mal_eob &
1687 (0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL))) {
1688 /* push packet to upper layer */
1689 enet_rcv(dev, emac_isr);
1690
1691 /* clear MAL interrupt status bits */
1692 mtdcr(UIC_BASE_MAL + UIC_SR, UIC_MAL_RXEOB);
1693
1694 /* indicate that we serviced an interrupt */
1695 serviced = 1;
1696 rc = 0;
1697 }
1698 }
1699 #if defined(CONFIG_405EZ)
1700 /*
1701 * On 405EZ the RX-/TX-interrupts are coalesced into
1702 * one IRQ bit in the UIC. We need to acknowledge the
1703 * RX-/TX-interrupts in the SDR0_ICINTSTAT reg as well.
1704 */
1705 mtsdr(SDR0_ICINTSTAT,
1706 SDR_ICRX_STAT | SDR_ICTX0_STAT | SDR_ICTX1_STAT);
1707 #endif /* defined(CONFIG_405EZ) */
1708 } while (serviced);
1709
1710 return (rc);
1711 }
1712
1713 /*-----------------------------------------------------------------------------+
1714 * MAL Error Routine
1715 *-----------------------------------------------------------------------------*/
1716 static void mal_err (struct eth_device *dev, unsigned long isr,
1717 unsigned long uic, unsigned long maldef,
1718 unsigned long mal_errr)
1719 {
1720 mtdcr (MAL0_ESR, isr); /* clear interrupt */
1721
1722 /* clear DE interrupt */
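/* TXDEIR/RXDEIR are write-one-to-clear: 0xC0000000 clears the
 * TX channel 0/1 bits, 0x80000000 the RX channel 0 bit
 */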
1723 mtdcr (MAL0_TXDEIR, 0xC0000000);
1724 mtdcr (MAL0_RXDEIR, 0x80000000);
1725
1726 #ifdef INFO_4XX_ENET
1727 printf("\nMAL error occurred.... ISR = %lx UIC = %lx MAL_DEF = %lx MAL_ERR = %lx\n",
1728 isr, uic, maldef, mal_errr);
1729 #endif
1730
1731 eth_init(); /* start again... */
1732 }
1733
1734 /*-----------------------------------------------------------------------------+
1735 * EMAC Error Routine
1736 *-----------------------------------------------------------------------------*/
1737 static void emac_err (struct eth_device *dev, unsigned long isr)
1738 {
1739 EMAC_4XX_HW_PST hw_p = dev->priv;
1740
1741 printf ("EMAC%d error occurred.... ISR = %lx\n", hw_p->devnum, isr);
1742 out_be32((void *)EMAC0_ISR + hw_p->hw_addr, isr);
1743 }
1744
1745 /*-----------------------------------------------------------------------------+
1746 * enet_rcv() handles the ethernet receive data
1747 *-----------------------------------------------------------------------------*/
1748 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1749 {
1750 unsigned long data_len;
1751 unsigned long rx_eob_isr;
1752 EMAC_4XX_HW_PST hw_p = dev->priv;
1753
1754 int handled = 0;
1755 int i;
1756 int loop_count = 0;
1757
1758 rx_eob_isr = mfdcr (MAL0_RXEOBISR);
1759 if ((0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL)) & rx_eob_isr) {
1760 /* clear EOB */
1761 mtdcr (MAL0_RXEOBISR, rx_eob_isr);
1762
1763 /* EMAC RX done */
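/* walk the RX descriptor ring starting at rx_slot until an
 * EMPTY descriptor is found or all NUM_RX_BUFF slots are seen
 */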
1764 while (1) { /* do all */
1765 i = hw_p->rx_slot;
1766
1767 if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1768 || (loop_count >= NUM_RX_BUFF))
1769 break;
1770
1771 loop_count++;
1772 handled++;
1773 data_len = (unsigned long) hw_p->rx[i].data_len & 0x0fff; /* Get len */
1774 if (data_len) {
1775 if (data_len > ENET_MAX_MTU) /* Check len */
1776 data_len = 0;
1777 else {
1778 if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) { /* Check Errors */
1779 data_len = 0;
1780 hw_p->stats.rx_err_log[hw_p->
1781 rx_err_index]
1782 = hw_p->rx[i].ctrl;
1783 hw_p->rx_err_index++;
1784 if (hw_p->rx_err_index ==
1785 MAX_ERR_LOG)
1786 hw_p->rx_err_index =
1787 0;
1788 } /* emac_errors */
1789 } /* data_len < max mtu */
1790 } /* if data_len */
1791 if (!data_len) { /* no data */
1792 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY; /* Free Recv Buffer */
1793
1794 hw_p->stats.data_len_err++; /* Error at Rx */
1795 }
1796
1797 /* end of !data_len handling */
1798 /* AS.HARNOIS: check whether the user has already consumed
1799 * this buffer; if not, the receive ring is full => error
1800 */
1801 else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1802 if (hw_p->is_receiving)
1803 printf ("ERROR : Receive buffers are full!\n");
1804 break;
1805 } else {
1806 hw_p->stats.rx_frames++;
1807 hw_p->stats.rx += data_len;
1808 #ifdef INFO_4XX_ENET
1809 hw_p->stats.pkts_rx++;
1810 #endif
1811 /* AS.HARNOIS
1812 * use ring buffer
1813 */
1814 hw_p->rx_ready[hw_p->rx_i_index] = i;
1815 hw_p->rx_i_index++;
1816 if (NUM_RX_BUFF == hw_p->rx_i_index)
1817 hw_p->rx_i_index = 0;
1818
1819 hw_p->rx_slot++;
1820 if (NUM_RX_BUFF == hw_p->rx_slot)
1821 hw_p->rx_slot = 0;
1822
1823 /* AS.HARNOIS
1824 * free receive buffer only when
1825 * buffer has been handled (eth_rx)
1826 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1827 */
1828 } /* if data_len */
1829 } /* while */
1830 } /* if EMAC RX channel EOB */
1831 }
1832
1833
1834 static int ppc_4xx_eth_rx (struct eth_device *dev)
1835 {
1836 int length;
1837 int user_index;
1838 unsigned long msr;
1839 EMAC_4XX_HW_PST hw_p = dev->priv;
1840
1841 hw_p->is_receiving = 1; /* tell driver */
1842
1843 for (;;) {
1844 /* AS.HARNOIS
1845 * use ring buffer and
1846 * get index from rx buffer descriptor queue
1847 */
1848 user_index = hw_p->rx_ready[hw_p->rx_u_index];
1849 if (user_index == -1) {
1850 length = -1;
1851 break; /* nothing received - leave for() loop */
1852 }
1853
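/* mask external interrupts (MSR[EE]) while this descriptor is
 * handed to the stack and the ring indices are updated
 */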
1854 msr = mfmsr ();
1855 mtmsr (msr & ~(MSR_EE));
1856
1857 length = hw_p->rx[user_index].data_len & 0x0fff;
1858
1859 /*
1860 * Pass the packet up to the protocol layers.
1861 * net_process_received_packet(net_rx_packets[rxIdx],
1862 * length - 4);
1863 * net_process_received_packet(net_rx_packets[i], length);
1864 */
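/* invalidate the cache lines covering the DMA'd frame before the
 * CPU reads it; the final 4 bytes (the FCS) are not passed up
 */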
1865 invalidate_dcache_range((u32)hw_p->rx[user_index].data_ptr,
1866 (u32)hw_p->rx[user_index].data_ptr +
1867 length - 4);
1868 net_process_received_packet(net_rx_packets[user_index],
1869 length - 4);
1870 /* Free Recv Buffer */
1871 hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1872 /* Free rx buffer descriptor queue */
1873 hw_p->rx_ready[hw_p->rx_u_index] = -1;
1874 hw_p->rx_u_index++;
1875 if (NUM_RX_BUFF == hw_p->rx_u_index)
1876 hw_p->rx_u_index = 0;
1877
1878 #ifdef INFO_4XX_ENET
1879 hw_p->stats.pkts_handled++;
1880 #endif
1881
1882 mtmsr (msr); /* restore MSR (re-enable external interrupts) */
1883 }
1884
1885 hw_p->is_receiving = 0; /* tell driver */
1886
1887 return length;
1888 }
1889
1890 int ppc_4xx_eth_initialize (bd_t * bis)
1891 {
1892 static int virgin = 0;
1893 struct eth_device *dev;
1894 int eth_num = 0;
1895 EMAC_4XX_HW_PST hw = NULL;
1896 u8 ethaddr[4 + CONFIG_EMAC_NR_START][6];
1897 u32 hw_addr[4];
1898 u32 mal_ier;
1899
1900 #if defined(CONFIG_440GX)
1901 unsigned long pfc1;
1902
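/* 440GX only: reprogram a pin-function control field in
 * SDR0_PFC1 before bringing up the EMACs
 */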
1903 mfsdr (SDR0_PFC1, pfc1);
1904 pfc1 &= ~(0x01e00000);
1905 pfc1 |= 0x01200000;
1906 mtsdr (SDR0_PFC1, pfc1);
1907 #endif
1908
1909 /* first clear all mac-addresses */
1910 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++)
1911 memcpy(ethaddr[eth_num], "\0\0\0\0\0\0", 6);
1912
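/* look up each EMAC's MAC address in the environment and record
 * its register offset (hw_addr) relative to EMAC0
 */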
1913 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1914 int ethaddr_idx = eth_num + CONFIG_EMAC_NR_START;
1915 switch (eth_num) {
1916 default: /* fall through */
1917 case 0:
1918 eth_getenv_enetaddr("ethaddr", ethaddr[ethaddr_idx]);
1919 hw_addr[eth_num] = 0x0;
1920 break;
1921 #ifdef CONFIG_HAS_ETH1
1922 case 1:
1923 eth_getenv_enetaddr("eth1addr", ethaddr[ethaddr_idx]);
1924 hw_addr[eth_num] = 0x100;
1925 break;
1926 #endif
1927 #ifdef CONFIG_HAS_ETH2
1928 case 2:
1929 eth_getenv_enetaddr("eth2addr", ethaddr[ethaddr_idx]);
1930 #if defined(CONFIG_460GT)
1931 hw_addr[eth_num] = 0x300;
1932 #else
1933 hw_addr[eth_num] = 0x400;
1934 #endif
1935 break;
1936 #endif
1937 #ifdef CONFIG_HAS_ETH3
1938 case 3:
1939 eth_getenv_enetaddr("eth3addr", ethaddr[ethaddr_idx]);
1940 #if defined(CONFIG_460GT)
1941 hw_addr[eth_num] = 0x400;
1942 #else
1943 hw_addr[eth_num] = 0x600;
1944 #endif
1945 break;
1946 #endif
1947 }
1948 }
1949
1950 /* set phy num and mode */
1951 bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1952 bis->bi_phymode[0] = 0;
1953
1954 #if defined(CONFIG_PHY1_ADDR)
1955 bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1956 bis->bi_phymode[1] = 0;
1957 #endif
1958 #if defined(CONFIG_440GX)
1959 bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1960 bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
1961 bis->bi_phymode[2] = 2;
1962 bis->bi_phymode[3] = 2;
1963 #endif
1964
1965 #if defined(CONFIG_440GX) || \
1966 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1967 defined(CONFIG_405EX)
1968 ppc_4xx_eth_setup_bridge(0, bis);
1969 #endif
1970
1971 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1972 /*
1973 * See if we can actually bring up the interface,
1974 * otherwise, skip it
1975 */
1976 if (memcmp (ethaddr[eth_num], "\0\0\0\0\0\0", 6) == 0) {
1977 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1978 continue;
1979 }
1980
1981 /* Allocate device structure */
1982 dev = (struct eth_device *) malloc (sizeof (*dev));
1983 if (dev == NULL) {
1984 printf ("ppc_4xx_eth_initialize: "
1985 "Cannot allocate eth_device %d\n", eth_num);
1986 return (-1);
1987 }
1988 memset(dev, 0, sizeof(*dev));
1989
1990 /* Allocate our private use data */
1991 hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
1992 if (hw == NULL) {
1993 printf ("ppc_4xx_eth_initialize: "
1994 "Cannot allocate private hw data for eth_device %d",
1995 eth_num);
1996 free (dev);
1997 return (-1);
1998 }
1999 memset(hw, 0, sizeof(*hw));
2000
2001 hw->hw_addr = hw_addr[eth_num];
2002 memcpy (dev->enetaddr, ethaddr[eth_num], 6);
2003 hw->devnum = eth_num;
2004 hw->print_speed = 1;
2005
2006 sprintf (dev->name, "ppc_4xx_eth%d", eth_num - CONFIG_EMAC_NR_START);
2007 dev->priv = (void *) hw;
2008 dev->init = ppc_4xx_eth_init;
2009 dev->halt = ppc_4xx_eth_halt;
2010 dev->send = ppc_4xx_eth_send;
2011 dev->recv = ppc_4xx_eth_rx;
2012
2013 eth_register(dev);
2014
2015 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
2016 int retval;
2017 struct mii_dev *mdiodev = mdio_alloc();
2018 if (!mdiodev)
2019 return -ENOMEM;
2020 strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
2021 mdiodev->read = emac4xx_miiphy_read;
2022 mdiodev->write = emac4xx_miiphy_write;
2023
2024 retval = mdio_register(mdiodev);
2025 if (retval < 0)
2026 return retval;
2027 #endif
2028
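/* one-time MAL setup: program the MAL interrupt enables and hook
 * the MAL interrupt vectors only for the first device registered
 */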
2029 if (0 == virgin) {
2030 /* set the MAL IER (note: bit names may change with new spec) */
2031 #if defined(CONFIG_440SPE) || \
2032 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
2033 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
2034 defined(CONFIG_405EX)
2035 mal_ier =
2036 MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE |
2037 MAL_IER_DE | MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE ;
2038 #else
2039 mal_ier =
2040 MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
2041 MAL_IER_OPBE | MAL_IER_PLBE;
2042 #endif
2043 mtdcr (MAL0_ESR, 0xffffffff); /* clear pending interrupts */
2044 mtdcr (MAL0_TXDEIR, 0xffffffff); /* clear pending interrupts */
2045 mtdcr (MAL0_RXDEIR, 0xffffffff); /* clear pending interrupts */
2046 mtdcr (MAL0_IER, mal_ier);
2047
2048 /* install MAL interrupt handler */
2049 irq_install_handler (VECNUM_MAL_SERR,
2050 (interrupt_handler_t *) enetInt,
2051 dev);
2052 irq_install_handler (VECNUM_MAL_TXEOB,
2053 (interrupt_handler_t *) enetInt,
2054 dev);
2055 irq_install_handler (VECNUM_MAL_RXEOB,
2056 (interrupt_handler_t *) enetInt,
2057 dev);
2058 irq_install_handler (VECNUM_MAL_TXDE,
2059 (interrupt_handler_t *) enetInt,
2060 dev);
2061 irq_install_handler (VECNUM_MAL_RXDE,
2062 (interrupt_handler_t *) enetInt,
2063 dev);
2064 virgin = 1;
2065 }
2066 } /* end for each supported device */
2067
2068 return 0;
2069 }