1 /*-----------------------------------------------------------------------------+
2 *
3 * This source code has been made available to you by IBM on an AS-IS
4 * basis. Anyone receiving this source is licensed under IBM
5 * copyrights to use it in any way he or she deems fit, including
6 * copying it, modifying it, compiling it, and redistributing it either
7 * with or without modifications. No license under IBM patents or
8 * patent applications is to be implied by the copyright license.
9 *
10 * Any user of this software should understand that IBM cannot provide
11 * technical support for this software and will not be responsible for
12 * any consequences resulting from the use of this software.
13 *
14 * Any person who transfers this source code or any derivative work
15 * must include the IBM copyright notice, this paragraph, and the
16 * preceding two paragraphs in the transferred software.
17 *
18 * COPYRIGHT I B M CORPORATION 1995
19 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
20 *-----------------------------------------------------------------------------*/
21 /*-----------------------------------------------------------------------------+
22 *
23 * File Name: enetemac.c
24 *
25 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
26 *
27 * Author: Mark Wisner
28 *
29 * Change Activity-
30 *
31 * Date Description of Change BY
32 * --------- --------------------- ---
33 * 05-May-99 Created MKW
34 * 27-Jun-99 Clean up JWB
35 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
36 * 29-Jul-99 Added Full duplex support MKW
37 * 06-Aug-99 Changed names for Mal CR reg MKW
38 * 23-Aug-99 Turned off SYE when running at 10Mbs MKW
39 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
40 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
41 * to avoid chaining maximum sized packets. Push starting
42 * RX descriptor address up to the next cache line boundary.
43 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
44 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
45 * EMAC_RXM register. JWB
46 * 12-Mar-01 anne-sophie.harnois@nextream.fr
47 * - Variables are compatible with those already defined in
48 * include/net.h
49 * - Receive buffer descriptor ring is used to send buffers
50 * to the user
51 * - Info print about send/received/handled packet number if
52 * INFO_405_ENET is set
53 * 17-Apr-01 stefan.roese@esd-electronics.com
54 * - MAL reset in "eth_halt" included
55 * - Enet speed and duplex output now in one line
56 * 08-May-01 stefan.roese@esd-electronics.com
57 * - MAL error handling added (eth_init called again)
58 * 13-Nov-01 stefan.roese@esd-electronics.com
59 * - Set IST bit in EMAC_M1 reg upon 100MBit or full duplex
60 * 04-Jan-02 stefan.roese@esd-electronics.com
61 * - Wait for PHY auto negotiation to complete added
62 * 06-Feb-02 stefan.roese@esd-electronics.com
63 * - Bug fixed in waiting for auto negotiation to complete
64 * 26-Feb-02 stefan.roese@esd-electronics.com
65 * - rx and tx buffer descriptors now allocated (no fixed address
66 * used anymore)
67 * 17-Jun-02 stefan.roese@esd-electronics.com
68 * - MAL error debug printf 'M' removed (rx de interrupt may
69 * occur upon many incoming packets with only 4 rx buffers).
70 *-----------------------------------------------------------------------------*
71 * 17-Nov-03 travis.sawyer@sandburst.com
   72  *	  - ported from 405gp_enet.c to utilize up to 4 EMAC ports
73 * in the 440GX. This port should work with the 440GP
74 * (2 EMACs) also
75 * 15-Aug-05 sr@denx.de
76 * - merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
   77  *	    now handling all 4xx CPUs.
78 *-----------------------------------------------------------------------------*/
79
80 #include <config.h>
81 #include <common.h>
82 #include <net.h>
83 #include <asm/processor.h>
84 #include <asm/io.h>
85 #include <asm/cache.h>
86 #include <asm/mmu.h>
87 #include <commproc.h>
88 #include <ppc4xx.h>
89 #include <ppc4xx_enet.h>
90 #include <405_mal.h>
91 #include <miiphy.h>
92 #include <malloc.h>
93 #include <asm/ppc4xx-intvec.h>
94
95 /*
96 * Only compile for platform with AMCC EMAC ethernet controller and
97 * network support enabled.
98 * Remark: CONFIG_405 describes Xilinx PPC405 FPGA without EMAC controller!
99 */
100 #if defined(CONFIG_CMD_NET) && !defined(CONFIG_405) && !defined(CONFIG_IOP480)
101
102 #if !(defined(CONFIG_MII) || defined(CONFIG_CMD_MII))
103 #error "CONFIG_MII has to be defined!"
104 #endif
105
106 #if defined(CONFIG_NETCONSOLE) && !defined(CONFIG_NET_MULTI)
107 #error "CONFIG_NET_MULTI has to be defined for NetConsole"
108 #endif
109
110 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
111 #define PHY_AUTONEGOTIATE_TIMEOUT 5000 /* 5000 ms autonegotiate timeout */
112
113 /* Ethernet Transmit and Receive Buffers */
114 /* AS.HARNOIS
115 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
116 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
117 */
118 #define ENET_MAX_MTU PKTSIZE
119 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
120
121 /*-----------------------------------------------------------------------------+
122 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
123 * Interrupt Controller).
124 *-----------------------------------------------------------------------------*/
125 #define MAL_UIC_ERR ( UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
126 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
127 #define EMAC_UIC_DEF UIC_ENET
128 #define EMAC_UIC_DEF1 UIC_ENET1
129 #define SEL_UIC_DEF(p) (p ? UIC_ENET1 : UIC_ENET )
130
131 #undef INFO_4XX_ENET
132
133 #define BI_PHYMODE_NONE 0
134 #define BI_PHYMODE_ZMII 1
135 #define BI_PHYMODE_RGMII 2
136 #define BI_PHYMODE_GMII 3
137 #define BI_PHYMODE_RTBI 4
138 #define BI_PHYMODE_TBI 5
139 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
140 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
141 defined(CONFIG_405EX)
142 #define BI_PHYMODE_SMII 6
143 #define BI_PHYMODE_MII 7
144 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
145 #define BI_PHYMODE_RMII 8
146 #endif
147 #endif
148
149 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
150 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
151 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
152 defined(CONFIG_405EX)
153 #define SDR0_MFR_ETH_CLK_SEL_V(n) ((0x01<<27) / (n+1))
154 #endif
155
156 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
157 #define SDR0_ETH_CFG_CLK_SEL_V(n) (0x01 << (8 + n))
158 #endif
159
160 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
161 #define MAL_RX_CHAN_MUL 8 /* 460EX/GT uses MAL channel 8 for EMAC1 */
162 #else
163 #define MAL_RX_CHAN_MUL 1
164 #endif
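/*
 * MAL_RX_CHAN_MUL maps an EMAC number to its MAL RX channel: the RX EOB
 * bit tested in the interrupt and receive paths is
 * (0x80000000 >> (devnum * MAL_RX_CHAN_MUL)), since on 460EX/GT EMAC1
 * uses MAL RX channel 8 (malrxctp8r/malrcbs8) rather than channel 1.
 */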
165
166 /*-----------------------------------------------------------------------------+
167 * Global variables. TX and RX descriptors and buffers.
168 *-----------------------------------------------------------------------------*/
169 /* IER globals */
170 static uint32_t mal_ier;
171
172 #if !defined(CONFIG_NET_MULTI)
173 struct eth_device *emac0_dev = NULL;
174 #endif
175
176 /*
177 * Get count of EMAC devices (doesn't have to be the max. possible number
178 * supported by the cpu)
179 *
  180  * CONFIG_BOARD_EMAC_COUNT was added to provide a "dynamic" way to configure
  181  * the EMAC count. This is needed for the Kilauea/Haleakala 405EX/405EXr
  182  * eval boards, which use the same binary.
183 */
184 #if defined(CONFIG_BOARD_EMAC_COUNT)
185 #define LAST_EMAC_NUM board_emac_count()
186 #else /* CONFIG_BOARD_EMAC_COUNT */
187 #if defined(CONFIG_HAS_ETH3)
188 #define LAST_EMAC_NUM 4
189 #elif defined(CONFIG_HAS_ETH2)
190 #define LAST_EMAC_NUM 3
191 #elif defined(CONFIG_HAS_ETH1)
192 #define LAST_EMAC_NUM 2
193 #else
194 #define LAST_EMAC_NUM 1
195 #endif
196 #endif /* CONFIG_BOARD_EMAC_COUNT */
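/*
 * Illustrative sketch only (not taken from a real board file): with
 * CONFIG_BOARD_EMAC_COUNT defined, the board code provides
 * board_emac_count() (prototype further below). A board that only wires
 * up two EMACs might implement it roughly as follows - the return value
 * here is purely an example:
 *
 *	int board_emac_count(void)
 *	{
 *		return 2;	-- e.g. only EMAC0 and EMAC1 populated
 *	}
 */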
197
198 /* normal boards start with EMAC0 */
199 #if !defined(CONFIG_EMAC_NR_START)
200 #define CONFIG_EMAC_NR_START 0
201 #endif
202
203 #if defined(CONFIG_405EX) || defined(CONFIG_440EPX)
204 #define ETH_IRQ_NUM(dev) (VECNUM_ETH0 + ((dev)))
205 #else
206 #define ETH_IRQ_NUM(dev) (VECNUM_ETH0 + ((dev) * 2))
207 #endif
208
209 #define MAL_RX_DESC_SIZE 2048
210 #define MAL_TX_DESC_SIZE 2048
211 #define MAL_ALLOC_SIZE (MAL_TX_DESC_SIZE + MAL_RX_DESC_SIZE)
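/*
 * One MAL_ALLOC_SIZE block is allocated per EMAC: the first
 * MAL_TX_DESC_SIZE bytes hold the TX descriptors, the following
 * MAL_RX_DESC_SIZE bytes the RX descriptors (see the tx_phys/rx_phys
 * setup in ppc_4xx_eth_init() below).
 */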
212
213 /*-----------------------------------------------------------------------------+
214 * Prototypes and externals.
215 *-----------------------------------------------------------------------------*/
216 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
217
218 int enetInt (struct eth_device *dev);
219 static void mal_err (struct eth_device *dev, unsigned long isr,
220 unsigned long uic, unsigned long maldef,
221 unsigned long mal_errr);
222 static void emac_err (struct eth_device *dev, unsigned long isr);
223
224 extern int phy_setup_aneg (char *devname, unsigned char addr);
225 extern int emac4xx_miiphy_read (char *devname, unsigned char addr,
226 unsigned char reg, unsigned short *value);
227 extern int emac4xx_miiphy_write (char *devname, unsigned char addr,
228 unsigned char reg, unsigned short value);
229
230 int board_emac_count(void);
231
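/*
 * On the SoCs handled below these helpers select the internal (loopback)
 * clock for the EMAC so that it stays clocked while the EMAC_M0_SRST soft
 * reset is performed (see ppc_4xx_eth_halt() and ppc_4xx_eth_init());
 * on all other SoCs they are no-ops.
 */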
232 static void emac_loopback_enable(EMAC_4XX_HW_PST hw_p)
233 {
234 #if defined(CONFIG_440SPE) || \
235 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
236 defined(CONFIG_405EX)
237 u32 val;
238
239 mfsdr(sdr_mfr, val);
240 val |= SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
241 mtsdr(sdr_mfr, val);
242 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
243 u32 val;
244
245 mfsdr(SDR0_ETH_CFG, val);
246 val |= SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
247 mtsdr(SDR0_ETH_CFG, val);
248 #endif
249 }
250
251 static void emac_loopback_disable(EMAC_4XX_HW_PST hw_p)
252 {
253 #if defined(CONFIG_440SPE) || \
254 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
255 defined(CONFIG_405EX)
256 u32 val;
257
258 mfsdr(sdr_mfr, val);
259 val &= ~SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
260 mtsdr(sdr_mfr, val);
261 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
262 u32 val;
263
264 mfsdr(SDR0_ETH_CFG, val);
265 val &= ~SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
266 mtsdr(SDR0_ETH_CFG, val);
267 #endif
268 }
269
270 /*-----------------------------------------------------------------------------+
271 | ppc_4xx_eth_halt
272 | Disable MAL channel, and EMACn
273 +-----------------------------------------------------------------------------*/
274 static void ppc_4xx_eth_halt (struct eth_device *dev)
275 {
276 EMAC_4XX_HW_PST hw_p = dev->priv;
277 u32 val = 10000;
278
279 out_be32((void *)EMAC_IER + hw_p->hw_addr, 0x00000000); /* disable emac interrupts */
280
281 /* 1st reset MAL channel */
282 /* Note: writing a 0 to a channel has no effect */
283 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
284 mtdcr (maltxcarr, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
285 #else
286 mtdcr (maltxcarr, (MAL_CR_MMSR >> hw_p->devnum));
287 #endif
288 mtdcr (malrxcarr, (MAL_CR_MMSR >> hw_p->devnum));
289
290 /* wait for reset */
291 while (mfdcr (malrxcasr) & (MAL_CR_MMSR >> hw_p->devnum)) {
292 udelay (1000); /* Delay 1 MS so as not to hammer the register */
293 val--;
294 if (val == 0)
295 break;
296 }
297
298 /* provide clocks for EMAC internal loopback */
299 emac_loopback_enable(hw_p);
300
301 /* EMAC RESET */
302 out_be32((void *)EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
303
304 /* remove clocks for EMAC internal loopback */
305 emac_loopback_disable(hw_p);
306
307 #ifndef CONFIG_NETCONSOLE
308 hw_p->print_speed = 1; /* print speed message again next time */
309 #endif
310
311 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
312 /* don't bypass the TAHOE0/TAHOE1 cores for Linux */
313 mfsdr(SDR0_ETH_CFG, val);
314 val &= ~(SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
315 mtsdr(SDR0_ETH_CFG, val);
316 #endif
317
318 return;
319 }
320
321 #if defined (CONFIG_440GX)
322 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
323 {
324 unsigned long pfc1;
325 unsigned long zmiifer;
326 unsigned long rmiifer;
327
328 mfsdr(sdr_pfc1, pfc1);
329 pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
330
331 zmiifer = 0;
332 rmiifer = 0;
333
334 switch (pfc1) {
335 case 1:
336 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
337 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
338 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
339 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
340 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
341 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
342 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
343 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
344 break;
345 case 2:
346 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
347 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
348 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
349 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
350 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
351 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
352 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
353 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
354 break;
355 case 3:
356 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
357 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
358 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
359 bis->bi_phymode[1] = BI_PHYMODE_NONE;
360 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
361 bis->bi_phymode[3] = BI_PHYMODE_NONE;
362 break;
363 case 4:
364 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
365 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
366 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
367 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
368 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
369 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
370 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
371 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
372 break;
373 case 5:
374 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
375 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
376 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
377 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
378 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
379 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
380 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
381 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
382 break;
383 case 6:
384 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
385 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
386 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
387 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
388 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
389 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
390 break;
391 case 0:
392 default:
393 zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
394 rmiifer = 0x0;
395 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
396 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
397 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
398 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
399 break;
400 }
401
402 /* Ensure we setup mdio for this devnum and ONLY this devnum */
403 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
404
405 out_be32((void *)ZMII_FER, zmiifer);
406 out_be32((void *)RGMII_FER, rmiifer);
407
408 return ((int)pfc1);
409 }
  410 #endif /* CONFIG_440GX */
411
412 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
413 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
414 {
415 unsigned long zmiifer=0x0;
416 unsigned long pfc1;
417
418 mfsdr(sdr_pfc1, pfc1);
419 pfc1 &= SDR0_PFC1_SELECT_MASK;
420
421 switch (pfc1) {
422 case SDR0_PFC1_SELECT_CONFIG_2:
423 /* 1 x GMII port */
424 out_be32((void *)ZMII_FER, 0x00);
425 out_be32((void *)RGMII_FER, 0x00000037);
426 bis->bi_phymode[0] = BI_PHYMODE_GMII;
427 bis->bi_phymode[1] = BI_PHYMODE_NONE;
428 break;
429 case SDR0_PFC1_SELECT_CONFIG_4:
430 /* 2 x RGMII ports */
431 out_be32((void *)ZMII_FER, 0x00);
432 out_be32((void *)RGMII_FER, 0x00000055);
433 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
434 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
435 break;
436 case SDR0_PFC1_SELECT_CONFIG_6:
437 /* 2 x SMII ports */
438 out_be32((void *)ZMII_FER,
439 ((ZMII_FER_SMII) << ZMII_FER_V(0)) |
440 ((ZMII_FER_SMII) << ZMII_FER_V(1)));
441 out_be32((void *)RGMII_FER, 0x00000000);
442 bis->bi_phymode[0] = BI_PHYMODE_SMII;
443 bis->bi_phymode[1] = BI_PHYMODE_SMII;
444 break;
445 case SDR0_PFC1_SELECT_CONFIG_1_2:
446 /* only 1 x MII supported */
447 out_be32((void *)ZMII_FER, (ZMII_FER_MII) << ZMII_FER_V(0));
448 out_be32((void *)RGMII_FER, 0x00000000);
449 bis->bi_phymode[0] = BI_PHYMODE_MII;
450 bis->bi_phymode[1] = BI_PHYMODE_NONE;
451 break;
452 default:
453 break;
454 }
455
456 /* Ensure we setup mdio for this devnum and ONLY this devnum */
457 zmiifer = in_be32((void *)ZMII_FER);
458 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
459 out_be32((void *)ZMII_FER, zmiifer);
460
461 return ((int)0x0);
462 }
463 #endif /* CONFIG_440EPX */
464
465 #if defined(CONFIG_405EX)
466 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
467 {
468 u32 gmiifer = 0;
469
470 /*
471 * Right now only 2*RGMII is supported. Please extend when needed.
472 * sr - 2007-09-19
473 */
474 switch (1) {
475 case 1:
476 /* 2 x RGMII ports */
477 out_be32((void *)RGMII_FER, 0x00000055);
478 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
479 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
480 break;
481 case 2:
482 /* 2 x SMII ports */
483 break;
484 default:
485 break;
486 }
487
488 /* Ensure we setup mdio for this devnum and ONLY this devnum */
489 gmiifer = in_be32((void *)RGMII_FER);
490 gmiifer |= (1 << (19-devnum));
491 out_be32((void *)RGMII_FER, gmiifer);
492
493 return ((int)0x0);
494 }
495 #endif /* CONFIG_405EX */
496
497 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
498 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
499 {
500 u32 eth_cfg;
501 u32 zmiifer; /* ZMII0_FER reg. */
502 u32 rmiifer; /* RGMII0_FER reg. Bridge 0 */
503 u32 rmiifer1; /* RGMII0_FER reg. Bridge 1 */
504 int mode;
505
506 zmiifer = 0;
507 rmiifer = 0;
508 rmiifer1 = 0;
509
510 #if defined(CONFIG_460EX)
511 mode = 9;
512 #else
513 mode = 10;
514 #endif
515
516 /* TODO:
517 * NOTE: 460GT has 2 RGMII bridge cores:
518 * emac0 ------ RGMII0_BASE
519 * |
520 * emac1 -----+
521 *
522 * emac2 ------ RGMII1_BASE
523 * |
524 * emac3 -----+
525 *
526 * 460EX has 1 RGMII bridge core:
527 * and RGMII1_BASE is disabled
528 * emac0 ------ RGMII0_BASE
529 * |
530 * emac1 -----+
531 */
532
533 /*
534 * Right now only 2*RGMII is supported. Please extend when needed.
535 * sr - 2008-02-19
536 */
537 switch (mode) {
538 case 1:
539 /* 1 MII - 460EX */
540 /* GMC0 EMAC4_0, ZMII Bridge */
541 zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
542 bis->bi_phymode[0] = BI_PHYMODE_MII;
543 bis->bi_phymode[1] = BI_PHYMODE_NONE;
544 bis->bi_phymode[2] = BI_PHYMODE_NONE;
545 bis->bi_phymode[3] = BI_PHYMODE_NONE;
546 break;
547 case 2:
548 /* 2 MII - 460GT */
549 /* GMC0 EMAC4_0, GMC1 EMAC4_2, ZMII Bridge */
550 zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
551 zmiifer |= ZMII_FER_MII << ZMII_FER_V(2);
552 bis->bi_phymode[0] = BI_PHYMODE_MII;
553 bis->bi_phymode[1] = BI_PHYMODE_NONE;
554 bis->bi_phymode[2] = BI_PHYMODE_MII;
555 bis->bi_phymode[3] = BI_PHYMODE_NONE;
556 break;
557 case 3:
558 /* 2 RMII - 460EX */
559 /* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
560 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
561 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
562 bis->bi_phymode[0] = BI_PHYMODE_RMII;
563 bis->bi_phymode[1] = BI_PHYMODE_RMII;
564 bis->bi_phymode[2] = BI_PHYMODE_NONE;
565 bis->bi_phymode[3] = BI_PHYMODE_NONE;
566 break;
567 case 4:
568 /* 4 RMII - 460GT */
569 /* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC1 EMAC4_2, GMC1, EMAC4_3 */
570 /* ZMII Bridge */
571 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
572 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
573 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
574 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
575 bis->bi_phymode[0] = BI_PHYMODE_RMII;
576 bis->bi_phymode[1] = BI_PHYMODE_RMII;
577 bis->bi_phymode[2] = BI_PHYMODE_RMII;
578 bis->bi_phymode[3] = BI_PHYMODE_RMII;
579 break;
580 case 5:
581 /* 2 SMII - 460EX */
582 /* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
583 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
584 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
585 bis->bi_phymode[0] = BI_PHYMODE_SMII;
586 bis->bi_phymode[1] = BI_PHYMODE_SMII;
587 bis->bi_phymode[2] = BI_PHYMODE_NONE;
588 bis->bi_phymode[3] = BI_PHYMODE_NONE;
589 break;
590 case 6:
591 /* 4 SMII - 460GT */
592 /* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC0 EMAC4_3, GMC0 EMAC4_3 */
593 /* ZMII Bridge */
594 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
595 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
596 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
597 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
598 bis->bi_phymode[0] = BI_PHYMODE_SMII;
599 bis->bi_phymode[1] = BI_PHYMODE_SMII;
600 bis->bi_phymode[2] = BI_PHYMODE_SMII;
601 bis->bi_phymode[3] = BI_PHYMODE_SMII;
602 break;
603 case 7:
604 /* This is the default mode that we want for board bringup - Maple */
605 /* 1 GMII - 460EX */
606 /* GMC0 EMAC4_0, RGMII Bridge 0 */
607 rmiifer |= RGMII_FER_MDIO(0);
608
609 if (devnum == 0) {
610 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
611 bis->bi_phymode[0] = BI_PHYMODE_GMII;
612 bis->bi_phymode[1] = BI_PHYMODE_NONE;
613 bis->bi_phymode[2] = BI_PHYMODE_NONE;
614 bis->bi_phymode[3] = BI_PHYMODE_NONE;
615 } else {
616 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(3); /* CH1CFG - EMAC1 */
617 bis->bi_phymode[0] = BI_PHYMODE_NONE;
618 bis->bi_phymode[1] = BI_PHYMODE_GMII;
619 bis->bi_phymode[2] = BI_PHYMODE_NONE;
620 bis->bi_phymode[3] = BI_PHYMODE_NONE;
621 }
622 break;
623 case 8:
624 /* 2 GMII - 460GT */
625 /* GMC0 EMAC4_0, RGMII Bridge 0 */
626 /* GMC1 EMAC4_2, RGMII Bridge 1 */
627 rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
628 rmiifer1 |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC2 */
629 rmiifer |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC0 */
630 rmiifer1 |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC2 */
631
632 bis->bi_phymode[0] = BI_PHYMODE_GMII;
633 bis->bi_phymode[1] = BI_PHYMODE_NONE;
634 bis->bi_phymode[2] = BI_PHYMODE_GMII;
635 bis->bi_phymode[3] = BI_PHYMODE_NONE;
636 break;
637 case 9:
638 /* 2 RGMII - 460EX */
639 /* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
640 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
641 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
642 rmiifer |= RGMII_FER_MDIO(0); /* enable MDIO - EMAC0 */
643
644 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
645 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
646 bis->bi_phymode[2] = BI_PHYMODE_NONE;
647 bis->bi_phymode[3] = BI_PHYMODE_NONE;
648 break;
649 case 10:
650 /* 4 RGMII - 460GT */
651 /* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
652 /* GMC1 EMAC4_2, GMC1 EMAC4_3, RGMII Bridge 1 */
653 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
654 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
655 rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(2);
656 rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(3);
657 bis->bi_phymode[0] = BI_PHYMODE_RGMII;
658 bis->bi_phymode[1] = BI_PHYMODE_RGMII;
659 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
660 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
661 break;
662 default:
663 break;
664 }
665
666 /* Set EMAC for MDIO */
667 mfsdr(SDR0_ETH_CFG, eth_cfg);
668 eth_cfg |= SDR0_ETH_CFG_MDIO_SEL_EMAC0;
669 mtsdr(SDR0_ETH_CFG, eth_cfg);
670
671 out_be32((void *)RGMII_FER, rmiifer);
672 #if defined(CONFIG_460GT)
673 out_be32((void *)RGMII_FER + RGMII1_BASE_OFFSET, rmiifer1);
674 #endif
675
676 /* bypass the TAHOE0/TAHOE1 cores for U-Boot */
677 mfsdr(SDR0_ETH_CFG, eth_cfg);
678 eth_cfg |= (SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
679 mtsdr(SDR0_ETH_CFG, eth_cfg);
680
681 return 0;
682 }
683 #endif /* CONFIG_460EX || CONFIG_460GT */
684
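/*
 * Round a malloc()ed block up to the requested (power-of-two) alignment.
 * Note that the original, unaligned pointer is not kept, so this memory
 * can never be free()d - acceptable here, as the descriptors and buffers
 * live for the lifetime of U-Boot.
 */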
685 static inline void *malloc_aligned(u32 size, u32 align)
686 {
687 return (void *)(((u32)malloc(size + align) + align - 1) &
688 ~(align - 1));
689 }
690
691 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
692 {
693 int i;
694 unsigned long reg = 0;
695 unsigned long msr;
696 unsigned long speed;
697 unsigned long duplex;
698 unsigned long failsafe;
699 unsigned mode_reg;
700 unsigned short devnum;
701 unsigned short reg_short;
702 #if defined(CONFIG_440GX) || \
703 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
704 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
705 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
706 defined(CONFIG_405EX)
707 sys_info_t sysinfo;
708 #if defined(CONFIG_440GX) || defined(CONFIG_440SPE) || \
709 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
710 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
711 defined(CONFIG_405EX)
712 int ethgroup = -1;
713 #endif
714 #endif
715 u32 bd_cached;
716 u32 bd_uncached = 0;
717 #ifdef CONFIG_4xx_DCACHE
718 static u32 last_used_ea = 0;
719 #endif
720 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
721 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
722 defined(CONFIG_405EX)
723 int rgmii_channel;
724 #endif
725
726 EMAC_4XX_HW_PST hw_p = dev->priv;
727
728 /* before doing anything, figure out if we have a MAC address */
729 /* if not, bail */
730 if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
731 printf("ERROR: ethaddr not set!\n");
732 return -1;
733 }
734
735 #if defined(CONFIG_440GX) || \
736 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
737 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
738 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
739 defined(CONFIG_405EX)
740 /* Need to get the OPB frequency so we can access the PHY */
741 get_sys_info (&sysinfo);
742 #endif
743
744 msr = mfmsr ();
745 mtmsr (msr & ~(MSR_EE)); /* disable interrupts */
746
747 devnum = hw_p->devnum;
748
749 #ifdef INFO_4XX_ENET
750 /* AS.HARNOIS
751 * We should have :
752 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
  753	 * In most cases hw_p->stats.pkts_handled == hw_p->stats.pkts_rx, but it
  754	 * is possible that new packets (unrelated to the
  755	 * current transfer) have had time to arrive before
  756	 * NetLoop calls eth_halt
757 */
758 printf ("About preceeding transfer (eth%d):\n"
759 "- Sent packet number %d\n"
760 "- Received packet number %d\n"
761 "- Handled packet number %d\n",
762 hw_p->devnum,
763 hw_p->stats.pkts_tx,
764 hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
765
766 hw_p->stats.pkts_tx = 0;
767 hw_p->stats.pkts_rx = 0;
768 hw_p->stats.pkts_handled = 0;
769 hw_p->print_speed = 1; /* print speed message again next time */
770 #endif
771
772 hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
773 hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
774
775 hw_p->rx_slot = 0; /* MAL Receive Slot */
776 hw_p->rx_i_index = 0; /* Receive Interrupt Queue Index */
777 hw_p->rx_u_index = 0; /* Receive User Queue Index */
778
779 hw_p->tx_slot = 0; /* MAL Transmit Slot */
780 hw_p->tx_i_index = 0; /* Transmit Interrupt Queue Index */
781 hw_p->tx_u_index = 0; /* Transmit User Queue Index */
782
783 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
784 /* set RMII mode */
785 /* NOTE: 440GX spec states that mode is mutually exclusive */
786 /* NOTE: Therefore, disable all other EMACS, since we handle */
787 /* NOTE: only one emac at a time */
788 reg = 0;
789 out_be32((void *)ZMII_FER, 0);
790 udelay (100);
791
792 #if defined(CONFIG_440GP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
793 out_be32((void *)ZMII_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
794 #elif defined(CONFIG_440GX) || \
795 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
796 defined(CONFIG_460EX) || defined(CONFIG_460GT)
797 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
798 #endif
799
800 out_be32((void *)ZMII_SSR, ZMII_SSR_SP << ZMII_SSR_V(devnum));
801 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
802 #if defined(CONFIG_405EX)
803 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
804 #endif
805
806 sync();
807
808 /* provide clocks for EMAC internal loopback */
809 emac_loopback_enable(hw_p);
810
811 /* EMAC RESET */
812 out_be32((void *)EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
813
814 /* remove clocks for EMAC internal loopback */
815 emac_loopback_disable(hw_p);
816
817 failsafe = 1000;
818 while ((in_be32((void *)EMAC_M0 + hw_p->hw_addr) & (EMAC_M0_SRST)) && failsafe) {
819 udelay (1000);
820 failsafe--;
821 }
822 if (failsafe <= 0)
823 printf("\nProblem resetting EMAC!\n");
824
825 #if defined(CONFIG_440GX) || \
826 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
827 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
828 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
829 defined(CONFIG_405EX)
830 /* Whack the M1 register */
831 mode_reg = 0x0;
832 mode_reg &= ~0x00000038;
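	/* Note: the "if ... ;" below is intentionally empty - for an OPB
	 * clock of 50 MHz or less the OBCI field in EMAC_M1 is left at 0. */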
833 if (sysinfo.freqOPB <= 50000000);
834 else if (sysinfo.freqOPB <= 66666667)
835 mode_reg |= EMAC_M1_OBCI_66;
836 else if (sysinfo.freqOPB <= 83333333)
837 mode_reg |= EMAC_M1_OBCI_83;
838 else if (sysinfo.freqOPB <= 100000000)
839 mode_reg |= EMAC_M1_OBCI_100;
840 else
841 mode_reg |= EMAC_M1_OBCI_GT100;
842
843 out_be32((void *)EMAC_M1 + hw_p->hw_addr, mode_reg);
844 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
845
846 /* wait for PHY to complete auto negotiation */
847 reg_short = 0;
848 #ifndef CONFIG_CS8952_PHY
849 switch (devnum) {
850 case 0:
851 reg = CONFIG_PHY_ADDR;
852 break;
853 #if defined (CONFIG_PHY1_ADDR)
854 case 1:
855 reg = CONFIG_PHY1_ADDR;
856 break;
857 #endif
858 #if defined (CONFIG_PHY2_ADDR)
859 case 2:
860 reg = CONFIG_PHY2_ADDR;
861 break;
862 #endif
863 #if defined (CONFIG_PHY3_ADDR)
864 case 3:
865 reg = CONFIG_PHY3_ADDR;
866 break;
867 #endif
868 default:
869 reg = CONFIG_PHY_ADDR;
870 break;
871 }
872
873 bis->bi_phynum[devnum] = reg;
874
875 #if defined(CONFIG_PHY_RESET)
876 /*
877 * Reset the phy, only if its the first time through
878 * otherwise, just check the speeds & feeds
879 */
880 if (hw_p->first_init == 0) {
881 #if defined(CONFIG_M88E1111_PHY)
882 miiphy_write (dev->name, reg, 0x14, 0x0ce3);
883 miiphy_write (dev->name, reg, 0x18, 0x4101);
884 miiphy_write (dev->name, reg, 0x09, 0x0e00);
885 miiphy_write (dev->name, reg, 0x04, 0x01e1);
886 #endif
887 miiphy_reset (dev->name, reg);
888
889 #if defined(CONFIG_440GX) || \
890 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
891 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
892 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
893 defined(CONFIG_405EX)
894
895 #if defined(CONFIG_CIS8201_PHY)
896 /*
897 * Cicada 8201 PHY needs to have an extended register whacked
898 * for RGMII mode.
899 */
900 if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
901 #if defined(CONFIG_CIS8201_SHORT_ETCH)
902 miiphy_write (dev->name, reg, 23, 0x1300);
903 #else
904 miiphy_write (dev->name, reg, 23, 0x1000);
905 #endif
906 /*
907 * Vitesse VSC8201/Cicada CIS8201 errata:
908 * Interoperability problem with Intel 82547EI phys
909 * This work around (provided by Vitesse) changes
910 * the default timer convergence from 8ms to 12ms
911 */
912 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
913 miiphy_write (dev->name, reg, 0x08, 0x0200);
914 miiphy_write (dev->name, reg, 0x1f, 0x52b5);
915 miiphy_write (dev->name, reg, 0x02, 0x0004);
916 miiphy_write (dev->name, reg, 0x01, 0x0671);
917 miiphy_write (dev->name, reg, 0x00, 0x8fae);
918 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
919 miiphy_write (dev->name, reg, 0x08, 0x0000);
920 miiphy_write (dev->name, reg, 0x1f, 0x0000);
921 /* end Vitesse/Cicada errata */
922 }
923 #endif
924
925 #if defined(CONFIG_ET1011C_PHY)
926 /*
927 * Agere ET1011c PHY needs to have an extended register whacked
928 * for RGMII mode.
929 */
930 if (((devnum == 2) || (devnum ==3)) && (4 == ethgroup)) {
931 miiphy_read (dev->name, reg, 0x16, &reg_short);
932 reg_short &= ~(0x7);
933 reg_short |= 0x6; /* RGMII DLL Delay*/
934 miiphy_write (dev->name, reg, 0x16, reg_short);
935
936 miiphy_read (dev->name, reg, 0x17, &reg_short);
937 reg_short &= ~(0x40);
938 miiphy_write (dev->name, reg, 0x17, reg_short);
939
940 miiphy_write(dev->name, reg, 0x1c, 0x74f0);
941 }
942 #endif
943
944 #endif
945 /* Start/Restart autonegotiation */
946 phy_setup_aneg (dev->name, reg);
947 udelay (1000);
948 }
949 #endif /* defined(CONFIG_PHY_RESET) */
950
951 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
952
953 /*
954 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
955 */
956 if ((reg_short & PHY_BMSR_AUTN_ABLE)
957 && !(reg_short & PHY_BMSR_AUTN_COMP)) {
958 puts ("Waiting for PHY auto negotiation to complete");
959 i = 0;
960 while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
961 /*
962 * Timeout reached ?
963 */
964 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
965 puts (" TIMEOUT !\n");
966 break;
967 }
968
969 if ((i++ % 1000) == 0) {
970 putc ('.');
971 }
972 udelay (1000); /* 1 ms */
973 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
974
975 }
976 puts (" done\n");
977 udelay (500000); /* another 500 ms (results in faster booting) */
978 }
979 #endif /* #ifndef CONFIG_CS8952_PHY */
980
981 speed = miiphy_speed (dev->name, reg);
982 duplex = miiphy_duplex (dev->name, reg);
983
984 if (hw_p->print_speed) {
985 hw_p->print_speed = 0;
986 printf ("ENET Speed is %d Mbps - %s duplex connection (EMAC%d)\n",
987 (int) speed, (duplex == HALF) ? "HALF" : "FULL",
988 hw_p->devnum);
989 }
990
991 #if defined(CONFIG_440) && \
992 !defined(CONFIG_440SP) && !defined(CONFIG_440SPE) && \
993 !defined(CONFIG_440EPX) && !defined(CONFIG_440GRX) && \
994 !defined(CONFIG_460EX) && !defined(CONFIG_460GT)
995 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
996 mfsdr(sdr_mfr, reg);
997 if (speed == 100) {
998 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
999 } else {
1000 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
1001 }
1002 mtsdr(sdr_mfr, reg);
1003 #endif
1004
1005 /* Set ZMII/RGMII speed according to the phy link speed */
1006 reg = in_be32((void *)ZMII_SSR);
1007 if ( (speed == 100) || (speed == 1000) )
1008 out_be32((void *)ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
1009 else
1010 out_be32((void *)ZMII_SSR, reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));
1011
1012 if ((devnum == 2) || (devnum == 3)) {
1013 if (speed == 1000)
1014 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
1015 else if (speed == 100)
1016 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
1017 else if (speed == 10)
1018 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
1019 else {
1020 printf("Error in RGMII Speed\n");
1021 return -1;
1022 }
1023 out_be32((void *)RGMII_SSR, reg);
1024 }
1025 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
1026
1027 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1028 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1029 defined(CONFIG_405EX)
1030 if (devnum >= 2)
1031 rgmii_channel = devnum - 2;
1032 else
1033 rgmii_channel = devnum;
1034
1035 if (speed == 1000)
1036 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V(rgmii_channel));
1037 else if (speed == 100)
1038 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V(rgmii_channel));
1039 else if (speed == 10)
1040 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V(rgmii_channel));
1041 else {
1042 printf("Error in RGMII Speed\n");
1043 return -1;
1044 }
1045 out_be32((void *)RGMII_SSR, reg);
1046 #if defined(CONFIG_460GT)
1047 if ((devnum == 2) || (devnum == 3))
1048 out_be32((void *)RGMII_SSR + RGMII1_BASE_OFFSET, reg);
1049 #endif
1050 #endif
1051
1052 /* set the Mal configuration reg */
1053 #if defined(CONFIG_440GX) || \
1054 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1055 defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
1056 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1057 defined(CONFIG_405EX)
1058 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
1059 MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
1060 #else
1061 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
1062 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
1063 if (get_pvr() == PVR_440GP_RB) {
1064 mtdcr (malmcr, mfdcr(malmcr) & ~MAL_CR_PLBB);
1065 }
1066 #endif
1067
1068 /*
 1069	 * Malloc MAL buffer descriptors, make sure they are
 1070	 * aligned on a cache line boundary
 1071	 * (401/403/IOP480 = 16, 405 = 32)
 1072	 * and don't cross cache block boundaries.
1073 */
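	/*
	 * With CONFIG_4xx_DCACHE the descriptors are additionally mapped
	 * cache-inhibited via a TLB entry placed just above the end of RAM
	 * (bis->bi_memsize, plus CFG_MEM_TOP_HIDE if defined); the driver
	 * then accesses them only through that uncached alias (hw_p->tx,
	 * hw_p->rx) while the MAL channels are given the physical addresses
	 * (hw_p->tx_phys, hw_p->rx_phys).
	 */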
1074 if (hw_p->first_init == 0) {
1075 debug("*** Allocating descriptor memory ***\n");
1076
1077 bd_cached = (u32)malloc_aligned(MAL_ALLOC_SIZE, 4096);
1078 if (!bd_cached) {
1079 printf("%s: Error allocating MAL descriptor buffers!\n");
1080 return -1;
1081 }
1082
1083 #ifdef CONFIG_4xx_DCACHE
1084 flush_dcache_range(bd_cached, bd_cached + MAL_ALLOC_SIZE);
1085 if (!last_used_ea)
1086 #if defined(CFG_MEM_TOP_HIDE)
1087 bd_uncached = bis->bi_memsize + CFG_MEM_TOP_HIDE;
1088 #else
1089 bd_uncached = bis->bi_memsize;
1090 #endif
1091 else
1092 bd_uncached = last_used_ea + MAL_ALLOC_SIZE;
1093
1094 last_used_ea = bd_uncached;
1095 program_tlb(bd_cached, bd_uncached, MAL_ALLOC_SIZE,
1096 TLB_WORD2_I_ENABLE);
1097 #else
1098 bd_uncached = bd_cached;
1099 #endif
1100 hw_p->tx_phys = bd_cached;
1101 hw_p->rx_phys = bd_cached + MAL_TX_DESC_SIZE;
1102 hw_p->tx = (mal_desc_t *)(bd_uncached);
1103 hw_p->rx = (mal_desc_t *)(bd_uncached + MAL_TX_DESC_SIZE);
1104 debug("hw_p->tx=%08x, hw_p->rx=%08x\n", hw_p->tx, hw_p->rx);
1105 }
1106
1107 for (i = 0; i < NUM_TX_BUFF; i++) {
1108 hw_p->tx[i].ctrl = 0;
1109 hw_p->tx[i].data_len = 0;
1110 if (hw_p->first_init == 0)
1111 hw_p->txbuf_ptr = malloc_aligned(MAL_ALLOC_SIZE,
1112 L1_CACHE_BYTES);
1113 hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
1114 if ((NUM_TX_BUFF - 1) == i)
1115 hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
1116 hw_p->tx_run[i] = -1;
1117 debug("TX_BUFF %d @ 0x%08lx\n", i, (u32)hw_p->tx[i].data_ptr);
1118 }
1119
1120 for (i = 0; i < NUM_RX_BUFF; i++) {
1121 hw_p->rx[i].ctrl = 0;
1122 hw_p->rx[i].data_len = 0;
1123 hw_p->rx[i].data_ptr = (char *)NetRxPackets[i];
1124 if ((NUM_RX_BUFF - 1) == i)
1125 hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
1126 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
1127 hw_p->rx_ready[i] = -1;
1128 debug("RX_BUFF %d @ 0x%08lx\n", i, (u32)hw_p->rx[i].data_ptr);
1129 }
1130
1131 reg = 0x00000000;
1132
1133 reg |= dev->enetaddr[0]; /* set high address */
1134 reg = reg << 8;
1135 reg |= dev->enetaddr[1];
1136
1137 out_be32((void *)EMAC_IAH + hw_p->hw_addr, reg);
1138
1139 reg = 0x00000000;
1140 reg |= dev->enetaddr[2]; /* set low address */
1141 reg = reg << 8;
1142 reg |= dev->enetaddr[3];
1143 reg = reg << 8;
1144 reg |= dev->enetaddr[4];
1145 reg = reg << 8;
1146 reg |= dev->enetaddr[5];
1147
1148 out_be32((void *)EMAC_IAL + hw_p->hw_addr, reg);
1149
1150 switch (devnum) {
1151 case 1:
1152 /* setup MAL tx & rx channel pointers */
1153 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
1154 mtdcr (maltxctp2r, hw_p->tx_phys);
1155 #else
1156 mtdcr (maltxctp1r, hw_p->tx_phys);
1157 #endif
1158 #if defined(CONFIG_440)
1159 mtdcr (maltxbattr, 0x0);
1160 mtdcr (malrxbattr, 0x0);
1161 #endif
1162
1163 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
1164 mtdcr (malrxctp8r, hw_p->rx_phys);
1165 /* set RX buffer size */
1166 mtdcr (malrcbs8, ENET_MAX_MTU_ALIGNED / 16);
1167 #else
1168 mtdcr (malrxctp1r, hw_p->rx_phys);
1169 /* set RX buffer size */
1170 mtdcr (malrcbs1, ENET_MAX_MTU_ALIGNED / 16);
1171 #endif
1172 break;
1173 #if defined (CONFIG_440GX)
1174 case 2:
1175 /* setup MAL tx & rx channel pointers */
1176 mtdcr (maltxbattr, 0x0);
1177 mtdcr (malrxbattr, 0x0);
1178 mtdcr (maltxctp2r, hw_p->tx_phys);
1179 mtdcr (malrxctp2r, hw_p->rx_phys);
1180 /* set RX buffer size */
1181 mtdcr (malrcbs2, ENET_MAX_MTU_ALIGNED / 16);
1182 break;
1183 case 3:
1184 /* setup MAL tx & rx channel pointers */
1185 mtdcr (maltxbattr, 0x0);
1186 mtdcr (maltxctp3r, hw_p->tx_phys);
1187 mtdcr (malrxbattr, 0x0);
1188 mtdcr (malrxctp3r, hw_p->rx_phys);
1189 /* set RX buffer size */
1190 mtdcr (malrcbs3, ENET_MAX_MTU_ALIGNED / 16);
1191 break;
1192 #endif /* CONFIG_440GX */
1193 #if defined (CONFIG_460GT)
1194 case 2:
1195 /* setup MAL tx & rx channel pointers */
1196 mtdcr (maltxbattr, 0x0);
1197 mtdcr (malrxbattr, 0x0);
1198 mtdcr (maltxctp2r, hw_p->tx_phys);
1199 mtdcr (malrxctp16r, hw_p->rx_phys);
1200 /* set RX buffer size */
1201 mtdcr (malrcbs16, ENET_MAX_MTU_ALIGNED / 16);
1202 break;
1203 case 3:
1204 /* setup MAL tx & rx channel pointers */
1205 mtdcr (maltxbattr, 0x0);
1206 mtdcr (malrxbattr, 0x0);
1207 mtdcr (maltxctp3r, hw_p->tx_phys);
1208 mtdcr (malrxctp24r, hw_p->rx_phys);
1209 /* set RX buffer size */
1210 mtdcr (malrcbs24, ENET_MAX_MTU_ALIGNED / 16);
1211 break;
1212 #endif /* CONFIG_460GT */
1213 case 0:
1214 default:
1215 /* setup MAL tx & rx channel pointers */
1216 #if defined(CONFIG_440)
1217 mtdcr (maltxbattr, 0x0);
1218 mtdcr (malrxbattr, 0x0);
1219 #endif
1220 mtdcr (maltxctp0r, hw_p->tx_phys);
1221 mtdcr (malrxctp0r, hw_p->rx_phys);
1222 /* set RX buffer size */
1223 mtdcr (malrcbs0, ENET_MAX_MTU_ALIGNED / 16);
1224 break;
1225 }
1226
1227 /* Enable MAL transmit and receive channels */
1228 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
1229 mtdcr (maltxcasr, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
1230 #else
1231 mtdcr (maltxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
1232 #endif
1233 mtdcr (malrxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
1234
1235 /* set transmit enable & receive enable */
1236 out_be32((void *)EMAC_M0 + hw_p->hw_addr, EMAC_M0_TXE | EMAC_M0_RXE);
1237
1238 mode_reg = in_be32((void *)EMAC_M1 + hw_p->hw_addr);
1239
1240 /* set rx-/tx-fifo size */
1241 mode_reg = (mode_reg & ~EMAC_MR1_FIFO_MASK) | EMAC_MR1_FIFO_SIZE;
1242
1243 /* set speed */
1244 if (speed == _1000BASET) {
1245 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1246 defined(CONFIG_440SP) || defined(CONFIG_440SPE)
1247 unsigned long pfc1;
1248
1249 mfsdr (sdr_pfc1, pfc1);
1250 pfc1 |= SDR0_PFC1_EM_1000;
1251 mtsdr (sdr_pfc1, pfc1);
1252 #endif
1253 mode_reg = mode_reg | EMAC_M1_MF_1000MBPS | EMAC_M1_IST;
1254 } else if (speed == _100BASET)
1255 mode_reg = mode_reg | EMAC_M1_MF_100MBPS | EMAC_M1_IST;
1256 else
1257 mode_reg = mode_reg & ~0x00C00000; /* 10 MBPS */
1258 if (duplex == FULL)
1259 mode_reg = mode_reg | 0x80000000 | EMAC_M1_IST;
1260
1261 out_be32((void *)EMAC_M1 + hw_p->hw_addr, mode_reg);
1262
 1263	/* Enable broadcast and individual address */
1264 /* TBS: enabling runts as some misbehaved nics will send runts */
1265 out_be32((void *)EMAC_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
1266
1267 /* we probably need to set the tx mode1 reg? maybe at tx time */
1268
1269 /* set transmit request threshold register */
1270 out_be32((void *)EMAC_TRTR + hw_p->hw_addr, 0x18000000); /* 256 byte threshold */
1271
1272 /* set receive low/high water mark register */
1273 #if defined(CONFIG_440)
1274 /* 440s has a 64 byte burst length */
1275 out_be32((void *)EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
1276 #else
1277 /* 405s have a 16 byte burst length */
1278 out_be32((void *)EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
1279 #endif /* defined(CONFIG_440) */
1280 out_be32((void *)EMAC_TXM1 + hw_p->hw_addr, 0xf8640000);
1281
1282 /* Set fifo limit entry in tx mode 0 */
1283 out_be32((void *)EMAC_TXM0 + hw_p->hw_addr, 0x00000003);
1284 /* Frame gap set */
1285 out_be32((void *)EMAC_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
1286
1287 /* Set EMAC IER */
1288 hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
1289 if (speed == _100BASET)
1290 hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
1291
1292 out_be32((void *)EMAC_ISR + hw_p->hw_addr, 0xffffffff); /* clear pending interrupts */
1293 out_be32((void *)EMAC_IER + hw_p->hw_addr, hw_p->emac_ier);
1294
1295 if (hw_p->first_init == 0) {
1296 /*
1297 * Connect interrupt service routines
1298 */
1299 irq_install_handler(ETH_IRQ_NUM(hw_p->devnum),
1300 (interrupt_handler_t *) enetInt, dev);
1301 }
1302
1303 mtmsr (msr); /* enable interrupts again */
1304
1305 hw_p->bis = bis;
1306 hw_p->first_init = 1;
1307
1308 return 0;
1309 }
1310
1311
1312 static int ppc_4xx_eth_send (struct eth_device *dev, volatile void *ptr,
1313 int len)
1314 {
1315 struct enet_frame *ef_ptr;
1316 ulong time_start, time_now;
1317 unsigned long temp_txm0;
1318 EMAC_4XX_HW_PST hw_p = dev->priv;
1319
1320 ef_ptr = (struct enet_frame *) ptr;
1321
1322 /*-----------------------------------------------------------------------+
1323 * Copy in our address into the frame.
1324 *-----------------------------------------------------------------------*/
1325 (void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
1326
1327 /*-----------------------------------------------------------------------+
1328 * If frame is too long or too short, modify length.
1329 *-----------------------------------------------------------------------*/
1330 /* TBS: where does the fragment go???? */
1331 if (len > ENET_MAX_MTU)
1332 len = ENET_MAX_MTU;
1333
1334 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
1335 memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
1336 flush_dcache_range((u32)hw_p->txbuf_ptr, (u32)hw_p->txbuf_ptr + len);
1337
1338 /*-----------------------------------------------------------------------+
1339 * set TX Buffer busy, and send it
1340 *-----------------------------------------------------------------------*/
1341 hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
1342 EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
1343 ~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
1344 if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
1345 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
1346
1347 hw_p->tx[hw_p->tx_slot].data_len = (short) len;
1348 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
1349
1350 sync();
1351
1352 out_be32((void *)EMAC_TXM0 + hw_p->hw_addr,
1353 in_be32((void *)EMAC_TXM0 + hw_p->hw_addr) | EMAC_TXM0_GNP0);
1354 #ifdef INFO_4XX_ENET
1355 hw_p->stats.pkts_tx++;
1356 #endif
1357
1358 /*-----------------------------------------------------------------------+
 1359	 * poll until the packet is sent and then make sure it is OK
1360 *-----------------------------------------------------------------------*/
1361 time_start = get_timer (0);
1362 while (1) {
1363 temp_txm0 = in_be32((void *)EMAC_TXM0 + hw_p->hw_addr);
1364 /* loop until either TINT turns on or 3 seconds elapse */
1365 if ((temp_txm0 & EMAC_TXM0_GNP0) != 0) {
1366 /* transmit is done, so now check for errors
1367 * If there is an error, an interrupt should
1368 * happen when we return
1369 */
1370 time_now = get_timer (0);
1371 if ((time_now - time_start) > 3000) {
1372 return (-1);
1373 }
1374 } else {
1375 return (len);
1376 }
1377 }
1378 }
1379
1380
1381 #if defined (CONFIG_440) || defined(CONFIG_405EX)
1382
1383 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
1384 /*
1385 * Hack: On 440SP all enet irq sources are located on UIC1
1386 * Needs some cleanup. --sr
1387 */
1388 #define UIC0MSR uic1msr
1389 #define UIC0SR uic1sr
1390 #define UIC1MSR uic1msr
1391 #define UIC1SR uic1sr
1392 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
1393 /*
1394 * Hack: On 460EX/GT all enet irq sources are located on UIC2
1395 * Needs some cleanup. --ag
1396 */
1397 #define UIC0MSR uic2msr
1398 #define UIC0SR uic2sr
1399 #define UIC1MSR uic2msr
1400 #define UIC1SR uic2sr
1401 #else
1402 #define UIC0MSR uic0msr
1403 #define UIC0SR uic0sr
1404 #define UIC1MSR uic1msr
1405 #define UIC1SR uic1sr
1406 #endif
1407
1408 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1409 defined(CONFIG_405EX)
1410 #define UICMSR_ETHX uic0msr
1411 #define UICSR_ETHX uic0sr
1412 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
1413 #define UICMSR_ETHX uic2msr
1414 #define UICSR_ETHX uic2sr
1415 #else
1416 #define UICMSR_ETHX uic1msr
1417 #define UICSR_ETHX uic1sr
1418 #endif
1419
1420 int enetInt (struct eth_device *dev)
1421 {
1422 int serviced;
1423 int rc = -1; /* default to not us */
1424 unsigned long mal_isr;
1425 unsigned long emac_isr = 0;
1426 unsigned long mal_rx_eob;
1427 unsigned long my_uic0msr, my_uic1msr;
1428 unsigned long my_uicmsr_ethx;
1429
1430 #if defined(CONFIG_440GX)
1431 unsigned long my_uic2msr;
1432 #endif
1433 EMAC_4XX_HW_PST hw_p;
1434
1435 /*
1436 * Because the mal is generic, we need to get the current
1437 * eth device
1438 */
1439 #if defined(CONFIG_NET_MULTI)
1440 dev = eth_get_dev();
1441 #else
1442 dev = emac0_dev;
1443 #endif
1444
1445 hw_p = dev->priv;
1446
1447 /* enter loop that stays in interrupt code until nothing to service */
1448 do {
1449 serviced = 0;
1450
1451 my_uic0msr = mfdcr (UIC0MSR);
1452 my_uic1msr = mfdcr (UIC1MSR);
1453 #if defined(CONFIG_440GX)
1454 my_uic2msr = mfdcr (uic2msr);
1455 #endif
1456 my_uicmsr_ethx = mfdcr (UICMSR_ETHX);
1457
1458 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
1459 && !(my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))
1460 && !(my_uicmsr_ethx & (UIC_ETH0 | UIC_ETH1))) {
1461 /* not for us */
1462 return (rc);
1463 }
1464 #if defined (CONFIG_440GX)
1465 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
1466 && !(my_uic2msr & (UIC_ETH2 | UIC_ETH3))) {
1467 /* not for us */
1468 return (rc);
1469 }
1470 #endif
1471 /* get and clear controller status interrupts */
1472 /* look at Mal and EMAC interrupts */
1473 if ((my_uic0msr & (UIC_MRE | UIC_MTE))
1474 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1475 /* we have a MAL interrupt */
1476 mal_isr = mfdcr (malesr);
1477 /* look for mal error */
1478 if (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE)) {
1479 mal_err (dev, mal_isr, my_uic1msr, MAL_UIC_DEF, MAL_UIC_ERR);
1480 serviced = 1;
1481 rc = 0;
1482 }
1483 }
1484
1485 /* port by port dispatch of emac interrupts */
1486 if (hw_p->devnum == 0) {
1487 if (UIC_ETH0 & my_uicmsr_ethx) { /* look for EMAC errors */
1488 emac_isr = in_be32((void *)EMAC_ISR + hw_p->hw_addr);
1489 if ((hw_p->emac_ier & emac_isr) != 0) {
1490 emac_err (dev, emac_isr);
1491 serviced = 1;
1492 rc = 0;
1493 }
1494 }
1495 if ((hw_p->emac_ier & emac_isr)
1496 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1497 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1498 mtdcr (UIC1SR, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1499 mtdcr (UICSR_ETHX, UIC_ETH0); /* Clear */
1500 return (rc); /* we had errors so get out */
1501 }
1502 }
1503
1504 #if !defined(CONFIG_440SP)
1505 if (hw_p->devnum == 1) {
1506 if (UIC_ETH1 & my_uicmsr_ethx) { /* look for EMAC errors */
1507 emac_isr = in_be32((void *)EMAC_ISR + hw_p->hw_addr);
1508 if ((hw_p->emac_ier & emac_isr) != 0) {
1509 emac_err (dev, emac_isr);
1510 serviced = 1;
1511 rc = 0;
1512 }
1513 }
1514 if ((hw_p->emac_ier & emac_isr)
1515 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1516 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1517 mtdcr (UIC1SR, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1518 mtdcr (UICSR_ETHX, UIC_ETH1); /* Clear */
1519 return (rc); /* we had errors so get out */
1520 }
1521 }
1522 #if defined (CONFIG_440GX)
1523 if (hw_p->devnum == 2) {
1524 if (UIC_ETH2 & my_uic2msr) { /* look for EMAC errors */
1525 emac_isr = in_be32((void *)EMAC_ISR + hw_p->hw_addr);
1526 if ((hw_p->emac_ier & emac_isr) != 0) {
1527 emac_err (dev, emac_isr);
1528 serviced = 1;
1529 rc = 0;
1530 }
1531 }
1532 if ((hw_p->emac_ier & emac_isr)
1533 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1534 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1535 mtdcr (UIC1SR, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1536 mtdcr (uic2sr, UIC_ETH2);
1537 return (rc); /* we had errors so get out */
1538 }
1539 }
1540
1541 if (hw_p->devnum == 3) {
1542 if (UIC_ETH3 & my_uic2msr) { /* look for EMAC errors */
1543 emac_isr = in_be32((void *)EMAC_ISR + hw_p->hw_addr);
1544 if ((hw_p->emac_ier & emac_isr) != 0) {
1545 emac_err (dev, emac_isr);
1546 serviced = 1;
1547 rc = 0;
1548 }
1549 }
1550 if ((hw_p->emac_ier & emac_isr)
1551 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1552 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1553 mtdcr (UIC1SR, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1554 mtdcr (uic2sr, UIC_ETH3);
1555 return (rc); /* we had errors so get out */
1556 }
1557 }
1558 #endif /* CONFIG_440GX */
1559 #endif /* !CONFIG_440SP */
1560
 1561		/* handle MAL TX EOB interrupt from a tx */
1562 if (my_uic0msr & UIC_MTE) {
1563 mal_rx_eob = mfdcr (maltxeobisr);
1564 mtdcr (maltxeobisr, mal_rx_eob);
1565 mtdcr (UIC0SR, UIC_MTE);
1566 }
 1567		/* handle MAL RX EOB interrupt from a receive */
1568 /* check for EOB on valid channels */
1569 if (my_uic0msr & UIC_MRE) {
1570 mal_rx_eob = mfdcr (malrxeobisr);
1571 if ((mal_rx_eob &
1572 (0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL)))
1573 != 0) { /* call emac routine for channel x */
1574 /* clear EOB
1575 mtdcr(malrxeobisr, mal_rx_eob); */
1576 enet_rcv (dev, emac_isr);
1577 /* indicate that we serviced an interrupt */
1578 serviced = 1;
1579 rc = 0;
1580 }
1581 }
1582
1583 mtdcr (UIC0SR, UIC_MRE); /* Clear */
1584 mtdcr (UIC1SR, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1585 switch (hw_p->devnum) {
1586 case 0:
1587 mtdcr (UICSR_ETHX, UIC_ETH0);
1588 break;
1589 case 1:
1590 mtdcr (UICSR_ETHX, UIC_ETH1);
1591 break;
1592 #if defined (CONFIG_440GX)
1593 case 2:
1594 mtdcr (uic2sr, UIC_ETH2);
1595 break;
1596 case 3:
1597 mtdcr (uic2sr, UIC_ETH3);
1598 break;
1599 #endif /* CONFIG_440GX */
1600 default:
1601 break;
1602 }
1603 } while (serviced);
1604
1605 return (rc);
1606 }
1607
1608 #else /* CONFIG_440 */
1609
1610 int enetInt (struct eth_device *dev)
1611 {
1612 int serviced;
1613 int rc = -1; /* default to not us */
1614 unsigned long mal_isr;
1615 unsigned long emac_isr = 0;
1616 unsigned long mal_rx_eob;
1617 unsigned long my_uicmsr;
1618
1619 EMAC_4XX_HW_PST hw_p;
1620
1621 /*
1622 * Because the mal is generic, we need to get the current
1623 * eth device
1624 */
1625 #if defined(CONFIG_NET_MULTI)
1626 dev = eth_get_dev();
1627 #else
1628 dev = emac0_dev;
1629 #endif
1630
1631 hw_p = dev->priv;
1632
1633 /* enter loop that stays in interrupt code until nothing to service */
1634 do {
1635 serviced = 0;
1636
1637 my_uicmsr = mfdcr (uicmsr);
1638
1639 if ((my_uicmsr & (MAL_UIC_DEF | EMAC_UIC_DEF)) == 0) { /* not for us */
1640 return (rc);
1641 }
1642 /* get and clear controller status interrupts */
1643 /* look at Mal and EMAC interrupts */
1644 if ((MAL_UIC_DEF & my_uicmsr) != 0) { /* we have a MAL interrupt */
1645 mal_isr = mfdcr (malesr);
1646 /* look for mal error */
1647 if ((my_uicmsr & MAL_UIC_ERR) != 0) {
1648 mal_err (dev, mal_isr, my_uicmsr, MAL_UIC_DEF, MAL_UIC_ERR);
1649 serviced = 1;
1650 rc = 0;
1651 }
1652 }
1653
1654 /* port by port dispatch of emac interrupts */
1655
1656 if ((SEL_UIC_DEF(hw_p->devnum) & my_uicmsr) != 0) { /* look for EMAC errors */
1657 emac_isr = in_be32((void *)EMAC_ISR + hw_p->hw_addr);
1658 if ((hw_p->emac_ier & emac_isr) != 0) {
1659 emac_err (dev, emac_isr);
1660 serviced = 1;
1661 rc = 0;
1662 }
1663 }
1664 if (((hw_p->emac_ier & emac_isr) != 0) || ((MAL_UIC_ERR & my_uicmsr) != 0)) {
1665 mtdcr (uicsr, MAL_UIC_DEF | SEL_UIC_DEF(hw_p->devnum)); /* Clear */
1666 return (rc); /* we had errors so get out */
1667 }
1668
 1669		/* handle MAL TX EOB interrupt from a tx */
1670 if (my_uicmsr & UIC_MAL_TXEOB) {
1671 mal_rx_eob = mfdcr (maltxeobisr);
1672 mtdcr (maltxeobisr, mal_rx_eob);
1673 mtdcr (uicsr, UIC_MAL_TXEOB);
1674 }
 1675		/* handle MAL RX EOB interrupt from a receive */
1676 /* check for EOB on valid channels */
1677 if (my_uicmsr & UIC_MAL_RXEOB)
1678 {
1679 mal_rx_eob = mfdcr (malrxeobisr);
1680 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1681 /* clear EOB
1682 mtdcr(malrxeobisr, mal_rx_eob); */
1683 enet_rcv (dev, emac_isr);
1684 /* indicate that we serviced an interrupt */
1685 serviced = 1;
1686 rc = 0;
1687 }
1688 }
1689 mtdcr (uicsr, MAL_UIC_DEF|EMAC_UIC_DEF|EMAC_UIC_DEF1); /* Clear */
1690 #if defined(CONFIG_405EZ)
1691 mtsdr (sdricintstat, SDR_ICRX_STAT | SDR_ICTX0_STAT | SDR_ICTX1_STAT);
1692 #endif /* defined(CONFIG_405EZ) */
1693 }
1694 while (serviced);
1695
1696 return (rc);
1697 }
1698
1699 #endif /* CONFIG_440 */
1700
1701 /*-----------------------------------------------------------------------------+
1702 * MAL Error Routine
1703 *-----------------------------------------------------------------------------*/
1704 static void mal_err (struct eth_device *dev, unsigned long isr,
1705 unsigned long uic, unsigned long maldef,
1706 unsigned long mal_errr)
1707 {
1708 EMAC_4XX_HW_PST hw_p = dev->priv;
1709
1710 mtdcr (malesr, isr); /* clear interrupt */
1711
1712 /* clear DE interrupt */
1713 mtdcr (maltxdeir, 0xC0000000);
1714 mtdcr (malrxdeir, 0x80000000);
1715
1716 #ifdef INFO_4XX_ENET
1717 printf ("\nMAL error occured.... ISR = %lx UIC = = %lx MAL_DEF = %lx MAL_ERR= %lx \n", isr, uic, maldef, mal_errr);
1718 #endif
1719
1720 eth_init (hw_p->bis); /* start again... */
1721 }
1722
1723 /*-----------------------------------------------------------------------------+
1724 * EMAC Error Routine
1725 *-----------------------------------------------------------------------------*/
1726 static void emac_err (struct eth_device *dev, unsigned long isr)
1727 {
1728 EMAC_4XX_HW_PST hw_p = dev->priv;
1729
1730 printf ("EMAC%d error occurred... ISR = %lx\n", hw_p->devnum, isr);
1731 out_be32((void *)EMAC_ISR + hw_p->hw_addr, isr);
1732 }
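/*
 * Note on the write-back above: EMAC_ISR behaves as a write-one-to-clear
 * status register here, so writing back the value just read acknowledges
 * exactly the error bits that were reported and keeps the same event from
 * being raised again on the next pass through enetInt().
 */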
1733
1734 /*-----------------------------------------------------------------------------+
1735 * enet_rcv() handles the ethernet receive data
1736 *-----------------------------------------------------------------------------*/
1737 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1738 {
1739 struct enet_frame *ef_ptr;
1740 unsigned long data_len;
1741 unsigned long rx_eob_isr;
1742 EMAC_4XX_HW_PST hw_p = dev->priv;
1743
1744 int handled = 0;
1745 int i;
1746 int loop_count = 0;
1747
1748 rx_eob_isr = mfdcr (malrxeobisr);
1749 if ((0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL)) & rx_eob_isr) {
1750 /* clear EOB */
1751 mtdcr (malrxeobisr, rx_eob_isr);
1752
1753 /* EMAC RX done */
1754 while (1) { /* do all */
1755 i = hw_p->rx_slot;
1756
1757 if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1758 || (loop_count >= NUM_RX_BUFF))
1759 break;
1760
1761 loop_count++;
1762 handled++;
1763 data_len = (unsigned long) hw_p->rx[i].data_len & 0x0fff; /* Get len */
1764 if (data_len) {
1765 if (data_len > ENET_MAX_MTU) /* Check len */
1766 data_len = 0;
1767 else {
1768 if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) { /* Check Errors */
1769 data_len = 0;
1770 hw_p->stats.rx_err_log[hw_p->
1771 rx_err_index]
1772 = hw_p->rx[i].ctrl;
1773 hw_p->rx_err_index++;
1774 if (hw_p->rx_err_index ==
1775 MAX_ERR_LOG)
1776 hw_p->rx_err_index =
1777 0;
1778 } /* emac_errors */
1779 } /* data_len < max mtu */
1780 } /* if data_len */
1781 if (!data_len) { /* no data */
1782 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY; /* Free Recv Buffer */
1783
1784 hw_p->stats.data_len_err++; /* Error at Rx */
1785 }
1786
1787 /* !data_len */
1788 /* AS.HARNOIS */
1789 /* Check if user has already eaten buffer */
1790 /* if not => ERROR */
1791 else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1792 if (hw_p->is_receiving)
1793 printf ("ERROR: Receive buffers are full!\n");
1794 break;
1795 } else {
1796 hw_p->stats.rx_frames++;
1797 hw_p->stats.rx += data_len;
1798 ef_ptr = (struct enet_frame *) hw_p->rx[i].
1799 data_ptr;
1800 #ifdef INFO_4XX_ENET
1801 hw_p->stats.pkts_rx++;
1802 #endif
1803 /* AS.HARNOIS
1804 * use ring buffer
1805 */
1806 hw_p->rx_ready[hw_p->rx_i_index] = i;
1807 hw_p->rx_i_index++;
1808 if (NUM_RX_BUFF == hw_p->rx_i_index)
1809 hw_p->rx_i_index = 0;
1810
1811 hw_p->rx_slot++;
1812 if (NUM_RX_BUFF == hw_p->rx_slot)
1813 hw_p->rx_slot = 0;
1814
1815 /* AS.HARNOIS
1816 * free receive buffer only when
1817 * buffer has been handled (eth_rx)
1818 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1819 */
1820 } /* if data_len */
1821 } /* while */
1822 } /* if EMAC RX channel */
1823 }
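/*
 * Receive ring handshake used above (summary of this driver's indices,
 * not new code): enet_rcv() is the producer -- for every filled MAL
 * descriptor rx[i] it stores the slot number in rx_ready[rx_i_index]
 * and advances rx_i_index and rx_slot.  ppc_4xx_eth_rx() below is the
 * consumer -- it takes the slot from rx_ready[rx_u_index], hands the
 * frame to the stack, marks the descriptor empty again and writes -1
 * back so the entry can be reused.  All indices wrap at NUM_RX_BUFF.
 * Condensed sketch of one produce/consume step (comment only):
 *
 *	hw_p->rx_ready[hw_p->rx_i_index] = i;		    producer (IRQ path)
 *	...
 *	user_index = hw_p->rx_ready[hw_p->rx_u_index];	    consumer (eth_rx)
 *	hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
 *	hw_p->rx_ready[hw_p->rx_u_index] = -1;
 */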
1824
1825
1826 static int ppc_4xx_eth_rx (struct eth_device *dev)
1827 {
1828 int length;
1829 int user_index;
1830 unsigned long msr;
1831 EMAC_4XX_HW_PST hw_p = dev->priv;
1832
1833 hw_p->is_receiving = 1; /* tell driver */
1834
1835 for (;;) {
1836 /* AS.HARNOIS
1837 * use ring buffer and
1838 * get index from rx buffer descriptor queue
1839 */
1840 user_index = hw_p->rx_ready[hw_p->rx_u_index];
1841 if (user_index == -1) {
1842 length = -1;
1843 break; /* nothing received - leave for() loop */
1844 }
1845
1846 msr = mfmsr ();
1847 mtmsr (msr & ~(MSR_EE));
1848
1849 length = hw_p->rx[user_index].data_len & 0x0fff;
1850
1851 /* Pass the packet up to the protocol layers. */
1852 /* NetReceive(NetRxPackets[rxIdx], length - 4); */
1853 /* NetReceive(NetRxPackets[i], length); */
1854 invalidate_dcache_range((u32)hw_p->rx[user_index].data_ptr,
1855 (u32)hw_p->rx[user_index].data_ptr +
1856 length - 4);
1857 NetReceive (NetRxPackets[user_index], length - 4);
1858 /* Free Recv Buffer */
1859 hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1860 /* Free rx buffer descriptor queue */
1861 hw_p->rx_ready[hw_p->rx_u_index] = -1;
1862 hw_p->rx_u_index++;
1863 if (NUM_RX_BUFF == hw_p->rx_u_index)
1864 hw_p->rx_u_index = 0;
1865
1866 #ifdef INFO_4XX_ENET
1867 hw_p->stats.pkts_handled++;
1868 #endif
1869
1870 mtmsr (msr); /* Enable IRQ's */
1871 }
1872
1873 hw_p->is_receiving = 0; /* tell driver */
1874
1875 return length;
1876 }
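/*
 * Notes on the receive path above: external interrupts (MSR[EE]) are
 * masked while a descriptor is unloaded, so the interrupt-driven producer
 * in enet_rcv() cannot run in the middle of the consumer update; the data
 * cache is invalidated over the frame before it is read, because the MAL
 * wrote it by DMA; and 4 bytes are trimmed from the reported length
 * before NetReceive(), corresponding to the frame checksum at the tail
 * of each received frame.
 */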
1877
1878 int ppc_4xx_eth_initialize (bd_t * bis)
1879 {
1880 static int virgin = 0;
1881 struct eth_device *dev;
1882 int eth_num = 0;
1883 EMAC_4XX_HW_PST hw = NULL;
1884 u8 ethaddr[4 + CONFIG_EMAC_NR_START][6];
1885 u32 hw_addr[4];
1886
1887 #if defined(CONFIG_440GX)
1888 unsigned long pfc1;
1889
1890 mfsdr (sdr_pfc1, pfc1);
1891 pfc1 &= ~(0x01e00000);
1892 pfc1 |= 0x01200000;
1893 mtsdr (sdr_pfc1, pfc1);
1894 #endif
1895
1896 /* first clear all MAC addresses */
1897 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++)
1898 memcpy(ethaddr[eth_num], "\0\0\0\0\0\0", 6);
1899
1900 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1901 switch (eth_num) {
1902 default: /* fall through */
1903 case 0:
1904 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1905 bis->bi_enetaddr, 6);
1906 hw_addr[eth_num] = 0x0;
1907 break;
1908 #ifdef CONFIG_HAS_ETH1
1909 case 1:
1910 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1911 bis->bi_enet1addr, 6);
1912 hw_addr[eth_num] = 0x100;
1913 break;
1914 #endif
1915 #ifdef CONFIG_HAS_ETH2
1916 case 2:
1917 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1918 bis->bi_enet2addr, 6);
1919 #if defined(CONFIG_460GT)
1920 hw_addr[eth_num] = 0x300;
1921 #else
1922 hw_addr[eth_num] = 0x400;
1923 #endif
1924 break;
1925 #endif
1926 #ifdef CONFIG_HAS_ETH3
1927 case 3:
1928 memcpy(ethaddr[eth_num + CONFIG_EMAC_NR_START],
1929 bis->bi_enet3addr, 6);
1930 #if defined(CONFIG_460GT)
1931 hw_addr[eth_num] = 0x400;
1932 #else
1933 hw_addr[eth_num] = 0x600;
1934 #endif
1935 break;
1936 #endif
1937 }
1938 }
1939
1940 /* set phy num and mode */
1941 bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1942 bis->bi_phymode[0] = 0;
1943
1944 #if defined(CONFIG_PHY1_ADDR)
1945 bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1946 bis->bi_phymode[1] = 0;
1947 #endif
1948 #if defined(CONFIG_440GX)
1949 bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1950 bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
1951 bis->bi_phymode[2] = 2;
1952 bis->bi_phymode[3] = 2;
1953 #endif
1954
1955 #if defined(CONFIG_440GX) || \
1956 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1957 defined(CONFIG_405EX)
1958 ppc_4xx_eth_setup_bridge(0, bis);
1959 #endif
1960
1961 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1962 /*
1963 * See if we can actually bring up the interface,
1964 * otherwise skip it
1965 */
1966 if (memcmp (ethaddr[eth_num], "\0\0\0\0\0\0", 6) == 0) {
1967 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1968 continue;
1969 }
1970
1971 /* Allocate device structure */
1972 dev = (struct eth_device *) malloc (sizeof (*dev));
1973 if (dev == NULL) {
1974 printf ("ppc_4xx_eth_initialize: "
1975 "Cannot allocate eth_device %d\n", eth_num);
1976 return (-1);
1977 }
1978 memset(dev, 0, sizeof(*dev));
1979
1980 /* Allocate our private use data */
1981 hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
1982 if (hw == NULL) {
1983 printf ("ppc_4xx_eth_initialize: "
1984 "Cannot allocate private hw data for eth_device %d",
1985 eth_num);
1986 free (dev);
1987 return (-1);
1988 }
1989 memset(hw, 0, sizeof(*hw));
1990
1991 hw->hw_addr = hw_addr[eth_num];
1992 memcpy (dev->enetaddr, ethaddr[eth_num], 6);
1993 hw->devnum = eth_num;
1994 hw->print_speed = 1;
1995
1996 sprintf (dev->name, "ppc_4xx_eth%d", eth_num - CONFIG_EMAC_NR_START);
1997 dev->priv = (void *) hw;
1998 dev->init = ppc_4xx_eth_init;
1999 dev->halt = ppc_4xx_eth_halt;
2000 dev->send = ppc_4xx_eth_send;
2001 dev->recv = ppc_4xx_eth_rx;
2002
2003 if (0 == virgin) {
2004 /* set the MAL IER (register names may change with a new spec) */
2005 #if defined(CONFIG_440SPE) || \
2006 defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
2007 defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
2008 defined(CONFIG_405EX)
2009 mal_ier =
2010 MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE |
2011 MAL_IER_DE | MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE ;
2012 #else
2013 mal_ier =
2014 MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
2015 MAL_IER_OPBE | MAL_IER_PLBE;
2016 #endif
2017 mtdcr (malesr, 0xffffffff); /* clear pending interrupts */
2018 mtdcr (maltxdeir, 0xffffffff); /* clear pending interrupts */
2019 mtdcr (malrxdeir, 0xffffffff); /* clear pending interrupts */
2020 mtdcr (malier, mal_ier);
2021
2022 /* install MAL interrupt handler */
2023 irq_install_handler (VECNUM_MS,
2024 (interrupt_handler_t *) enetInt,
2025 dev);
2026 irq_install_handler (VECNUM_MTE,
2027 (interrupt_handler_t *) enetInt,
2028 dev);
2029 irq_install_handler (VECNUM_MRE,
2030 (interrupt_handler_t *) enetInt,
2031 dev);
2032 irq_install_handler (VECNUM_TXDE,
2033 (interrupt_handler_t *) enetInt,
2034 dev);
2035 irq_install_handler (VECNUM_RXDE,
2036 (interrupt_handler_t *) enetInt,
2037 dev);
2038 virgin = 1;
2039 }
2040
2041 #if defined(CONFIG_NET_MULTI)
2042 eth_register (dev);
2043 #else
2044 emac0_dev = dev;
2045 #endif
2046
2047 #if defined(CONFIG_NET_MULTI)
2048 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
2049 miiphy_register (dev->name,
2050 emac4xx_miiphy_read, emac4xx_miiphy_write);
2051 #endif
2052 #endif
2053 } /* end for each supported device */
2054
2055 return 0;
2056 }
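/*
 * Usage sketch (illustrative only -- the CONFIG_ names below are the ones
 * tested in this file, the values are examples, and real settings live in
 * the board configuration header): a board using two EMACs would
 * typically provide something like
 *
 *	#define CONFIG_NET_MULTI
 *	#define CONFIG_MII
 *	#define CONFIG_PHY_ADDR		1	(example address)
 *	#define CONFIG_HAS_ETH1
 *	#define CONFIG_PHY1_ADDR	2	(example address)
 *
 * ppc_4xx_eth_initialize() then registers one eth_device per EMAC that
 * has a MAC address, installs the shared MAL/EMAC interrupt handler the
 * first time through (the 'virgin' flag), and the generic network code
 * drives each device through the init/send/recv/halt hooks set above.
 */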
2057
2058 #if !defined(CONFIG_NET_MULTI)
2059 void eth_halt (void) {
2060 if (emac0_dev) {
2061 ppc_4xx_eth_halt(emac0_dev);
2062 free(emac0_dev);
2063 emac0_dev = NULL;
2064 }
2065 }
2066
2067 int eth_init (bd_t *bis)
2068 {
2069 ppc_4xx_eth_initialize(bis);
2070 if (emac0_dev) {
2071 return ppc_4xx_eth_init(emac0_dev, bis);
2072 } else {
2073 printf("ERROR: ethaddr not set!\n");
2074 return -1;
2075 }
2076 }
2077
2078 int eth_send(volatile void *packet, int length)
2079 {
2080 return (ppc_4xx_eth_send(emac0_dev, packet, length));
2081 }
2082
2083 int eth_rx(void)
2084 {
2085 return (ppc_4xx_eth_rx(emac0_dev));
2086 }
2087
2088 int emac4xx_miiphy_initialize (bd_t * bis)
2089 {
2090 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
2091 miiphy_register ("ppc_4xx_eth0",
2092 emac4xx_miiphy_read, emac4xx_miiphy_write);
2093 #endif
2094
2095 return 0;
2096 }
2097 #endif /* !defined(CONFIG_NET_MULTI) */
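/*
 * The wrappers above are only built when CONFIG_NET_MULTI is not set:
 * they bind the generic eth_init()/eth_halt()/eth_send()/eth_rx() entry
 * points directly to the single emac0_dev instance created by
 * ppc_4xx_eth_initialize(), so a board with one EMAC needs no
 * multi-device registration.
 */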
2098
2099 #endif