/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>

#if defined(CONFIG_KIRKWOOD)
#include <asm/arch/kirkwood.h>
#elif defined(CONFIG_ORION5X)
#include <asm/arch/orion5x.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

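/*
 * MV_PHY_ADR_REQUEST is a pseudo PHY address: when both the PHY address
 * and the register offset passed to the SMI callbacks below equal this
 * value, the callbacks access the controller's own PHY address register
 * (regs->phyadr) instead of performing an SMI bus transaction.
 */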
#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)

/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Stores the 16-bit PHY register value in *data.
 * Returns 0 on success, -EFAULT on invalid parameters or timeout.
 */
static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 *data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		*data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return 0;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__FUNCTION__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__FUNCTION__, reg_ofs);
		return -EFAULT;
	}

	timeout = MVGBE_PHY_SMI_TIMEOUT;
	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
			return -EFAULT;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

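	/*
	 * An SMI read is a two-step operation: a command word containing the
	 * PHY address, register offset and read opcode is written first, and
	 * the result is then picked up from the data field of the same
	 * register once the controller raises the READ_VALID flag.
	 */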
	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__FUNCTION__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register (simple delay) */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	*data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __FUNCTION__, phy_adr,
		reg_ofs, *data);

	return 0;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EINVAL on bad parameters,
 * -ETIME on timeout.
 */
static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __FUNCTION__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __FUNCTION__);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
			return -ETIME;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}

/* Stop and check all queues */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

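	/*
	 * The low byte of the queue command register holds the per-queue
	 * enable bits; writing those bits shifted into the high byte issues
	 * the corresponding disable commands, so only the currently active
	 * queues are asked to stop.
	 */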
	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameter struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

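	/*
	 * The window size register encodes (size / 64KiB - 1) in bits
	 * [31:16], while the base address register combines the target ID,
	 * the attribute bits and the base address itself.
	 */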
	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
		(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
		(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - Set the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters.
 * It adds/removes MAC addresses from the port unicast address table.
 *
 * @uc_nibble	Unicast MAC address last nibble.
 * @option	0 = Add, 1 = remove address.
 *
 * RETURN: 1 on success, 0 if the option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

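	/*
	 * Each 32-bit DFUT register packs four one-byte table entries; an
	 * entry's "accept" flag lives in bit 0 and the receive queue number
	 * in bits [3:1], hence the (0x01 | (RXUQ << 1)) value written below.
	 */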
	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~(0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - Set the port unicast address.
 */
static void port_uc_addr_set(struct mvgbe_registers *regs, u8 *p_addr)
{
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve out an Rx descriptor ring and buffers in memory.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
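	/*
	 * The descriptors form a circular singly linked list: each entry
	 * points at the next, the last one points back at the first, and
	 * every descriptor owns one PKTSIZE_ALIGN receive buffer that is
	 * handed to the DMA engine up front.
	 */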
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1)) {
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		} else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
	&& defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
		| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
	&& defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
			MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}

static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoders */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port to 'not reset' state */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);

	return 0;
}

static int mvgbe_write_hwaddr(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Program the net device MAC address after initialization */
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
	return 0;
}

static int mvgbe_send(struct eth_device *dev, void *dataptr,
		      int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;

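	/*
	 * The Tx DMA apparently cannot use buffers that are not 8-byte
	 * aligned, so misaligned frames are bounced through the driver's
	 * pre-allocated aligned buffer before being queued.
	 */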
	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
				datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tc desc as zeroth TXUQ */
	MVGBE_REG_WR(regs->tcqdp[TXUQ], (u32) p_txdesc);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

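	/*
	 * MVGBE_BUFFER_OWNED_BY_DMA is cleared by the controller once the
	 * frame has been transmitted, so poll the descriptor status until
	 * ownership returns to the CPU or an error is flagged.
	 */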
	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __FUNCTION__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;

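	/*
	 * A descriptor is ready once the controller has handed ownership
	 * back to the CPU; MVGBE_PHY_SMI_TIMEOUT is reused here as a plain
	 * polling-iteration limit rather than a time in any particular unit.
	 */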
	/* wait until an Rx packet is available or we time out */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT) {
			timeout++;
		} else {
			debug("%s time out...\n", __FUNCTION__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);
	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__FUNCTION__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If the packet was received without the first/last descriptor bits
	 * set, or with the error summary bit set, it needs to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts & (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
			!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {
		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __FUNCTION__);
	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {
		printf("Err..(%s) Dropping packet with errors\n",
			__FUNCTION__);
	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
			" upper layer (NetReceive)\n", __FUNCTION__);

		/* let the upper layer handle the packet */
		NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
			(int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
	}
	/*
	 * free these descriptors and point next in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	writel((unsigned)p_rxdesc_curr->nxtdesc_p,
		(u32) &dmvgbe->p_rxdesc_curr);

	return 0;
}

int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	char *s;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip if the port is configured not to be used */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));

		if (!dmvgbe)
			goto error1;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));

		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);

		if (!dmvgbe->p_rxdesc)
			goto error2;

		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);

		if (!dmvgbe->p_rxbuf)
			goto error3;

		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);

		if (!dmvgbe->p_aligned_txbuf)
			goto error4;

		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);

		if (!dmvgbe->p_txdesc) {
			free(dmvgbe->p_aligned_txbuf);
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__FUNCTION__);
			return -1;
		}

		dev = &dmvgbe->dev;

		/* must be less than NAMESIZE (16) */
		sprintf(dev->name, "egiga%d", devnum);

		/* Extract the MAC address from the environment */
		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			s = "ethaddr";
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			s = "eth1addr";
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__FUNCTION__, devnum);
			return -1;
		}

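		/*
		 * 02:50:43 has the locally administered bit set in the first
		 * octet, so the generated fallback address will not collide
		 * with any vendor-assigned (globally unique) MAC address.
		 */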
		while (!eth_getenv_enetaddr(s, dev->enetaddr)) {
			/* Generate Private MAC addr if not set */
			dev->enetaddr[0] = 0x02;
			dev->enetaddr[1] = 0x50;
			dev->enetaddr[2] = 0x43;
#if defined(CONFIG_SKIP_LOCAL_MAC_RANDOMIZATION)
			/* Generate fixed lower MAC half using devnum */
			dev->enetaddr[3] = 0;
			dev->enetaddr[4] = 0;
			dev->enetaddr[5] = devnum;
#else
			/* Generate random lower MAC half */
			dev->enetaddr[3] = get_random_hex();
			dev->enetaddr[4] = get_random_hex();
			dev->enetaddr[5] = get_random_hex();
#endif
			eth_setenv_enetaddr(s, dev->enetaddr);
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, smi_reg_read, smi_reg_write);
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
			MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}