1 /*
2 * (C) Copyright 2009
3 * Marvell Semiconductor <www.marvell.com>
4 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
5 *
6 * (C) Copyright 2003
7 * Ingo Assmus <ingo.assmus@keymile.com>
8 *
9 * based on - Driver for MV64360X ethernet ports
10 * Copyright (C) 2002 rabeeh@galileo.co.il
11 *
12 * See file CREDITS for list of people who contributed to this
13 * project.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
28 * MA 02110-1301 USA
29 */
30
31 #include <common.h>
32 #include <net.h>
33 #include <malloc.h>
34 #include <miiphy.h>
35 #include <asm/errno.h>
36 #include <asm/types.h>
37 #include <asm/byteorder.h>
38
39 #if defined(CONFIG_KIRKWOOD)
40 #include <asm/arch/kirkwood.h>
41 #elif defined(CONFIG_ORION5X)
42 #include <asm/arch/orion5x.h>
43 #endif
44
45 #include "mvgbe.h"
46
47 DECLARE_GLOBAL_DATA_PTR;
48
49 #define MV_PHY_ADR_REQUEST 0xee
50 #define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
51
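/*
 * Quick orientation (summary, derived from the macros in mvgbe.h): an
 * SMI request word is assembled from the PHY (device) address shifted
 * by MVGBE_PHY_SMI_DEV_ADDR_OFFS, the register offset shifted by
 * MVGBE_SMI_REG_ADDR_OFFS, and MVGBE_PHY_SMI_OPCODE_READ for reads;
 * writes carry the 16-bit data in the MVGBE_PHY_SMI_DATA_OFFS field
 * instead.  MVGBE_PHY_SMI_BUSY_MASK and MVGBE_PHY_SMI_READ_VALID_MASK
 * are polled to pace a transaction.
 *
 * MV_PHY_ADR_REQUEST is a driver-private magic value: when passed as
 * both the PHY address and the register offset, the callbacks below
 * read or write the port's PHYADR register instead of performing a
 * real SMI transaction.
 */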
52 /*
53 * smi_reg_read - miiphy_read callback function.
54 *
55 * Returns 0 on success, with the 16-bit PHY register value in *data, or -EFAULT on error
56 */
57 static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
58 {
59 struct eth_device *dev = eth_get_dev_by_name(devname);
60 struct mvgbe_device *dmvgbe = to_mvgbe(dev);
61 struct mvgbe_registers *regs = dmvgbe->regs;
62 u32 smi_reg;
63 u32 timeout;
64
65 /* Phyadr read request */
66 if (phy_adr == MV_PHY_ADR_REQUEST &&
67 reg_ofs == MV_PHY_ADR_REQUEST) {
68 /* Return the port's current PHY address */
69 *data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
70 return 0;
71 }
72 /* check parameters */
73 if (phy_adr > PHYADR_MASK) {
74 printf("Err..(%s) Invalid PHY address %d\n",
75 __FUNCTION__, phy_adr);
76 return -EFAULT;
77 }
78 if (reg_ofs > PHYREG_MASK) {
79 printf("Err..(%s) Invalid register offset %d\n",
80 __FUNCTION__, reg_ofs);
81 return -EFAULT;
82 }
83
84 timeout = MVGBE_PHY_SMI_TIMEOUT;
85 /* wait till the SMI is not busy */
86 do {
87 /* read smi register */
88 smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
89 if (timeout-- == 0) {
90 printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
91 return -EFAULT;
92 }
93 } while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
94
95 /* fill in the PHY address, register offset and read opcode */
96 smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
97 | (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
98 | MVGBE_PHY_SMI_OPCODE_READ;
99
100 /* write the smi register */
101 MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);
102
103 /* wait until the read value is ready */
104 timeout = MVGBE_PHY_SMI_TIMEOUT;
105
106 do {
107 /* read smi register */
108 smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
109 if (timeout-- == 0) {
110 printf("Err..(%s) SMI read ready timeout\n",
111 __FUNCTION__);
112 return -EFAULT;
113 }
114 } while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));
115
116 /* Wait for the data to update in the SMI register */
117 for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
118 ;
119
120 *data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);
121
122 debug("%s:(adr %d, off %d) value= %04x\n", __FUNCTION__, phy_adr,
123 reg_ofs, *data);
124
125 return 0;
126 }
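/*
 * Usage sketch (illustrative only): after mvgbe_initialize() has
 * registered these callbacks via miiphy_register(), the MII layer
 * reaches this function through miiphy_read(), e.g. to read PHY
 * register 1 (BMSR) on port "egiga0":
 *
 *	u16 val;
 *	miiphy_read("egiga0", phy_adr, 1, &val);
 *
 * where phy_adr is the port's PHY address, obtainable by passing
 * MV_PHY_ADR_REQUEST as both the address and the register offset.
 */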
127
128 /*
129 * smi_reg_write - miiphy_write callback function.
130 *
131 * Returns 0 if the write succeeds, -EINVAL on bad parameters,
132 * or -ETIME on timeout
133 */
134 static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
135 {
136 struct eth_device *dev = eth_get_dev_by_name(devname);
137 struct mvgbe_device *dmvgbe = to_mvgbe(dev);
138 struct mvgbe_registers *regs = dmvgbe->regs;
139 u32 smi_reg;
140 u32 timeout;
141
142 /* Phyadr write request */
143 if (phy_adr == MV_PHY_ADR_REQUEST &&
144 reg_ofs == MV_PHY_ADR_REQUEST) {
145 MVGBE_REG_WR(regs->phyadr, data);
146 return 0;
147 }
148
149 /* check parameters */
150 if (phy_adr > PHYADR_MASK) {
151 printf("Err..(%s) Invalid phy address\n", __FUNCTION__);
152 return -EINVAL;
153 }
154 if (reg_ofs > PHYREG_MASK) {
155 printf("Err..(%s) Invalid register offset\n", __FUNCTION__);
156 return -EINVAL;
157 }
158
159 /* wait till the SMI is not busy */
160 timeout = MVGBE_PHY_SMI_TIMEOUT;
161 do {
162 /* read smi register */
163 smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
164 if (timeout-- == 0) {
165 printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
166 return -ETIME;
167 }
168 } while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
169
170 /* fill the phy addr and reg offset and write opcode and data */
171 smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
172 smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
173 | (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
174 smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;
175
176 /* write the smi register */
177 MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);
178
179 return 0;
180 }
181
182 /* Stop the active queues and wait for them to drain */
183 static void stop_queue(u32 * qreg)
184 {
185 u32 reg_data;
186
187 reg_data = readl(qreg);
188
189 if (reg_data & 0xFF) {
190 /* Issue stop command for active channels only */
191 writel((reg_data << 8), qreg);
192
193 /* Wait for all queue activity to terminate. */
194 do {
195 /*
196 * Check port cause register that all queues
197 * are stopped
198 */
199 reg_data = readl(qreg);
200 }
201 while (reg_data & 0xFF);
202 }
203 }
204
205 /*
206 * set_access_control - Configure address decode parameters for the Ethernet unit
207 *
208 * This function configures the address decode parameters for the Gigabit
209 * Ethernet Controller according to the given parameters struct.
210 *
211 * @regs Register struct pointer.
212 * @param Address decode parameter struct.
213 */
214 static void set_access_control(struct mvgbe_registers *regs,
215 struct mvgbe_winparam *param)
216 {
217 u32 access_prot_reg;
218
219 /* Set access control register */
220 access_prot_reg = MVGBE_REG_RD(regs->epap);
221 /* clear window permission */
222 access_prot_reg &= (~(3 << (param->win * 2)));
223 access_prot_reg |= (param->access_ctrl << (param->win * 2));
224 MVGBE_REG_WR(regs->epap, access_prot_reg);
225
226 /* Set window Size reg (SR) */
227 MVGBE_REG_WR(regs->barsz[param->win].size,
228 (((param->size / 0x10000) - 1) << 16));
229
230 /* Set window Base address reg (BA) */
231 MVGBE_REG_WR(regs->barsz[param->win].bar,
232 (param->target | param->attrib | param->base_addr));
233 /* High address remap reg (HARR) */
234 if (param->win < 4)
235 MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);
236
237 /* Base address enable reg (BARER): a cleared bit enables the window */
238 if (param->enable == 1)
239 MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
240 else
241 MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
242 }
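/*
 * Worked example (illustrative): a 256 MiB DRAM window with
 * param->size = 0x10000000 programs the size register with
 * ((0x10000000 / 0x10000) - 1) << 16 = 0x0fff0000, i.e. the size is
 * expressed in 64 KiB units minus one, while the BAR register carries
 * the target, attribute and base address OR'ed together.
 */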
243
244 static void set_dram_access(struct mvgbe_registers *regs)
245 {
246 struct mvgbe_winparam win_param;
247 int i;
248
249 for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
250 /* Set access parameters for DRAM bank i */
251 win_param.win = i; /* Use Ethernet window i */
252 /* Window target - DDR */
253 win_param.target = MVGBE_TARGET_DRAM;
254 /* Enable full access */
255 win_param.access_ctrl = EWIN_ACCESS_FULL;
256 win_param.high_addr = 0;
257 /* Get bank base and size */
258 win_param.base_addr = gd->bd->bi_dram[i].start;
259 win_param.size = gd->bd->bi_dram[i].size;
260 if (win_param.size == 0)
261 win_param.enable = 0;
262 else
263 win_param.enable = 1; /* Enable the access */
264
265 /* Enable DRAM bank */
266 switch (i) {
267 case 0:
268 win_param.attrib = EBAR_DRAM_CS0;
269 break;
270 case 1:
271 win_param.attrib = EBAR_DRAM_CS1;
272 break;
273 case 2:
274 win_param.attrib = EBAR_DRAM_CS2;
275 break;
276 case 3:
277 win_param.attrib = EBAR_DRAM_CS3;
278 break;
279 default:
280 /* invalid bank, disable access */
281 win_param.enable = 0;
282 win_param.attrib = 0;
283 break;
284 }
285 /* Set the access control for the address window (EPAPR) RD/WR */
286 set_access_control(regs, &win_param);
287 }
288 }
289
290 /*
291 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
292 *
293 * Go through all the DA filter tables (Unicast, Special Multicast & Other
294 * Multicast) and set each entry to 0.
295 */
296 static void port_init_mac_tables(struct mvgbe_registers *regs)
297 {
298 int table_index;
299
300 /* Clear DA filter unicast table (Ex_dFUT) */
301 for (table_index = 0; table_index < 4; ++table_index)
302 MVGBE_REG_WR(regs->dfut[table_index], 0);
303
304 for (table_index = 0; table_index < 64; ++table_index) {
305 /* Clear DA filter special multicast table (Ex_dFSMT) */
306 MVGBE_REG_WR(regs->dfsmt[table_index], 0);
307 /* Clear DA filter other multicast table (Ex_dFOMT) */
308 MVGBE_REG_WR(regs->dfomt[table_index], 0);
309 }
310 }
311
312 /*
313 * port_uc_addr - Set the port unicast address table
314 *
315 * This function locates the proper entry in the Unicast table for the
316 * specified MAC nibble and sets its properties according to function
317 * parameters.
318 * This function adds/removes MAC addresses from the port unicast address
319 * table.
320 *
321 * @uc_nibble Unicast MAC Address last nibble.
322 * @option 0 = Add, 1 = remove address.
323 *
324 * RETURN: 1 if the operation succeeded, 0 if the option parameter is invalid.
325 */
326 static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
327 int option)
328 {
329 u32 unicast_reg;
330 u32 tbl_offset;
331 u32 reg_offset;
332
333 /* Locate the Unicast table entry */
334 uc_nibble = (0xf & uc_nibble);
335 /* Register offset from unicast table base */
336 tbl_offset = (uc_nibble / 4);
337 /* Entry offset within the above register */
338 reg_offset = uc_nibble % 4;
339
340 switch (option) {
341 case REJECT_MAC_ADDR:
342 /*
343 * Clear accepts frame bit at specified unicast
344 * DA table entry
345 */
346 unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
347 unicast_reg &= ~(0xFF << (8 * reg_offset));
348 MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
349 break;
350 case ACCEPT_MAC_ADDR:
351 /* Set accepts frame bit at unicast DA filter table entry */
352 unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
353 unicast_reg &= ~(0xFF << (8 * reg_offset));
354 unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
355 MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
356 break;
357 default:
358 return 0;
359 }
360 return 1;
361 }
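/*
 * Worked example (illustrative): for a MAC address ending in 0x15 the
 * nibble is 0x5, so tbl_offset = 5 / 4 = 1 and reg_offset = 5 % 4 = 1;
 * the entry lives in byte 1 of regs->dfut[1], and ACCEPT_MAC_ADDR
 * writes (0x01 | (RXUQ << 1)) into that byte so matching frames are
 * accepted on receive queue RXUQ.
 */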
362
363 /*
364 * port_uc_addr_set - Set the port unicast address.
365 */
366 static void port_uc_addr_set(struct mvgbe_registers *regs, u8 * p_addr)
367 {
368 u32 mac_h;
369 u32 mac_l;
370
371 mac_l = (p_addr[4] << 8) | (p_addr[5]);
372 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
373 (p_addr[3] << 0);
374
375 MVGBE_REG_WR(regs->macal, mac_l);
376 MVGBE_REG_WR(regs->macah, mac_h);
377
378 /* Accept frames of this address */
379 port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
380 }
381
382 /*
383 * mvgbe_init_rx_desc_ring - Carve out an Rx descriptor ring and buffers in memory.
384 */
385 static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
386 {
387 struct mvgbe_rxdesc *p_rx_desc;
388 int i;
389
390 /* initialize the Rx descriptor ring */
391 p_rx_desc = dmvgbe->p_rxdesc;
392 for (i = 0; i < RINGSZ; i++) {
393 p_rx_desc->cmd_sts =
394 MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
395 p_rx_desc->buf_size = PKTSIZE_ALIGN;
396 p_rx_desc->byte_cnt = 0;
397 p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
398 if (i == (RINGSZ - 1))
399 p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
400 else {
401 p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
402 ((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
403 p_rx_desc = p_rx_desc->nxtdesc_p;
404 }
405 }
406 dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
407 }
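/*
 * The resulting ring is a circular, singly linked list: RINGSZ
 * descriptors spaced MV_RXQ_DESC_ALIGNED_SIZE bytes apart, each
 * pointing at its own PKTSIZE_ALIGN buffer inside p_rxbuf, with the
 * last descriptor's nxtdesc_p wrapping back to the first.  Every
 * descriptor starts out owned by the DMA engine so the port can fill
 * it as soon as Rx is enabled.
 */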
408
409 static int mvgbe_init(struct eth_device *dev)
410 {
411 struct mvgbe_device *dmvgbe = to_mvgbe(dev);
412 struct mvgbe_registers *regs = dmvgbe->regs;
413 #if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
414 && defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
415 int i;
416 #endif
417 /* setup RX rings */
418 mvgbe_init_rx_desc_ring(dmvgbe);
419
420 /* Clear the ethernet port interrupts */
421 MVGBE_REG_WR(regs->ic, 0);
422 MVGBE_REG_WR(regs->ice, 0);
423 /* Unmask RX buffer and TX end interrupt */
424 MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
425 /* Unmask phy and link status changes interrupts */
426 MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);
427
428 set_dram_access(regs);
429 port_init_mac_tables(regs);
430 port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
431
432 /* Assign port configuration and command. */
433 MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
434 MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
435 MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);
436
437 /* Assign port SDMA configuration */
438 MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
439 MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
440 MVGBE_REG_WR(regs->tqx[0].tqxtbc,
441 (QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
442 /* Turn off the port/RXUQ bandwidth limitation */
443 MVGBE_REG_WR(regs->pmtu, 0);
444
445 /* Set maximum receive buffer to 9700 bytes */
446 MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
447 | (MVGBE_REG_RD(regs->psc0) & MRU_MASK));
448
449 /* Enable port initially */
450 MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);
451
452 /*
453 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
454 * disable the leaky bucket mechanism.
455 */
456 MVGBE_REG_WR(regs->pmtu, 0);
457
458 /* Assign the Rx current descriptor pointer of the given RXUQ */
459 MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
460 /* ensure previous write is done before enabling Rx DMA */
461 isb();
462 /* Enable port Rx. */
463 MVGBE_REG_WR(regs->rqc, (1 << RXUQ));
464
465 #if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
466 && defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
467 /* Wait up to 5s for the link status */
468 for (i = 0; i < 5; i++) {
469 u16 phyadr;
470
471 miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
472 MV_PHY_ADR_REQUEST, &phyadr);
473 /* Return if we get link up */
474 if (miiphy_link(dev->name, phyadr))
475 return 0;
476 udelay(1000000);
477 }
478
479 printf("No link on %s\n", dev->name);
480 return -1;
481 #endif
482 return 0;
483 }
484
485 static int mvgbe_halt(struct eth_device *dev)
486 {
487 struct mvgbe_device *dmvgbe = to_mvgbe(dev);
488 struct mvgbe_registers *regs = dmvgbe->regs;
489
490 /* Disable all gigE address decode windows */
491 MVGBE_REG_WR(regs->bare, 0x3f);
492
493 stop_queue(&regs->tqc);
494 stop_queue(&regs->rqc);
495
496 /* Disable port */
497 MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
498 /* Make sure the port is not held in reset */
499 MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
500 #ifdef CONFIG_SYS_MII_MODE
501 /* Set the MII interface up */
502 MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
503 #endif
504 /* Disable & mask ethernet port interrupts */
505 MVGBE_REG_WR(regs->ic, 0);
506 MVGBE_REG_WR(regs->ice, 0);
507 MVGBE_REG_WR(regs->pim, 0);
508 MVGBE_REG_WR(regs->peim, 0);
509
510 return 0;
511 }
512
513 static int mvgbe_write_hwaddr(struct eth_device *dev)
514 {
515 struct mvgbe_device *dmvgbe = to_mvgbe(dev);
516 struct mvgbe_registers *regs = dmvgbe->regs;
517
518 /* Program the net device MAC address after initialization */
519 port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
520 return 0;
521 }
522
523 static int mvgbe_send(struct eth_device *dev, void *dataptr,
524 int datasize)
525 {
526 struct mvgbe_device *dmvgbe = to_mvgbe(dev);
527 struct mvgbe_registers *regs = dmvgbe->regs;
528 struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
529 void *p = (void *)dataptr;
530 u32 cmd_sts;
531
532 /* Copy buffer if it's misaligned */
533 if ((u32) dataptr & 0x07) {
534 if (datasize > PKTSIZE_ALIGN) {
535 printf("Non-aligned data too large (%d)\n",
536 datasize);
537 return -1;
538 }
539
540 memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
541 p = dmvgbe->p_aligned_txbuf;
542 }
543
544 p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
545 p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
546 p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
547 p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
548 p_txdesc->buf_ptr = (u8 *) p;
549 p_txdesc->byte_cnt = datasize;
550
551 /* Set this tx descriptor as the head of the zeroth TXUQ */
552 MVGBE_REG_WR(regs->tcqdp[TXUQ], (u32) p_txdesc);
553
554 /* ensure tx desc writes above are performed before we start Tx DMA */
555 isb();
556
557 /* Apply send command using zeroth TXUQ */
558 MVGBE_REG_WR(regs->tqc, (1 << TXUQ));
559
560 /*
561 * wait for packet xmit completion
562 */
563 cmd_sts = readl(&p_txdesc->cmd_sts);
564 while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
565 /* return fail if error is detected */
566 if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
567 (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
568 cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
569 printf("Err..(%s) in xmit packet\n", __FUNCTION__);
570 return -1;
571 }
572 cmd_sts = readl(&p_txdesc->cmd_sts);
573 }
574 return 0;
575 }
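/*
 * Note: the transmit path is synchronous.  A single descriptor
 * (p_txdesc) is handed to the controller and its ownership bit is
 * polled until the DMA engine gives it back, so only one frame is in
 * flight at a time.
 */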
576
577 static int mvgbe_recv(struct eth_device *dev)
578 {
579 struct mvgbe_device *dmvgbe = to_mvgbe(dev);
580 struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
581 u32 cmd_sts;
582 u32 timeout = 0;
583
584 /* wait until an rx packet is available or the timeout expires */
585 do {
586 if (timeout < MVGBE_PHY_SMI_TIMEOUT)
587 timeout++;
588 else {
589 debug("%s time out...\n", __FUNCTION__);
590 return -1;
591 }
592 } while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);
593
594 if (p_rxdesc_curr->byte_cnt != 0) {
595 debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
596 __FUNCTION__, (u32) p_rxdesc_curr->byte_cnt,
597 (u32) p_rxdesc_curr->buf_ptr,
598 (u32) p_rxdesc_curr->cmd_sts);
599 }
600
601 /*
602 * If a packet was received without the first/last descriptor bits
603 * set, or with the error summary bit set, the packet needs to be
604 * dropped.
605 */
606 cmd_sts = readl(&p_rxdesc_curr->cmd_sts);
607
608 if ((cmd_sts &
609 (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
610 != (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {
611
612 printf("Err..(%s) Dropping packet spread on"
613 " multiple descriptors\n", __FUNCTION__);
614
615 } else if (cmd_sts & MVGBE_ERROR_SUMMARY) {
616
617 printf("Err..(%s) Dropping packet with errors\n",
618 __FUNCTION__);
619
620 } else {
621 /* pass the packet up to the higher layer for processing */
622 debug("%s: Sending Received packet to"
623 " upper layer (NetReceive)\n", __FUNCTION__);
624
625 /* let the upper layer handle the packet */
626 NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
627 (int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
628 }
629 /*
630 * free this descriptor and advance to the next one in the ring
631 */
632 p_rxdesc_curr->cmd_sts =
633 MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
634 p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
635 p_rxdesc_curr->byte_cnt = 0;
636
637 writel((unsigned)p_rxdesc_curr->nxtdesc_p,
638 (u32) &dmvgbe->p_rxdesc_curr);
639
640 return 0;
641 }
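/*
 * After the payload has been handed to NetReceive(), the descriptor is
 * immediately recycled: ownership is returned to the DMA engine and
 * p_rxdesc_curr is advanced to the next descriptor in the ring.
 */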
642
643 int mvgbe_initialize(bd_t *bis)
644 {
645 struct mvgbe_device *dmvgbe;
646 struct eth_device *dev;
647 int devnum;
648 char *s;
649 u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;
650
651 for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
652 /* skip ports that are configured not to be used */
653 if (used_ports[devnum] == 0)
654 continue;
655
656 dmvgbe = malloc(sizeof(struct mvgbe_device));
657
658 if (!dmvgbe)
659 goto error1;
660
661 memset(dmvgbe, 0, sizeof(struct mvgbe_device));
662
663 dmvgbe->p_rxdesc =
664 (struct mvgbe_rxdesc *)memalign(PKTALIGN,
665 MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);
666
667 if (!dmvgbe->p_rxdesc)
668 goto error2;
669
670 dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
671 RINGSZ*PKTSIZE_ALIGN + 1);
672
673 if (!dmvgbe->p_rxbuf)
674 goto error3;
675
676 dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
677
678 if (!dmvgbe->p_aligned_txbuf)
679 goto error4;
680
681 dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
682 PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
683
684 if (!dmvgbe->p_txdesc) {
685 free(dmvgbe->p_aligned_txbuf);
686 error4:
687 free(dmvgbe->p_rxbuf);
688 error3:
689 free(dmvgbe->p_rxdesc);
690 error2:
691 free(dmvgbe);
692 error1:
693 printf("Err.. %s Failed to allocate memory\n",
694 __FUNCTION__);
695 return -1;
696 }
697
698 dev = &dmvgbe->dev;
699
700 /* must be less than NAMESIZE (16) */
701 sprintf(dev->name, "egiga%d", devnum);
702
703 /* Extract the MAC address from the environment */
704 switch (devnum) {
705 case 0:
706 dmvgbe->regs = (void *)MVGBE0_BASE;
707 s = "ethaddr";
708 break;
709 #if defined(MVGBE1_BASE)
710 case 1:
711 dmvgbe->regs = (void *)MVGBE1_BASE;
712 s = "eth1addr";
713 break;
714 #endif
715 default: /* this should never happen */
716 printf("Err..(%s) Invalid device number %d\n",
717 __FUNCTION__, devnum);
718 return -1;
719 }
720
721 while (!eth_getenv_enetaddr(s, dev->enetaddr)) {
722 /* Generate Private MAC addr if not set */
723 dev->enetaddr[0] = 0x02;
724 dev->enetaddr[1] = 0x50;
725 dev->enetaddr[2] = 0x43;
726 #if defined (CONFIG_SKIP_LOCAL_MAC_RANDOMIZATION)
727 /* Generate fixed lower MAC half using devnum */
728 dev->enetaddr[3] = 0;
729 dev->enetaddr[4] = 0;
730 dev->enetaddr[5] = devnum;
731 #else
732 /* Generate random lower MAC half */
733 dev->enetaddr[3] = get_random_hex();
734 dev->enetaddr[4] = get_random_hex();
735 dev->enetaddr[5] = get_random_hex();
736 #endif
737 eth_setenv_enetaddr(s, dev->enetaddr);
738 }
739
740 dev->init = (void *)mvgbe_init;
741 dev->halt = (void *)mvgbe_halt;
742 dev->send = (void *)mvgbe_send;
743 dev->recv = (void *)mvgbe_recv;
744 dev->write_hwaddr = (void *)mvgbe_write_hwaddr;
745
746 eth_register(dev);
747
748 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
749 miiphy_register(dev->name, smi_reg_read, smi_reg_write);
750 /* Set phy address of the port */
751 miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
752 MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
753 #endif
754 }
755 return 0;
756 }
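/*
 * Usage sketch (assumption, not part of this file): on Kirkwood and
 * Orion5x boards this driver is typically hooked up from the SoC or
 * board ethernet init code, roughly:
 *
 *	int cpu_eth_init(bd_t *bis)
 *	{
 *		mvgbe_initialize(bis);
 *		return 0;
 *	}
 *
 * with CONFIG_MVGBE_PORTS in the board config selecting which of the
 * MAX_MVGBE_DEVS ports get registered.
 */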