/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ORION5X)
#include <asm/arch/orion5x.h>
#elif defined(CONFIG_DOVE)
#include <asm/arch/dove.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_MVGBE_PORTS
# define CONFIG_MVGBE_PORTS {0, 0}
#endif

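/*
 * Example (illustrative, normally provided by the board config header):
 * a board that wants only the first controller enabled would define
 *
 *	#define CONFIG_MVGBE_PORTS	{1, 0}
 *
 * Each non-zero entry enables the corresponding port in mvgbe_initialize().
 */
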
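/*
 * MV_PHY_ADR_REQUEST is a pseudo PHY address: when both the PHY address and
 * the register offset passed to the SMI callbacks equal this value, the
 * callbacks access the controller's own phyadr register instead of starting
 * a real SMI transaction (see smi_reg_read()/smi_reg_write() below).
 */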
#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns 16bit phy register value, or 0xffff on error
 */
static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 *data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		/* return the currently configured PHY address */
		*data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return 0;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	timeout = MVGBE_PHY_SMI_TIMEOUT;
	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -EFAULT;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	*data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
		*data);

	return 0;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EINVAL on bad parameters,
 * -ETIME on timeout
 */
static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -ETIME;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}
#endif

#if defined(CONFIG_PHYLIB)
int mvgbe_phy_read(struct mii_dev *bus, int phy_addr, int dev_addr,
		   int reg_addr)
{
	u16 data;
	int ret;
	ret = smi_reg_read(bus->name, phy_addr, reg_addr, &data);
	if (ret)
		return ret;
	return data;
}

int mvgbe_phy_write(struct mii_dev *bus, int phy_addr, int dev_addr,
		    int reg_addr, u16 data)
{
	return smi_reg_write(bus->name, phy_addr, reg_addr, data);
}
#endif

/* Stop and check all queues */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
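/*
 * Worked example (illustrative): a 256 MiB window yields
 * ((0x10000000 / 0x10000) - 1) << 16 = 0x0fff0000 in the window Size (SR)
 * register written below.
 */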
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
		     (((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
		     (param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - This function sets the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to function
 * parameters.
 * This function adds/removes MAC addresses from the port unicast address
 * table.
 *
 * @uc_nibble	Unicast MAC Address last nibble.
 * @option	0 = Add, 1 = remove address.
 *
 * RETURN: 1 if output succeeded. 0 if option parameter is invalid.
 */
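/*
 * Worked example (illustrative): for a MAC address whose last byte is 0x15
 * the last nibble is 0x5, so tbl_offset = 5 / 4 = 1 (second dfut register)
 * and reg_offset = 5 % 4 = 1 (second entry within that register).
 */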
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - This function sets the port Unicast address.
 */
static void port_uc_addr_set(struct mvgbe_registers *regs, u8 *p_addr)
{
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
 */
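/*
 * The ring consists of RINGSZ descriptors laid out back to back in p_rxdesc;
 * each descriptor points at a PKTSIZE_ALIGN sized slice of p_rxbuf, and the
 * last descriptor links back to the first to close the ring.
 */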
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1))
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
		| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}

static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoders */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port is not reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);

	return 0;
}

static int mvgbe_write_hwaddr(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Programs net device MAC address after initialization */
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
	return 0;
}

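/*
 * mvgbe_send - eth_device send callback.
 *
 * Queues a single frame on the zeroth TX queue and busy-waits until the
 * DMA engine releases the descriptor (or reports an error).
 */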
static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
				datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tc desc as zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

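/*
 * mvgbe_recv - eth_device recv callback.
 *
 * Waits for the current RX descriptor to be released by the DMA engine,
 * hands a good frame to the network stack and then recycles the descriptor.
 */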
static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;

	/* wait until an rx packet is available or timeout */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * In case we received a packet without the first/last bits set,
	 * or with the error summary bit set,
	 * the packet needs to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
			" upper layer (net_process_received_packet)\n",
			__func__);

		/* let the upper layer handle the packet */
		net_process_received_packet((p_rxdesc_curr->buf_ptr +
					     RX_BUF_OFFSET),
					    (int)(p_rxdesc_curr->byte_cnt -
						  RX_BUF_OFFSET));
	}
	/*
	 * free these descriptors and point next in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return 0;
}

#if defined(CONFIG_PHYLIB)
int mvgbe_phylib_init(struct eth_device *dev, int phyid)
{
	struct mii_dev *bus;
	struct phy_device *phydev;
	int ret;

	bus = mdio_alloc();
	if (!bus) {
		printf("mdio_alloc failed\n");
		return -ENOMEM;
	}
	bus->read = mvgbe_phy_read;
	bus->write = mvgbe_phy_write;
	sprintf(bus->name, "%s", dev->name);

	ret = mdio_register(bus);
	if (ret) {
		printf("mdio_register failed\n");
		free(bus);
		return -ENOMEM;
	}

	/* Set phy address of the port */
	mvgbe_phy_write(bus, MV_PHY_ADR_REQUEST, 0, MV_PHY_ADR_REQUEST, phyid);

	phydev = phy_connect(bus, phyid, dev, PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return 0;
}
#endif

int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip if port is configured not to be used */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));

		if (!dmvgbe)
			goto error1;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));

		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);

		if (!dmvgbe->p_rxdesc)
			goto error2;

		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);

		if (!dmvgbe->p_rxbuf)
			goto error3;

		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);

		if (!dmvgbe->p_aligned_txbuf)
			goto error4;

		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);

		if (!dmvgbe->p_txdesc) {
			free(dmvgbe->p_aligned_txbuf);
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__func__);
			return -1;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__func__, devnum);
			return -1;
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, smi_reg_read, smi_reg_write);
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}
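
/*
 * Usage sketch (assumption, not part of this file): SoC or board code is
 * expected to call mvgbe_initialize() from its ethernet init hook, e.g.:
 *
 *	int cpu_eth_init(bd_t *bis)
 *	{
 *		mvgbe_initialize(bis);
 *		return 0;
 *	}
 */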