/* drivers/net/fm/eth.c */
/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 * Dave Liu <daveliu@freescale.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <fm_eth.h>
#include <fsl_mdio.h>
#include <miiphy.h>
#include <phy.h>
#include <asm/fsl_dtsec.h>
#include <asm/fsl_tgec.h>
#include <asm/fsl_memac.h>

#include "fm.h"

static struct eth_device *devlist[NUM_FM_PORTS];
static int num_controllers;

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) && !defined(BITBANGMII)

#define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \
			 TBIANA_FULL_DUPLEX)

#define TBIANA_SGMII_ACK 0x4001

#define TBICR_SETTINGS (TBICR_ANEG_ENABLE | TBICR_RESTART_ANEG | \
			TBICR_FULL_DUPLEX | TBICR_SPEED1_SET)

/* Configure the TBI for SGMII operation */
static void dtsec_configure_serdes(struct fm_eth *priv)
{
#ifdef CONFIG_SYS_FMAN_V3
	u32 value;
	struct mii_dev bus;
	bus.priv = priv->mac->phyregs;

	/* SGMII IF mode + AN enable */
	value = PHY_SGMII_IF_MODE_AN | PHY_SGMII_IF_MODE_SGMII;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x14, value);

	/* Dev ability according to SGMII specification */
	value = PHY_SGMII_DEV_ABILITY_SGMII;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x4, value);

	/* Adjust link timer for SGMII -
	   1.6 ms in units of 8 ns = 2 * 10^5 = 0x30d40 */
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x13, 0x3);
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0x12, 0xd40);

	/* Restart AN */
	value = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
	memac_mdio_write(&bus, 0, MDIO_DEVAD_NONE, 0, value);
#else
	struct dtsec *regs = priv->mac->base;
	struct tsec_mii_mng *phyregs = priv->mac->phyregs;

	/*
	 * Access TBI PHY registers at given TSEC register offset as
	 * opposed to the register offset used for external PHY accesses
	 */
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_TBICON,
			      TBICON_CLK_SELECT);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_ANA,
			      TBIANA_SGMII_ACK);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0,
			      TBI_CR, TBICR_SETTINGS);
#endif
}

static void dtsec_init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
#ifndef CONFIG_SYS_FMAN_V3
	struct dtsec *regs = (struct dtsec *)CONFIG_SYS_FSL_FM1_DTSEC1_ADDR;

	/* Assign a Physical address to the TBI */
	out_be32(&regs->tbipa, CONFIG_SYS_TBIPA_VALUE);
#endif

	if (fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII)
		dtsec_configure_serdes(fm_eth);
}

static int tgec_is_fibre(struct eth_device *dev)
{
	struct fm_eth *fm = dev->priv;
	char phyopt[20];

	sprintf(phyopt, "fsl_fm%d_xaui_phy", fm->fm_index + 1);

	return hwconfig_arg_cmp(phyopt, "xfi");
}
#endif

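/*
 * The parameter pages used below live in MURAM.  These helpers perform
 * 16-bit accesses as aligned 32-bit reads (and read-modify-writes),
 * selecting the big-endian half of the word indicated by the low
 * address bits.
 */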
static u16 muram_readw(u16 *addr)
{
	u32 base = (u32)addr & ~0x3;
	u32 val32 = *(u32 *)base;
	int byte_pos;
	u16 ret;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		ret = (u16)(val32 & 0x0000ffff);
	else
		ret = (u16)((val32 & 0xffff0000) >> 16);

	return ret;
}

static void muram_writew(u16 *addr, u16 val)
{
	u32 base = (u32)addr & ~0x3;
	u32 org32 = *(u32 *)base;
	u32 val32;
	int byte_pos;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		val32 = (org32 & 0xffff0000) | val;
	else
		val32 = (org32 & 0x0000ffff) | ((u32)val << 16);

	*(u32 *)base = val32;
}

static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port)
{
	int timeout = 1000000;

	clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN);

	/* wait until the rx port is not busy */
	while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--)
		;
}

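/*
 * Both BMI ports are run in independent mode (FMBM_xCFG_IM): the FMan
 * RISC moves frames directly between the BD rings set up below and the
 * MAC, which is sufficient for U-Boot's simple polled operation and
 * avoids the full QMan/BMan frame-queue path.
 */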
static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port)
{
	/* set BMI to independent mode, Rx port disable */
	out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
	/* clear FOF in IM case */
	out_be32(&rx_port->fmbm_rim, 0);
	/* Rx frame next engine -RISC */
	out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
	/* Rx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
	setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
	/* enable Rx statistic counters */
	out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
	/* disable Rx performance counters */
	out_be32(&rx_port->fmbm_rpc, 0);
}

static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port)
{
	int timeout = 1000000;

	clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN);

	/* wait until the tx port is not busy */
	while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--)
		;
}

static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port)
{
	/* set BMI to independent mode, Tx port disable */
	out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
	/* Tx frame next engine -RISC */
	out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	/* Tx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
	setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
	/* enable Tx statistic counters */
	out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
	/* disable Tx performance counters */
	out_be32(&tx_port->fmbm_tpc, 0);
}

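/*
 * Independent-mode layout set up by the two functions below: the global
 * parameter page is allocated in MURAM, with the Rx queue descriptor at
 * offset 0x20 (rxqd_ptr) and the Tx queue descriptor at offset 0x40
 * (txqd_ptr).  The BD rings and the Rx buffer pool are plain malloc()ed
 * memory; PRAM_MODE_GLOBAL makes the FMan snoop those buffers and BDs.
 */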
static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *rx_bd_ring_base;
	void *rx_buf_pool;
	struct fm_port_bd *rxbd;
	struct fm_port_qd *rxqd;
	struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->rx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode- snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Rx queue descriptor pointer */
	pram->rxqd_ptr = pram_page_offset + 0x20;

	/* set the max receive buffer length, power of 2 */
	muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);

	/* alloc Rx buffer descriptors from main memory */
	rx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	if (!rx_bd_ring_base)
		return 0;
	memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);

	/* alloc Rx buffer from main memory */
	rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	if (!rx_buf_pool)
		return 0;
	memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE);

	/* save them to fm_eth */
	fm_eth->rx_bd_ring = rx_bd_ring_base;
	fm_eth->cur_rxbd = rx_bd_ring_base;
	fm_eth->rx_buf = rx_buf_pool;

	/* init Rx BDs ring */
	rxbd = (struct fm_port_bd *)rx_bd_ring_base;
	for (i = 0; i < RX_BD_RING_SIZE; i++) {
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		rxbd->buf_ptr_hi = 0;
		rxbd->buf_ptr_lo = (u32)rx_buf_pool + i * MAX_RXBUF_LEN;
		rxbd++;
	}

	/* set the Rx queue descriptor */
	rxqd = &pram->rxqd;
	muram_writew(&rxqd->gen, 0);
	muram_writew(&rxqd->bd_ring_base_hi, 0);
	rxqd->bd_ring_base_lo = (u32)rx_bd_ring_base;
	muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	muram_writew(&rxqd->offset_in, 0);
	muram_writew(&rxqd->offset_out, 0);

	/* set IM parameter ram pointer to Rx Frame Queue ID */
	out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset);

	return 1;
}

static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *tx_bd_ring_base;
	struct fm_port_bd *txbd;
	struct fm_port_qd *txqd;
	struct fm_bmi_tx_port *bmi_tx_port = fm_eth->tx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->tx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode- snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Tx queue descriptor pointer */
	pram->txqd_ptr = pram_page_offset + 0x40;

	/* alloc Tx buffer descriptors from main memory */
	tx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	if (!tx_bd_ring_base)
		return 0;
	memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	/* save it to fm_eth */
	fm_eth->tx_bd_ring = tx_bd_ring_base;
	fm_eth->cur_txbd = tx_bd_ring_base;

	/* init Tx BDs ring */
	txbd = (struct fm_port_bd *)tx_bd_ring_base;
	for (i = 0; i < TX_BD_RING_SIZE; i++) {
		txbd->status = TxBD_LAST;
		txbd->len = 0;
		txbd->buf_ptr_hi = 0;
		txbd->buf_ptr_lo = 0;
		txbd++;
	}

	/* set the Tx queue descriptor */
	txqd = &pram->txqd;
	muram_writew(&txqd->bd_ring_base_hi, 0);
	txqd->bd_ring_base_lo = (u32)tx_bd_ring_base;
	muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	muram_writew(&txqd->offset_in, 0);
	muram_writew(&txqd->offset_out, 0);

	/* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
	out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);

	return 1;
}

static int fm_eth_init(struct fm_eth *fm_eth)
{

	if (!fm_eth_rx_port_parameter_init(fm_eth))
		return 0;

	if (!fm_eth_tx_port_parameter_init(fm_eth))
		return 0;

	return 1;
}

static int fm_eth_startup(struct fm_eth *fm_eth)
{
	struct fsl_enet_mac *mac;
	mac = fm_eth->mac;

	/* Rx/TxBDs, Rx/TxQDs, Rx buff and parameter ram init */
	if (!fm_eth_init(fm_eth))
		return 0;
	/* setup the MAC controller */
	mac->init_mac(mac);

	/* For some reason we need to set SPEED_100 */
	if (((fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII) ||
	     (fm_eth->enet_if == PHY_INTERFACE_MODE_QSGMII)) &&
	    mac->set_if_mode)
		mac->set_if_mode(mac, fm_eth->enet_if, SPEED_100);

	/* init bmi rx port, IM mode and disable */
	bmi_rx_port_init(fm_eth->rx_port);
	/* init bmi tx port, IM mode and disable */
	bmi_tx_port_init(fm_eth->tx_port);

	return 1;
}

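/*
 * Graceful stop: setting PRAM_MODE_GRACEFUL_STOP in the Tx parameter
 * page tells the FMan to finish the frames already queued and then stop
 * fetching new Tx BDs; clearing the bit resumes normal transmission.
 * fm_eth_halt()/fm_eth_open() use this to quiesce the Tx port before it
 * is disabled and to restart it afterwards.
 */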
static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* graceful stop transmission of frames */
	pram->mode |= PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* re-enable transmission of frames */
	pram->mode &= ~PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static int fm_eth_open(struct eth_device *dev, bd_t *bd)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;
#ifdef CONFIG_PHYLIB
	int ret;
#endif

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* setup the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n", __func__);
		return 1;
	}
	mac->set_mac_addr(mac, dev->enetaddr);

	/* enable bmi Rx port */
	setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
	/* enable MAC rx/tx port */
	mac->enable_mac(mac);
	/* enable bmi Tx port */
	setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
	/* re-enable transmission of frame */
	fmc_tx_port_graceful_stop_disable(fm_eth);

#ifdef CONFIG_PHYLIB
	ret = phy_startup(fm_eth->phydev);
	if (ret) {
		printf("%s: Could not initialize\n", fm_eth->phydev->dev->name);
		return ret;
	}
#else
	fm_eth->phydev->speed = SPEED_1000;
	fm_eth->phydev->link = 1;
	fm_eth->phydev->duplex = DUPLEX_FULL;
#endif

	/* set the MAC-PHY mode */
	mac->set_if_mode(mac, fm_eth->enet_if, fm_eth->phydev->speed);

	if (!fm_eth->phydev->link)
		printf("%s: No link.\n", fm_eth->phydev->dev->name);

	return fm_eth->phydev->link ? 0 : -1;
}

static void fm_eth_halt(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* graceful stop the transmission of frames */
	fmc_tx_port_graceful_stop_enable(fm_eth);
	/* disable bmi Tx port */
	bmi_tx_port_disable(fm_eth->tx_port);
	/* disable MAC rx/tx port */
	mac->disable_mac(mac);
	/* disable bmi Rx port */
	bmi_rx_port_disable(fm_eth->rx_port);

	phy_shutdown(fm_eth->phydev);
}

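/*
 * Transmit path: claim the next free Tx BD, point it at the caller's
 * buffer, mark it READY|LAST, then advance offset_in in the Tx queue
 * descriptor so the FMan RISC picks the frame up.  Completion is
 * detected by polling for the READY bit to clear, so the buffer can be
 * reused as soon as this function returns.
 */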
static int fm_eth_send(struct eth_device *dev, void *buf, int len)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *txbd, *txbd_base;
	u16 offset_in;
	int i;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->tx_pram;
	txbd = fm_eth->cur_txbd;

	/* find one empty TxBD */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x1000) {
			printf("%s: Tx buffer not ready\n", dev->name);
			return 0;
		}
	}
	/* setup TxBD */
	txbd->buf_ptr_hi = 0;
	txbd->buf_ptr_lo = (u32)buf;
	txbd->len = len;
	sync();
	txbd->status = TxBD_READY | TxBD_LAST;
	sync();

	/* update TxQD, let the RISC send the packet */
	offset_in = muram_readw(&pram->txqd.offset_in);
	offset_in += sizeof(struct fm_port_bd);
	if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
		offset_in = 0;
	muram_writew(&pram->txqd.offset_in, offset_in);
	sync();

	/* wait for buffer to be transmitted */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x10000) {
			printf("%s: Tx error\n", dev->name);
			return 0;
		}
	}

	/* advance the TxBD */
	txbd++;
	txbd_base = (struct fm_port_bd *)fm_eth->tx_bd_ring;
	if (txbd >= (txbd_base + TX_BD_RING_SIZE))
		txbd = txbd_base;
	/* update current txbd */
	fm_eth->cur_txbd = (void *)txbd;

	return 1;
}

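/*
 * Receive path: walk the Rx BD ring until an empty BD is reached, hand
 * each good frame to the network stack via NetReceive(), then recycle
 * the BD (mark it EMPTY again) and advance offset_out in the Rx queue
 * descriptor so the FMan can reuse it.
 */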
static int fm_eth_recv(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *rxbd, *rxbd_base;
	u16 status, len;
	u8 *data;
	u16 offset_out;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->rx_pram;
	rxbd = fm_eth->cur_rxbd;
	status = rxbd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = (u8 *)rxbd->buf_ptr_lo;
			len = rxbd->len;
			NetReceive(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
			return 0;
		}

		/* clear the RxBDs */
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		sync();

		/* advance RxBD */
		rxbd++;
		rxbd_base = (struct fm_port_bd *)fm_eth->rx_bd_ring;
		if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
			rxbd = rxbd_base;
		/* read next status */
		status = rxbd->status;

		/* update RxQD */
		offset_out = muram_readw(&pram->rxqd.offset_out);
		offset_out += sizeof(struct fm_port_bd);
		if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
			offset_out = 0;
		muram_writew(&pram->rxqd.offset_out, offset_out);
		sync();
	}
	fm_eth->cur_rxbd = (void *)rxbd;

	return 1;
}

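/*
 * Select the MAC register block for this controller: mEMAC on FMan v3
 * devices (10G ports are mapped after the first eight mEMACs, hence the
 * num += 8), otherwise dTSEC for 1G ports and TGEC for 10G ports on
 * earlier FMan revisions.
 */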
static int fm_eth_init_mac(struct fm_eth *fm_eth, struct ccsr_fman *reg)
{
	struct fsl_enet_mac *mac;
	int num;
	void *base, *phyregs = NULL;

	num = fm_eth->num;

#ifdef CONFIG_SYS_FMAN_V3
	if (fm_eth->type == FM_ETH_10G_E)
		num += 8;
	base = &reg->memac[num].fm_memac;
	phyregs = &reg->memac[num].fm_memac_mdio;
#else
	/* Get the mac registers base address */
	if (fm_eth->type == FM_ETH_1G_E) {
		base = &reg->mac_1g[num].fm_dtesc;
		phyregs = &reg->mac_1g[num].fm_mdio.miimcfg;
	} else {
		base = &reg->mac_10g[num].fm_10gec;
		phyregs = &reg->mac_10g[num].fm_10gec_mdio;
	}
#endif

	/* alloc mac controller */
	mac = malloc(sizeof(struct fsl_enet_mac));
	if (!mac)
		return 0;
	memset(mac, 0, sizeof(struct fsl_enet_mac));

	/* save the mac to fm_eth struct */
	fm_eth->mac = mac;

#ifdef CONFIG_SYS_FMAN_V3
	init_memac(mac, base, phyregs, MAX_RXBUF_LEN);
#else
	if (fm_eth->type == FM_ETH_1G_E)
		init_dtsec(mac, base, phyregs, MAX_RXBUF_LEN);
	else
		init_tgec(mac, base, phyregs, MAX_RXBUF_LEN);
#endif

	return 1;
}

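/*
 * Connect and configure the external PHY through PHYLIB.  For 1G ports
 * the TBI is programmed first (dtsec_init_phy) and advertisement is
 * limited to 10/100/1000 modes; 10G ports advertise 10GBase-T only and
 * are marked as fibre when the fsl_fm<n>_xaui_phy hwconfig option is
 * set to "xfi".
 */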
static int init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
	struct phy_device *phydev = NULL;
	u32 supported;

#ifdef CONFIG_PHYLIB
	if (fm_eth->type == FM_ETH_1G_E)
		dtsec_init_phy(dev);

	if (fm_eth->bus) {
		phydev = phy_connect(fm_eth->bus, fm_eth->phyaddr, dev,
				     fm_eth->enet_if);
	}

	if (!phydev) {
		printf("Failed to connect\n");
		return -1;
	}

	if (fm_eth->type == FM_ETH_1G_E) {
		supported = (SUPPORTED_10baseT_Half |
			     SUPPORTED_10baseT_Full |
			     SUPPORTED_100baseT_Half |
			     SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full);
	} else {
		supported = SUPPORTED_10000baseT_Full;

		if (tgec_is_fibre(dev))
			phydev->port = PORT_FIBRE;
	}

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	fm_eth->phydev = phydev;

	phy_config(phydev);
#endif

	return 0;
}

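/*
 * Entry point called by platform code, once per FMan port described by
 * an fm_eth_info entry: it allocates the driver state, brings the port
 * up in independent mode and registers an eth_device with U-Boot's
 * network layer.
 *
 * Illustrative sketch of a caller (hypothetical; how fm_eth_info is
 * filled in and which CCSR address macro applies are SoC specific):
 *
 *	struct fm_eth_info info = { .index = 1, .num = 0, ... };
 *	fm_eth_initialize((struct ccsr_fman *)CONFIG_SYS_FSL_FM1_ADDR, &info);
 */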
int fm_eth_initialize(struct ccsr_fman *reg, struct fm_eth_info *info)
{
	struct eth_device *dev;
	struct fm_eth *fm_eth;
	int i, num = info->num;

	/* alloc eth device */
	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* alloc the FMan ethernet private struct */
	fm_eth = (struct fm_eth *)malloc(sizeof(struct fm_eth));
	if (!fm_eth)
		return 0;
	memset(fm_eth, 0, sizeof(struct fm_eth));

	/* save off some things we need from the info struct */
	fm_eth->fm_index = info->index - 1; /* keep as 0 based for muram */
	fm_eth->num = num;
	fm_eth->type = info->type;

	fm_eth->rx_port = (void *)&reg->port[info->rx_port_id - 1].fm_bmi;
	fm_eth->tx_port = (void *)&reg->port[info->tx_port_id - 1].fm_bmi;

	/* set the ethernet max receive length */
	fm_eth->max_rx_len = MAX_RXBUF_LEN;

	/* init global mac structure */
	if (!fm_eth_init_mac(fm_eth, reg))
		return 0;

	/* keep same as the manual, we call FMAN1, FMAN2, DTSEC1, DTSEC2, etc */
	if (fm_eth->type == FM_ETH_1G_E)
		sprintf(dev->name, "FM%d@DTSEC%d", info->index, num + 1);
	else
		sprintf(dev->name, "FM%d@TGEC%d", info->index, num + 1);

	devlist[num_controllers++] = dev;
	dev->iobase = 0;
	dev->priv = (void *)fm_eth;
	dev->init = fm_eth_open;
	dev->halt = fm_eth_halt;
	dev->send = fm_eth_send;
	dev->recv = fm_eth_recv;
	fm_eth->dev = dev;
	fm_eth->bus = info->bus;
	fm_eth->phyaddr = info->phy_addr;
	fm_eth->enet_if = info->enet_if;

	/* startup the FM im */
	if (!fm_eth_startup(fm_eth))
		return 0;

	if (init_phy(dev))
		return 0;

	/* clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;
	eth_register(dev);

	return 1;
}