net: sh-eth: Use memalign instead of original memory alignment function
/*
 * sh_eth.c - Driver for Renesas Ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013 Renesas Electronics Corporation
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <asm/errno.h>
#include <asm/io.h>

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
		flush_dcache_range((u32)addr, (u32)(addr + len - 1))
#else
#define flush_cache_wback(...)
#endif

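/*
 * invalidate_cache() rounds the requested range out to whole
 * CONFIG_SH_ETHER_ALIGNE_SIZE cache lines before handing it to
 * invalidate_dcache_range().
 */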
#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)	\
	{	\
		u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		u32 start, end;	\
		\
		start = (u32)addr;	\
		end = start + len;	\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
		\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

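/*
 * Queue one frame on the current tx descriptor and poll until the
 * controller has transmitted it.
 */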
int sh_eth_send(struct eth_device *dev, void *packet, int len)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* The packet must start on a 4-byte boundary */
	if ((int)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4-byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(eth, EDTRR) & EDTRR_TRNS))
		sh_eth_write(eth, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

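/*
 * Check the current rx descriptor; if a frame has been received without
 * errors, hand it to the network stack and recycle the descriptor.
 */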
int sh_eth_recv(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, len = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	uchar *packet;

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
		/* Check for errors */
		if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
			len = port_info->rx_desc_cur->rd1 & 0xffff;
			packet = (uchar *)
				ADDR_TO_P2(port_info->rx_desc_cur->rd2);
			invalidate_cache(packet, len);
			NetReceive(packet, len);
		}

		/* Make current descriptor available again */
		if (port_info->rx_desc_cur->rd0 & RD_RDLE)
			port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
		else
			port_info->rx_desc_cur->rd0 = RD_RACT;
		/* Point to the next descriptor */
		port_info->rx_desc_cur++;
		if (port_info->rx_desc_cur >=
		    port_info->rx_desc_base + NUM_RX_DESC)
			port_info->rx_desc_cur = port_info->rx_desc_base;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(eth, EDRRR) & EDRRR_R))
		sh_eth_write(eth, EDRRR_R, EDRRR);

	return len;
}

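/*
 * Software-reset the E-DMAC.  On GETHER/RZ parts the self-clearing SRST
 * bit is polled until the reset completes; other parts set and then
 * clear SRST with a fixed delay in between.
 */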
static int sh_eth_reset(struct sh_eth_dev *eth)
{
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(eth, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(eth, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(eth, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(eth, sh_eth_read(eth, EDMR) | EDMR_SRST, EDMR);
	udelay(3000);
	sh_eth_write(eth, sh_eth_read(eth, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

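/*
 * Allocate and initialise the tx descriptor ring, then program the
 * controller's transmit descriptor list registers with its physical address.
 */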
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to the size of
	 * struct tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback((u32)port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((u32)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/* Point the controller to the tx descriptor list. Must use physical
	   addresses */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(eth, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
	return ret;
}

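/*
 * Allocate the rx descriptor ring and its data buffers, then program the
 * controller's receive descriptor list registers.
 */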
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((u32)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((u32)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32) ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(eth, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	int port = eth->port;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (port_info->tx_desc_alloc) {
		free(port_info->tx_desc_alloc);
		port_info->tx_desc_alloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	int port = eth->port;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (port_info->rx_desc_alloc) {
		free(port_info->rx_desc_alloc);
		port_info->rx_desc_alloc = NULL;
	}

	if (port_info->rx_buf_alloc) {
		free(port_info->rx_buf_alloc);
		port_info->rx_buf_alloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;
err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

static int sh_eth_phy_config(struct sh_eth_dev *eth)
{
	int port = eth->port, ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phydev;

	phydev = phy_connect(
			miiphy_get_dev_by_name(dev->name),
			port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

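/*
 * Program the E-DMAC and E-MAC registers, set the station MAC address,
 * bring up the PHY and select the negotiated speed and duplex.
 */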
static int sh_eth_config(struct sh_eth_dev *eth, bd_t *bd)
{
	int port = eth->port, ret = 0;
	u32 val;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phy;

	/* Configure e-dmac registers */
	sh_eth_write(eth, (sh_eth_read(eth, EDMR) & ~EMDR_DESC_R) |
			(EMDR_DESC | EDMR_EL), EDMR);

	sh_eth_write(eth, 0, EESIPR);
	sh_eth_write(eth, 0, TRSCER);
	sh_eth_write(eth, 0, TFTR);
	sh_eth_write(eth, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(eth, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, 0, RPADIR);
#endif
	sh_eth_write(eth, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(eth, 0, ECSIPR);

	/* Set MAC address */
	val = dev->enetaddr[0] << 24 | dev->enetaddr[1] << 16 |
	      dev->enetaddr[2] << 8 | dev->enetaddr[3];
	sh_eth_write(eth, val, MAHR);

	val = dev->enetaddr[4] << 8 | dev->enetaddr[5];
	sh_eth_write(eth, val, MALR);

	sh_eth_write(eth, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, APR_AP, APR);
	sh_eth_write(eth, MPR_MP, MPR);
	sh_eth_write(eth, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(eth, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_R8A7790) || defined(CONFIG_R8A7791) || \
	defined(CONFIG_R8A7793) || defined(CONFIG_R8A7794)
	sh_eth_write(eth, sh_eth_read(eth, RMIIMR) | 0x1, RMIIMR);
#endif
	/* Configure phy */
	ret = sh_eth_phy_config(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_cfg;
	}
	phy = port_info->phydev;
	ret = phy_startup(phy);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724) || defined(CONFIG_R8A7790) || \
	defined(CONFIG_R8A7791) || defined(CONFIG_R8A7793) || \
	defined(CONFIG_R8A7794)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(eth, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE|ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE), ECMR);
	}

	return ret;

err_phy_cfg:
	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled
	 * when we have something to transmit
	 */
	sh_eth_write(eth, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	sh_eth_write(eth, ~EDRRR_R, EDRRR);
}

int sh_eth_init(struct eth_device *dev, bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = dev->priv;

	ret = sh_eth_reset(eth);
	if (ret)
		goto err;

	ret = sh_eth_desc_init(eth);
	if (ret)
		goto err;

	ret = sh_eth_config(eth, bd);
	if (ret)
		goto err_config;

	sh_eth_start(eth);

	return ret;

err_config:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);

err:
	return ret;
}

void sh_eth_halt(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;
	sh_eth_stop(eth);
}

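/*
 * Allocate the driver state, register the device with the network stack
 * and hook up the bit-banged MII bus for PHY access.
 */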
int sh_eth_initialize(bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;

	eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	memset(dev, 0, sizeof(struct eth_device));
	memset(eth, 0, sizeof(struct sh_eth_dev));

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init;
	dev->halt = sh_eth_halt;
	dev->send = sh_eth_send;
	dev->recv = sh_eth_recv;
	eth->port_info[eth->port].dev = dev;

	sprintf(dev->name, SHETHER_NAME);

	/* Register the device with the Ethernet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;
	miiphy_register(dev->name, bb_miiphy_read, bb_miiphy_write);

	if (!eth_getenv_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	if (dev)
		free(dev);

	if (eth)
		free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}

/******* for bb_miiphy *******/
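/*
 * The MDIO bus to the PHY is bit-banged through the PIR register:
 * PIR_MMD switches the MDIO pin to output, PIR_MDO drives the data
 * line, PIR_MDI reads it back and PIR_MDC toggles the management clock.
 */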
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;

	sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;

	sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;

	if (v)
		sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;

	*v = (sh_eth_read(eth, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;

	if (v)
		sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDC, PIR);

	return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name = "sh_eth",
		.init = sh_eth_bb_init,
		.mdio_active = sh_eth_bb_mdio_active,
		.mdio_tristate = sh_eth_bb_mdio_tristate,
		.set_mdio = sh_eth_bb_set_mdio,
		.get_mdio = sh_eth_bb_get_mdio,
		.set_mdc = sh_eth_bb_set_mdc,
		.delay = sh_eth_bb_delay,
	}
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);
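
/*
 * Usage sketch (assumption, not part of this file): a board port would
 * typically register this driver from its board_eth_init() hook, roughly:
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return sh_eth_initialize(bis);
 *	}
 */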