/*
 * sh_eth.c - Driver for Renesas Ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <asm/errno.h>
#include <asm/io.h>

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
        flush_dcache_range((u32)addr, (u32)(addr + len - 1))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)	\
        {	\
                u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
                u32 start, end;	\
                \
                start = (u32)addr;	\
                end = start + len;	\
                start &= ~(line_size - 1);	\
                end = ((end + line_size - 1) & ~(line_size - 1));	\
                \
                invalidate_dcache_range(start, end);	\
        }
#else
#define invalidate_cache(...)
#endif
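
/*
 * Note on cache handling (a summary of how the macros above are used, not a
 * change in behaviour): descriptors and packet buffers are written back with
 * flush_cache_wback() before ownership is handed to the E-DMAC, and
 * invalidated with invalidate_cache() before the CPU re-reads memory the
 * E-DMAC may have written. On SH parts the descriptors are additionally
 * accessed through the uncached P2 area (ADDR_TO_P2), so either macro may
 * compile away to a no-op depending on the CONFIG_* options checked above.
 */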

#define TIMEOUT_CNT 1000

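/*
 * Transmit path overview (derived from the descriptor usage in sh_eth_send()
 * below; the tx_desc_s field layout comes from sh_eth.h):
 *   td2 - physical address of the frame buffer
 *   td1 - frame length, stored in the upper 16 bits
 *   td0 - control/status: TD_TACT hands the descriptor to the E-DMAC and is
 *         cleared by it on completion, TD_TDLE marks the end of the ring,
 *         and TD_TFP is assumed to flag a complete single-buffer frame.
 */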
int sh_eth_send(struct eth_device *dev, void *packet, int len)
{
        struct sh_eth_dev *eth = dev->priv;
        int port = eth->port, ret = 0, timeout;
        struct sh_eth_info *port_info = &eth->port_info[port];

        if (!packet || len > 0xffff) {
                printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
                ret = -EINVAL;
                goto err;
        }

        /* The packet must start on a 4 byte boundary */
        if ((int)packet & 3) {
                printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
                       __func__);
                ret = -EFAULT;
                goto err;
        }

        /* Update tx descriptor */
        flush_cache_wback(packet, len);
        port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
        port_info->tx_desc_cur->td1 = len << 16;
        /* Must preserve the end of descriptor list indication */
        if (port_info->tx_desc_cur->td0 & TD_TDLE)
                port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
        else
                port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

        /* Restart the transmitter if disabled */
        if (!(sh_eth_read(eth, EDTRR) & EDTRR_TRNS))
                sh_eth_write(eth, EDTRR_TRNS, EDTRR);

        /* Wait until the packet is transmitted */
        timeout = TIMEOUT_CNT;
        do {
                invalidate_cache(port_info->tx_desc_cur,
                                 sizeof(struct tx_desc_s));
                udelay(100);
        } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

        if (timeout < 0) {
                printf(SHETHER_NAME ": transmit timeout\n");
                ret = -ETIMEDOUT;
                goto err;
        }

        port_info->tx_desc_cur++;
        if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
                port_info->tx_desc_cur = port_info->tx_desc_base;

err:
        return ret;
}

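/*
 * Receive path overview (mirrors the descriptor handling in sh_eth_recv()
 * below): the driver polls rd0 of the current descriptor and treats the
 * frame as complete once the E-DMAC has cleared RD_RACT. rd1 carries the
 * received length in its low 16 bits, rd2 the buffer address, and RD_RFE
 * flags a reception error. The descriptor is then re-armed with RD_RACT
 * (plus RD_RDLE on the last ring entry) and the ring pointer advances.
 */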
int sh_eth_recv(struct eth_device *dev)
{
        struct sh_eth_dev *eth = dev->priv;
        int port = eth->port, len = 0;
        struct sh_eth_info *port_info = &eth->port_info[port];
        uchar *packet;

        /* Check if the rx descriptor is ready */
        invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
        if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
                /* Check for errors */
                if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
                        len = port_info->rx_desc_cur->rd1 & 0xffff;
                        packet = (uchar *)
                                ADDR_TO_P2(port_info->rx_desc_cur->rd2);
                        invalidate_cache(packet, len);
                        NetReceive(packet, len);
                }

                /* Make current descriptor available again */
                if (port_info->rx_desc_cur->rd0 & RD_RDLE)
                        port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
                else
                        port_info->rx_desc_cur->rd0 = RD_RACT;
                /* Point to the next descriptor */
                port_info->rx_desc_cur++;
                if (port_info->rx_desc_cur >=
                    port_info->rx_desc_base + NUM_RX_DESC)
                        port_info->rx_desc_cur = port_info->rx_desc_base;
        }

        /* Restart the receiver if disabled */
        if (!(sh_eth_read(eth, EDRRR) & EDRRR_R))
                sh_eth_write(eth, EDRRR_R, EDRRR);

        return len;
}

static int sh_eth_reset(struct sh_eth_dev *eth)
{
#if defined(SH_ETH_TYPE_GETHER)
        int ret = 0, i;

        /* Start e-dmac transmitter and receiver */
        sh_eth_write(eth, EDSR_ENALL, EDSR);

        /* Perform a software reset and wait for it to complete */
        sh_eth_write(eth, EDMR_SRST, EDMR);
        for (i = 0; i < TIMEOUT_CNT; i++) {
                if (!(sh_eth_read(eth, EDMR) & EDMR_SRST))
                        break;
                udelay(1000);
        }

        if (i == TIMEOUT_CNT) {
                printf(SHETHER_NAME ": Software reset timeout\n");
                ret = -EIO;
        }

        return ret;
#else
        sh_eth_write(eth, sh_eth_read(eth, EDMR) | EDMR_SRST, EDMR);
        udelay(3000);
        sh_eth_write(eth, sh_eth_read(eth, EDMR) & ~EDMR_SRST, EDMR);

        return 0;
#endif
}

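/*
 * Both descriptor rings below are carved out of a slightly oversized
 * malloc() area so that the base address can be rounded up to the required
 * alignment (TX_DESC_SIZE / RX_DESC_SIZE, and RX_BUF_ALIGNE_SIZE for the
 * receive buffers). The original, unaligned pointer is kept in the
 * *_desc_malloc / rx_buf_malloc fields so it can later be passed to free().
 */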
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
        int port = eth->port, i, ret = 0;
        u32 tmp_addr;
        struct sh_eth_info *port_info = &eth->port_info[port];
        struct tx_desc_s *cur_tx_desc;

        /*
         * Allocate tx descriptors. They must be TX_DESC_SIZE bytes aligned
         */
        port_info->tx_desc_malloc = malloc(NUM_TX_DESC *
                                           sizeof(struct tx_desc_s) +
                                           TX_DESC_SIZE - 1);
        if (!port_info->tx_desc_malloc) {
                printf(SHETHER_NAME ": malloc failed\n");
                ret = -ENOMEM;
                goto err;
        }

        tmp_addr = (u32) (((int)port_info->tx_desc_malloc + TX_DESC_SIZE - 1) &
                          ~(TX_DESC_SIZE - 1));
        flush_cache_wback(tmp_addr, NUM_TX_DESC * sizeof(struct tx_desc_s));
        /* Make sure we use a P2 address (non-cacheable) */
        port_info->tx_desc_base = (struct tx_desc_s *)ADDR_TO_P2(tmp_addr);
        port_info->tx_desc_cur = port_info->tx_desc_base;

        /* Initialize all descriptors */
        for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
             cur_tx_desc++, i++) {
                cur_tx_desc->td0 = 0x00;
                cur_tx_desc->td1 = 0x00;
                cur_tx_desc->td2 = 0x00;
        }

        /* Mark the end of the descriptors */
        cur_tx_desc--;
        cur_tx_desc->td0 |= TD_TDLE;

        /*
         * Point the controller to the tx descriptor list. Must use physical
         * addresses.
         */
        sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER)
        sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
        sh_eth_write(eth, ADDR_TO_PHY(cur_tx_desc), TDFXR);
        sh_eth_write(eth, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
        return ret;
}

static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
        int port = eth->port, i, ret = 0;
        struct sh_eth_info *port_info = &eth->port_info[port];
        struct rx_desc_s *cur_rx_desc;
        u32 tmp_addr;
        u8 *rx_buf;

        /*
         * Allocate rx descriptors. They must be RX_DESC_SIZE bytes aligned
         */
        port_info->rx_desc_malloc = malloc(NUM_RX_DESC *
                                           sizeof(struct rx_desc_s) +
                                           RX_DESC_SIZE - 1);
        if (!port_info->rx_desc_malloc) {
                printf(SHETHER_NAME ": malloc failed\n");
                ret = -ENOMEM;
                goto err;
        }

        tmp_addr = (u32) (((int)port_info->rx_desc_malloc + RX_DESC_SIZE - 1) &
                          ~(RX_DESC_SIZE - 1));
        flush_cache_wback(tmp_addr, NUM_RX_DESC * sizeof(struct rx_desc_s));
        /* Make sure we use a P2 address (non-cacheable) */
        port_info->rx_desc_base = (struct rx_desc_s *)ADDR_TO_P2(tmp_addr);

        port_info->rx_desc_cur = port_info->rx_desc_base;

        /*
         * Allocate rx data buffers. They must be 32 bytes aligned and in
         * the P2 area.
         */
        port_info->rx_buf_malloc = malloc(
                NUM_RX_DESC * MAX_BUF_SIZE + RX_BUF_ALIGNE_SIZE - 1);
        if (!port_info->rx_buf_malloc) {
                printf(SHETHER_NAME ": malloc failed\n");
                ret = -ENOMEM;
                goto err_buf_malloc;
        }

        tmp_addr = (u32)(((int)port_info->rx_buf_malloc
                          + (RX_BUF_ALIGNE_SIZE - 1)) &
                         ~(RX_BUF_ALIGNE_SIZE - 1));
        port_info->rx_buf_base = (u8 *)ADDR_TO_P2(tmp_addr);

        /* Initialize all descriptors */
        for (cur_rx_desc = port_info->rx_desc_base,
             rx_buf = port_info->rx_buf_base, i = 0;
             i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
                cur_rx_desc->rd0 = RD_RACT;
                cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
                cur_rx_desc->rd2 = (u32) ADDR_TO_PHY(rx_buf);
        }

        /* Mark the end of the descriptors */
        cur_rx_desc--;
        cur_rx_desc->rd0 |= RD_RDLE;

        /* Point the controller to the rx descriptor list */
        sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER)
        sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
        sh_eth_write(eth, ADDR_TO_PHY(cur_rx_desc), RDFXR);
        sh_eth_write(eth, RDFFR_RDLF, RDFFR);
#endif

        return ret;

err_buf_malloc:
        free(port_info->rx_desc_malloc);
        port_info->rx_desc_malloc = NULL;

err:
        return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
        int port = eth->port;
        struct sh_eth_info *port_info = &eth->port_info[port];

        if (port_info->tx_desc_malloc) {
                free(port_info->tx_desc_malloc);
                port_info->tx_desc_malloc = NULL;
        }
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
        int port = eth->port;
        struct sh_eth_info *port_info = &eth->port_info[port];

        if (port_info->rx_desc_malloc) {
                free(port_info->rx_desc_malloc);
                port_info->rx_desc_malloc = NULL;
        }

        if (port_info->rx_buf_malloc) {
                free(port_info->rx_buf_malloc);
                port_info->rx_buf_malloc = NULL;
        }
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
        int ret = 0;

        ret = sh_eth_tx_desc_init(eth);
        if (ret)
                goto err_tx_init;

        ret = sh_eth_rx_desc_init(eth);
        if (ret)
                goto err_rx_init;

        return ret;

err_rx_init:
        sh_eth_tx_desc_free(eth);

err_tx_init:
        return ret;
}

static int sh_eth_phy_config(struct sh_eth_dev *eth)
{
        int port = eth->port, ret = 0;
        struct sh_eth_info *port_info = &eth->port_info[port];
        struct eth_device *dev = port_info->dev;
        struct phy_device *phydev;

        phydev = phy_connect(
                        miiphy_get_dev_by_name(dev->name),
                        port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
        port_info->phydev = phydev;
        phy_config(phydev);

        return ret;
}

static int sh_eth_config(struct sh_eth_dev *eth, bd_t *bd)
{
        int port = eth->port, ret = 0;
        u32 val;
        struct sh_eth_info *port_info = &eth->port_info[port];
        struct eth_device *dev = port_info->dev;
        struct phy_device *phy;

        /* Configure e-dmac registers */
        sh_eth_write(eth, (sh_eth_read(eth, EDMR) & ~EMDR_DESC_R) |
                     (EMDR_DESC | EDMR_EL), EDMR);

        sh_eth_write(eth, 0, EESIPR);
        sh_eth_write(eth, 0, TRSCER);
        sh_eth_write(eth, 0, TFTR);
        sh_eth_write(eth, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
        sh_eth_write(eth, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER)
        sh_eth_write(eth, 0, RPADIR);
#endif
        sh_eth_write(eth, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

        /* Configure e-mac registers */
        sh_eth_write(eth, 0, ECSIPR);

        /* Set the MAC address (MAHR: first 4 bytes, MALR: last 2 bytes) */
        val = dev->enetaddr[0] << 24 | dev->enetaddr[1] << 16 |
              dev->enetaddr[2] << 8 | dev->enetaddr[3];
        sh_eth_write(eth, val, MAHR);

        val = dev->enetaddr[4] << 8 | dev->enetaddr[5];
        sh_eth_write(eth, val, MALR);

        sh_eth_write(eth, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
        sh_eth_write(eth, 0, PIPR);
        sh_eth_write(eth, APR_AP, APR);
        sh_eth_write(eth, MPR_MP, MPR);
        sh_eth_write(eth, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
        sh_eth_write(eth, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#endif
        /* Configure the phy */
        ret = sh_eth_phy_config(eth);
        if (ret) {
                printf(SHETHER_NAME ": phy config timeout\n");
                goto err_phy_cfg;
        }
        phy = port_info->phydev;
        ret = phy_startup(phy);
        if (ret) {
                printf(SHETHER_NAME ": phy startup failure\n");
                return ret;
        }

        val = 0;

        /* Set the transfer speed */
        if (phy->speed == 100) {
                printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
                sh_eth_write(eth, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
                sh_eth_write(eth, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724)
                val = ECMR_RTM;
#endif
        } else if (phy->speed == 10) {
                printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
                sh_eth_write(eth, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
                sh_eth_write(eth, 0, RTRATE);
#endif
        }
#if defined(SH_ETH_TYPE_GETHER)
        else if (phy->speed == 1000) {
                printf(SHETHER_NAME ": 1000Base/");
                sh_eth_write(eth, GECMR_1000B, GECMR);
        }
#endif

        /* Check whether full duplex mode was negotiated by the phy */
        if (phy->duplex) {
                printf("Full\n");
                sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE|ECMR_DM),
                             ECMR);
        } else {
                printf("Half\n");
                sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE), ECMR);
        }

        return ret;

err_phy_cfg:
        return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
        /*
         * Enable the e-dmac receiver only. The transmitter will be enabled
         * when we have something to transmit.
         */
        sh_eth_write(eth, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
        sh_eth_write(eth, ~EDRRR_R, EDRRR);
}

int sh_eth_init(struct eth_device *dev, bd_t *bd)
{
        int ret = 0;
        struct sh_eth_dev *eth = dev->priv;

        ret = sh_eth_reset(eth);
        if (ret)
                goto err;

        ret = sh_eth_desc_init(eth);
        if (ret)
                goto err;

        ret = sh_eth_config(eth, bd);
        if (ret)
                goto err_config;

        sh_eth_start(eth);

        return ret;

err_config:
        sh_eth_tx_desc_free(eth);
        sh_eth_rx_desc_free(eth);

err:
        return ret;
}

void sh_eth_halt(struct eth_device *dev)
{
        struct sh_eth_dev *eth = dev->priv;

        sh_eth_stop(eth);
}

int sh_eth_initialize(bd_t *bd)
{
        int ret = 0;
        struct sh_eth_dev *eth = NULL;
        struct eth_device *dev = NULL;

        eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
        if (!eth) {
                printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
                ret = -ENOMEM;
                goto err;
        }

        dev = (struct eth_device *)malloc(sizeof(struct eth_device));
        if (!dev) {
                printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
                ret = -ENOMEM;
                goto err;
        }
        memset(dev, 0, sizeof(struct eth_device));
        memset(eth, 0, sizeof(struct sh_eth_dev));

        eth->port = CONFIG_SH_ETHER_USE_PORT;
        eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;

        dev->priv = (void *)eth;
        dev->iobase = 0;
        dev->init = sh_eth_init;
        dev->halt = sh_eth_halt;
        dev->send = sh_eth_send;
        dev->recv = sh_eth_recv;
        eth->port_info[eth->port].dev = dev;

        sprintf(dev->name, SHETHER_NAME);

        /* Register the device with the Ethernet subsystem */
        eth_register(dev);

        bb_miiphy_buses[0].priv = eth;
        miiphy_register(dev->name, bb_miiphy_read, bb_miiphy_write);

        if (!eth_getenv_enetaddr("ethaddr", dev->enetaddr))
                puts("Please set MAC address\n");

        return ret;

err:
        if (dev)
                free(dev);

        if (eth)
                free(eth);

        printf(SHETHER_NAME ": Failed\n");
        return ret;
}

/******* for bb_miiphy *******/
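/*
 * The PHY is reached over a bit-banged MDIO interface driven through the
 * PIR register: the callbacks below toggle PIR_MMD (assumed to control the
 * MDIO output-enable/direction), PIR_MDO and PIR_MDC, and sample PIR_MDI,
 * on behalf of the generic bb_miiphy framework registered in
 * sh_eth_initialize().
 */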
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
        return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
        struct sh_eth_dev *eth = bus->priv;

        sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MMD, PIR);

        return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
        struct sh_eth_dev *eth = bus->priv;

        sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MMD, PIR);

        return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
        struct sh_eth_dev *eth = bus->priv;

        if (v)
                sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDO, PIR);
        else
                sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDO, PIR);

        return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
        struct sh_eth_dev *eth = bus->priv;

        *v = (sh_eth_read(eth, PIR) & PIR_MDI) >> 3;

        return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
        struct sh_eth_dev *eth = bus->priv;

        if (v)
                sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDC, PIR);
        else
                sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDC, PIR);

        return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
        udelay(10);

        return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
        {
                .name = "sh_eth",
                .init = sh_eth_bb_init,
                .mdio_active = sh_eth_bb_mdio_active,
                .mdio_tristate = sh_eth_bb_mdio_tristate,
                .set_mdio = sh_eth_bb_set_mdio,
                .get_mdio = sh_eth_bb_get_mdio,
                .set_mdc = sh_eth_bb_set_mdc,
                .delay = sh_eth_bb_delay,
        }
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);