// SPDX-License-Identifier: GPL-2.0+
/*
 * MediaTek ethernet IP driver for U-Boot
 *
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * This code is mostly based on the code extracted from this MediaTek
 * GitHub repository:
 *
 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
 *
 * I was not able to find a specific license or other developers'
 * copyrights here, so I can't add them here.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include <linux/err.h>

/* Ethernet frame engine register */
#define PDMA_RELATED		0x0800

#define TX_BASE_PTR0		(PDMA_RELATED + 0x000)
#define TX_MAX_CNT0		(PDMA_RELATED + 0x004)
#define TX_CTX_IDX0		(PDMA_RELATED + 0x008)
#define TX_DTX_IDX0		(PDMA_RELATED + 0x00c)

#define RX_BASE_PTR0		(PDMA_RELATED + 0x100)
#define RX_MAX_CNT0		(PDMA_RELATED + 0x104)
#define RX_CALC_IDX0		(PDMA_RELATED + 0x108)

#define PDMA_GLO_CFG		(PDMA_RELATED + 0x204)
#define PDMA_RST_IDX		(PDMA_RELATED + 0x208)
#define DLY_INT_CFG		(PDMA_RELATED + 0x20c)

#define SDM_RELATED		0x0c00

#define SDM_MAC_ADRL		(SDM_RELATED + 0x0c)	/* MAC address LSB */
#define SDM_MAC_ADRH		(SDM_RELATED + 0x10)	/* MAC address MSB */

#define RST_DTX_IDX0		BIT(0)
#define RST_DRX_IDX0		BIT(16)

#define TX_DMA_EN		BIT(0)
#define TX_DMA_BUSY		BIT(1)
#define RX_DMA_EN		BIT(2)
#define RX_DMA_BUSY		BIT(3)
#define TX_WB_DDONE		BIT(6)

/* Ethernet switch register */
#define MT7628_SWITCH_FCT0	0x0008
#define MT7628_SWITCH_PFC1	0x0014
#define MT7628_SWITCH_PVIDC0	0x0040
#define MT7628_SWITCH_PVIDC1	0x0044
#define MT7628_SWITCH_PVIDC2	0x0048
#define MT7628_SWITCH_PVIDC3	0x004c
#define MT7628_SWITCH_VMSC0	0x0070
#define MT7628_SWITCH_FPA	0x0084
#define MT7628_SWITCH_SOCPC	0x008c
#define MT7628_SWITCH_POC0	0x0090
#define MT7628_SWITCH_POC2	0x0098
#define MT7628_SWITCH_SGC	0x009c
#define MT7628_SWITCH_PCR0	0x00c0
#define PCR0_PHY_ADDR		GENMASK(4, 0)
#define PCR0_PHY_REG		GENMASK(12, 8)
#define PCR0_WT_PHY_CMD		BIT(13)
#define PCR0_RD_PHY_CMD		BIT(14)
#define PCR0_WT_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_PCR1	0x00c4
#define PCR1_WT_DONE		BIT(0)
#define PCR1_RD_RDY		BIT(1)
#define PCR1_RD_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_FPA1	0x00c8
#define MT7628_SWITCH_FCT2	0x00cc
#define MT7628_SWITCH_SGC2	0x00e4
#define MT7628_SWITCH_BMU_CTRL	0x0110

/* rxd2 */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PLEN0		GENMASK(29, 16)
#define RX_DMA_TAG		BIT(15)

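/*
 * PDMA RX descriptor layout (one descriptor per RX buffer): rxd1 holds
 * the physical address of the RX buffer, rxd2 carries the DMA-done flag
 * and the received packet length (RX_DMA_PLEN0).
 */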
struct fe_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

#define TX_DMA_PLEN0		GENMASK(29, 16)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_DONE		BIT(31)

#define TX_DMA_INS_VLAN_MT7621	BIT(16)
#define TX_DMA_INS_VLAN		BIT(7)
#define TX_DMA_INS_PPPOE	BIT(12)
#define TX_DMA_PN		GENMASK(26, 24)

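/*
 * PDMA TX descriptor layout: txd1 holds the physical address of the
 * packet buffer, txd2 carries the packet length (TX_DMA_PLEN0) plus the
 * last-segment and DMA-done flags, and txd4 selects the forward port
 * (TX_DMA_PN).
 */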
struct fe_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

#define NUM_RX_DESC		256
#define NUM_TX_DESC		4
#define NUM_PHYS		5

#define PADDING_LENGTH		60

#define MTK_QDMA_PAGE_SIZE	2048

#define CONFIG_MDIO_TIMEOUT	100
#define CONFIG_DMA_STOP_TIMEOUT	100
#define CONFIG_TX_DMA_TIMEOUT	100

struct mt7628_eth_dev {
	void __iomem *base;		/* frame engine base address */
	void __iomem *eth_sw_base;	/* switch base address */

	struct mii_dev *bus;

	struct fe_tx_dma *tx_ring;
	struct fe_rx_dma *rx_ring;

	u8 *rx_buf[NUM_RX_DESC];

	/* Index of the next RXD the DMA engine will use in RXD ring 0 */
	int rx_dma_idx;
	/* Index of the next TXD in TXD ring 0 the CPU will use */
	int tx_dma_idx;

	struct reset_ctl rst_ephy;

	struct phy_device *phy;

	int wan_port;
};

static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length);

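/*
 * Poll the indirect PHY access status register (PCR1) until the given
 * mask reaches the expected state, or report a timeout.
 */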
static int mdio_wait_read(struct mt7628_eth_dev *priv, u32 mask, bool mask_set)
{
	void __iomem *base = priv->eth_sw_base;
	int ret;

	ret = wait_for_bit_le32(base + MT7628_SWITCH_PCR1, mask, mask_set,
				CONFIG_MDIO_TIMEOUT, false);
	if (ret) {
		printf("MDIO operation timeout!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

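/*
 * Read a PHY register indirectly through the switch: the request is
 * programmed into PCR0 and the result is picked up from PCR1 once the
 * read-ready bit is set.
 */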
static int mii_mgr_read(struct mt7628_eth_dev *priv,
			u32 phy_addr, u32 phy_register, u32 *read_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 status = 0;
	int ret;

	*read_data = 0xffff;
	/* Make sure a previous read operation is complete */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, false);
	if (ret)
		return ret;

	writel(PCR0_RD_PHY_CMD |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr),
	       base + MT7628_SWITCH_PCR0);

	/* Wait for the read data to become ready */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, true);
	if (ret)
		return ret;

	status = readl(base + MT7628_SWITCH_PCR1);
	*read_data = FIELD_GET(PCR1_RD_DATA, status);

	return 0;
}

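/*
 * Write a PHY register indirectly through the switch: data and the
 * write command are programmed into PCR0, completion is signalled by
 * the write-done bit in PCR1.
 */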
static int mii_mgr_write(struct mt7628_eth_dev *priv,
			 u32 phy_addr, u32 phy_register, u32 write_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 data;
	int ret;

	/* Make sure previous write operation is complete */
	ret = mdio_wait_read(priv, PCR1_WT_DONE, false);
	if (ret)
		return ret;

	data = FIELD_PREP(PCR0_WT_DATA, write_data) |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr) |
	       PCR0_WT_PHY_CMD;
	writel(data, base + MT7628_SWITCH_PCR0);

	return mdio_wait_read(priv, PCR1_WT_DONE, true);
}

static int mt7628_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	u32 val;
	int ret;

	ret = mii_mgr_read(bus->priv, addr, reg, &val);
	if (ret)
		return ret;

	return val;
}

static int mt7628_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	return mii_mgr_write(bus->priv, addr, reg, value);
}

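/*
 * Initialize the embedded PHYs. The register/page values below come
 * from the MediaTek reference code this driver is based on (see the
 * header comment); they disable EEE and apply the vendor's 100Base and
 * idle-state tuning.
 */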
static void mt7628_ephy_init(struct mt7628_eth_dev *priv)
{
	int i;

	mii_mgr_write(priv, 0, 31, 0x2000);	/* change G2 page */
	mii_mgr_write(priv, 0, 26, 0x0000);

	for (i = 0; i < 5; i++) {
		mii_mgr_write(priv, i, 31, 0x8000);	/* change L0 page */
		mii_mgr_write(priv, i, 0, 0x3100);

		/* EEE disable */
		mii_mgr_write(priv, i, 30, 0xa000);
		mii_mgr_write(priv, i, 31, 0xa000);	/* change L2 page */
		mii_mgr_write(priv, i, 16, 0x0606);
		mii_mgr_write(priv, i, 23, 0x0f0e);
		mii_mgr_write(priv, i, 24, 0x1610);
		mii_mgr_write(priv, i, 30, 0x1f15);
		mii_mgr_write(priv, i, 28, 0x6111);
	}

	/* 100Base AOI setting */
	mii_mgr_write(priv, 0, 31, 0x5000);	/* change G5 page */
	mii_mgr_write(priv, 0, 19, 0x004a);
	mii_mgr_write(priv, 0, 20, 0x015a);
	mii_mgr_write(priv, 0, 21, 0x00ee);
	mii_mgr_write(priv, 0, 22, 0x0033);
	mii_mgr_write(priv, 0, 23, 0x020a);
	mii_mgr_write(priv, 0, 24, 0x0000);
	mii_mgr_write(priv, 0, 25, 0x024a);
	mii_mgr_write(priv, 0, 26, 0x035a);
	mii_mgr_write(priv, 0, 27, 0x02ee);
	mii_mgr_write(priv, 0, 28, 0x0233);
	mii_mgr_write(priv, 0, 29, 0x000a);
	mii_mgr_write(priv, 0, 30, 0x0000);

	/* Fix EPHY idle state abnormal behavior */
	mii_mgr_write(priv, 0, 31, 0x4000);	/* change G4 page */
	mii_mgr_write(priv, 0, 29, 0x000d);
	mii_mgr_write(priv, 0, 30, 0x0500);
}

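/*
 * Basic switch setup: program flow-control thresholds and forwarding
 * defaults, optionally split the WAN port into its own VLAN so it is
 * isolated from the LAN ports, then reset and initialize the embedded
 * PHYs.
 */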
static void rt305x_esw_init(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->eth_sw_base;
	void __iomem *reg;
	u32 val = 0, pvid;
	int i;

	/*
	 * FC_RLS_TH=200, FC_SET_TH=160
	 * DROP_RLS=120, DROP_SET_TH=80
	 */
	writel(0xc8a07850, base + MT7628_SWITCH_FCT0);
	writel(0x00000000, base + MT7628_SWITCH_SGC2);
	writel(0x00405555, base + MT7628_SWITCH_PFC1);
	writel(0x00007f7f, base + MT7628_SWITCH_POC0);
	writel(0x00007f7f, base + MT7628_SWITCH_POC2);	/* disable VLAN */
	writel(0x0002500c, base + MT7628_SWITCH_FCT2);
	/* hashing algorithm=XOR48, aging interval=300sec */
	writel(0x0008a301, base + MT7628_SWITCH_SGC);
	writel(0x02404040, base + MT7628_SWITCH_SOCPC);

	/* Ext PHY Addr=0x1f */
	writel(0x3f502b28, base + MT7628_SWITCH_FPA1);
	writel(0x00000000, base + MT7628_SWITCH_FPA);
	/* 1us cycle number=125 (FE's clock=125MHz) */
	writel(0x7d000000, base + MT7628_SWITCH_BMU_CTRL);

	/* LAN/WAN partition, the WAN port will be unusable in U-Boot */
	if (priv->wan_port >= 0 && priv->wan_port < 6) {
		for (i = 0; i < 8; i++) {
			pvid = i == priv->wan_port ? 2 : 1;
			reg = base + MT7628_SWITCH_PVIDC0 + (i / 2) * 4;
			if (i % 2 == 0) {
				val = pvid;
			} else {
				val |= (pvid << 12);
				writel(val, reg);
			}
		}

		val = 0xffff407f;
		val |= 1 << (8 + priv->wan_port);
		val &= ~(1 << priv->wan_port);
		writel(val, base + MT7628_SWITCH_VMSC0);
	}

	/* Reset the embedded PHYs */
	reset_assert(&priv->rst_ephy);
	reset_deassert(&priv->rst_ephy);
	mdelay(10);

	mt7628_ephy_init(priv);
}

static void eth_dma_start(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;

	setbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
}

static void eth_dma_stop(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;
	int ret;

	clrbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);

	/* Wait for DMA to stop */
	ret = wait_for_bit_le32(base + PDMA_GLO_CFG,
				RX_DMA_BUSY | TX_DMA_BUSY, false,
				CONFIG_DMA_STOP_TIMEOUT, false);
	if (ret)
		printf("DMA stop timeout error!\n");
}

static int mt7628_eth_write_hwaddr(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	u8 *addr = ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr;
	u32 val;

	/* Set MAC address. */
	val = addr[0];
	val = (val << 8) | addr[1];
	writel(val, base + SDM_MAC_ADRH);

	val = addr[2];
	val = (val << 8) | addr[3];
	val = (val << 8) | addr[4];
	val = (val << 8) | addr[5];
	writel(val, base + SDM_MAC_ADRL);

	return 0;
}

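/*
 * Transmit one packet: pad it to the minimum Ethernet frame length,
 * wait until the current TX descriptor has been released by the DMA
 * engine, fill it in, and hand it to the hardware by advancing
 * TX_CTX_IDX0.
 */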
static int mt7628_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int ret;
	int idx;
	int i;

	idx = priv->tx_dma_idx;

	/* Pad message to a minimum length */
	if (length < PADDING_LENGTH) {
		char *p = (char *)packet;

		for (i = 0; i < PADDING_LENGTH - length; i++)
			p[length + i] = 0;
		length = PADDING_LENGTH;
	}

	/* Check if buffer is ready for next TX DMA */
	ret = wait_for_bit_le32(&priv->tx_ring[idx].txd2, TX_DMA_DONE, true,
				CONFIG_TX_DMA_TIMEOUT, false);
	if (ret) {
		printf("TX: DMA still busy on buffer %d\n", idx);
		return ret;
	}

	flush_dcache_range((u32)packet, (u32)packet + length);

	priv->tx_ring[idx].txd1 = CPHYSADDR(packet);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_PLEN0;
	priv->tx_ring[idx].txd2 |= FIELD_PREP(TX_DMA_PLEN0, length);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_DONE;

	idx = (idx + 1) % NUM_TX_DESC;

	/* Make sure the descriptor writes have completed before starting DMA */
	wmb();
	writel(idx, base + TX_CTX_IDX0);

	priv->tx_dma_idx = idx;

	return 0;
}

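/*
 * Receive one packet: check the DMA-done flag of the current RX
 * descriptor, validate the reported length, invalidate the cache for
 * the RX buffer and return the buffer to the caller. The descriptor is
 * handed back to the hardware in mt7628_eth_free_pkt().
 */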
static int mt7628_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	u32 rxd_info;
	int length;
	int idx;

	idx = priv->rx_dma_idx;

	rxd_info = priv->rx_ring[idx].rxd2;
	if ((rxd_info & RX_DMA_DONE) == 0)
		return -EAGAIN;

	length = FIELD_GET(RX_DMA_PLEN0, priv->rx_ring[idx].rxd2);
	if (length == 0 || length > MTK_QDMA_PAGE_SIZE) {
		printf("%s: invalid length (%d bytes)\n", __func__, length);
		mt7628_eth_free_pkt(dev, NULL, 0);
		return -EIO;
	}

	*packetp = priv->rx_buf[idx];
	invalidate_dcache_range((u32)*packetp, (u32)*packetp + length);

	priv->rx_ring[idx].rxd4 = 0;
	priv->rx_ring[idx].rxd2 = RX_DMA_LSO;

	/* Make sure the descriptor writes have completed */
	wmb();

	return length;
}

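/*
 * Return the just-processed RX descriptor to the DMA engine by updating
 * RX_CALC_IDX0 and advance the software index to the next descriptor.
 */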
static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int idx;

	idx = priv->rx_dma_idx;

	/* Hand this RXD back to the DMA engine */
	writel(idx, base + RX_CALC_IDX0);

	/* Advance to the next RXD */
	idx = (idx + 1) % NUM_RX_DESC;

	priv->rx_dma_idx = idx;

	return 0;
}

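/*
 * Bring the interface up: (re)initialize the RX and TX descriptor
 * rings, program the ring addresses and sizes into the PDMA, start the
 * DMA engine and, if a PHY is attached, wait for the link. Finally
 * drain any packets that the switch has already queued.
 */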
static int mt7628_eth_start(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	uchar packet[MTK_QDMA_PAGE_SIZE];
	uchar *packetp;
	int ret;
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		memset((void *)&priv->rx_ring[i], 0, sizeof(priv->rx_ring[0]));
		priv->rx_ring[i].rxd2 |= RX_DMA_LSO;
		priv->rx_ring[i].rxd1 = CPHYSADDR(priv->rx_buf[i]);
	}

	for (i = 0; i < NUM_TX_DESC; i++) {
		memset((void *)&priv->tx_ring[i], 0, sizeof(priv->tx_ring[0]));
		priv->tx_ring[i].txd2 = TX_DMA_LS0 | TX_DMA_DONE;
		priv->tx_ring[i].txd4 = FIELD_PREP(TX_DMA_PN, 1);
	}

	priv->rx_dma_idx = 0;
	priv->tx_dma_idx = 0;

	/* Make sure the descriptor writes have completed */
	wmb();

	/* Disable delay interrupt */
	writel(0, base + DLY_INT_CFG);

	clrbits_le32(base + PDMA_GLO_CFG, 0xffff0000);

	/* Tell the adapter where the TX/RX rings are located. */
	writel(CPHYSADDR(&priv->rx_ring[0]), base + RX_BASE_PTR0);
	writel(CPHYSADDR((u32)&priv->tx_ring[0]), base + TX_BASE_PTR0);

	writel(NUM_RX_DESC, base + RX_MAX_CNT0);
	writel(NUM_TX_DESC, base + TX_MAX_CNT0);

	writel(priv->tx_dma_idx, base + TX_CTX_IDX0);
	writel(RST_DTX_IDX0, base + PDMA_RST_IDX);

	writel(NUM_RX_DESC - 1, base + RX_CALC_IDX0);
	writel(RST_DRX_IDX0, base + PDMA_RST_IDX);

	/* Make sure the register writes have completed before starting DMA */
	wmb();
	eth_dma_start(priv);

	if (priv->phy) {
		ret = phy_startup(priv->phy);
		if (ret)
			return ret;

		if (!priv->phy->link)
			return -EAGAIN;
	}

	/*
	 * The integrated switch seems to queue some received Ethernet
	 * packets in a FIFO. Let's read those already queued packets out
	 * using the receive routine, so that these old messages are
	 * dropped before the new transfer starts.
	 */
	packetp = &packet[0];
	while (mt7628_eth_recv(dev, 0, &packetp) != -EAGAIN)
		mt7628_eth_free_pkt(dev, packetp, 0);

	return 0;
}

static void mt7628_eth_stop(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);

	eth_dma_stop(priv);
}

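/*
 * Probe: map the frame engine and switch register ranges, allocate the
 * descriptor rings in uncached KSEG1 memory and the RX buffers,
 * register the MDIO bus, optionally connect a PHY for link polling and
 * finally initialize the switch.
 */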
static int mt7628_eth_probe(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	int poll_link_phy;
	int ret;
	int i;

	/* Save frame-engine base address for later use */
	priv->base = dev_remap_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Save switch base address for later use */
	priv->eth_sw_base = dev_remap_addr_index(dev, 1);
	if (IS_ERR(priv->eth_sw_base))
		return PTR_ERR(priv->eth_sw_base);

	/* Get the reset controller for the ethernet PHYs */
	ret = reset_get_by_name(dev, "ephy", &priv->rst_ephy);
	if (ret) {
		pr_err("unable to find reset controller for ethernet PHYs\n");
		return ret;
	}

	/* The WAN port will be isolated from the LAN ports */
	priv->wan_port = dev_read_u32_default(dev, "mediatek,wan-port", -1);

	/* Put rx and tx rings into KSEG1 area (uncached) */
	priv->tx_ring = (struct fe_tx_dma *)
		KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
				   sizeof(*priv->tx_ring) * NUM_TX_DESC));
	priv->rx_ring = (struct fe_rx_dma *)
		KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
				   sizeof(*priv->rx_ring) * NUM_RX_DESC));

	for (i = 0; i < NUM_RX_DESC; i++)
		priv->rx_buf[i] = memalign(PKTALIGN, MTK_QDMA_PAGE_SIZE);

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mt7628_mdio_read;
	bus->write = mt7628_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;

	ret = mdio_register(bus);
	if (ret)
		return ret;

	poll_link_phy = dev_read_u32_default(dev, "mediatek,poll-link-phy", -1);
	if (poll_link_phy >= 0) {
		if (poll_link_phy >= NUM_PHYS) {
			pr_err("invalid phy %d for poll-link-phy\n",
			       poll_link_phy);
			return -EINVAL;
		}

		priv->phy = phy_connect(bus, poll_link_phy, dev,
					PHY_INTERFACE_MODE_MII);
		if (!priv->phy) {
			pr_err("failed to probe phy %d\n", poll_link_phy);
			return -ENODEV;
		}

		priv->phy->advertising = priv->phy->supported;
		phy_config(priv->phy);
	}

	/* Switch configuration */
	rt305x_esw_init(priv);

	return 0;
}

static const struct eth_ops mt7628_eth_ops = {
	.start = mt7628_eth_start,
	.send = mt7628_eth_send,
	.recv = mt7628_eth_recv,
	.free_pkt = mt7628_eth_free_pkt,
	.stop = mt7628_eth_stop,
	.write_hwaddr = mt7628_eth_write_hwaddr,
};

static const struct udevice_id mt7628_eth_ids[] = {
	{ .compatible = "mediatek,mt7628-eth" },
	{ }
};

U_BOOT_DRIVER(mt7628_eth) = {
	.name = "mt7628_eth",
	.id = UCLASS_ETH,
	.of_match = mt7628_eth_ids,
	.probe = mt7628_eth_probe,
	.ops = &mt7628_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mt7628_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};