// SPDX-License-Identifier: GPL-2.0+

/*
 * Copyright (C) 2020 Cortina Access Inc.
 * Author: Aaron Tseng <aaron.tseng@cortina-access.com>
 *
 * Ethernet MAC Driver for all supported CAxxxx SoCs
 */

#include <common.h>
#include <command.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>
#include <env.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <u-boot/crc.h>
#include <led.h>

#include "cortina_ni.h"

#define HEADER_A_SIZE 8

enum ca_led_state_t {
	CA_LED_OFF = 0,
	CA_LED_ON = 1,
};

enum ca_port_t {
	NI_PORT_0 = 0,
	NI_PORT_1,
	NI_PORT_2,
	NI_PORT_3,
	NI_PORT_4,
	NI_PORT_5,
	NI_PORT_MAX,
};

static struct udevice *curr_dev;

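/*
 * Advance a 32-bit read/write pointer within the XRAM window
 * [base, max), wrapping back to base once the end is reached.
 */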
static u32 *ca_rdwrptr_adv_one(u32 *x, unsigned long base, unsigned long max)
{
	if (x + 1 >= (u32 *)max)
		return (u32 *)base;
	else
		return (x + 1);
}

static void ca_reg_read(void *reg, u64 base, u64 offset)
{
	u32 *val = (u32 *)reg;

	*val = readl(KSEG1_ATU_XLAT(base + offset));
}

static void ca_reg_write(void *reg, u64 base, u64 offset)
{
	u32 val = *(u32 *)reg;

	writel(val, KSEG1_ATU_XLAT(base + offset));
}

static int ca_mdio_write_rgmii(u32 addr, u32 offset, u16 data)
{
	/* up to 10000 cycles */
	u32 loop_wait = __MDIO_ACCESS_TIMEOUT;
	struct PER_MDIO_ADDR_t mdio_addr;
	struct PER_MDIO_CTRL_t mdio_ctrl;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	memset(&mdio_addr, 0, sizeof(mdio_addr));
	mdio_addr.mdio_addr = addr;
	mdio_addr.mdio_offset = offset;
	mdio_addr.mdio_rd_wr = __MDIO_WR_FLAG;
	ca_reg_write(&mdio_addr, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_ADDR_OFFSET);
	ca_reg_write(&data, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_WRDATA_OFFSET);

	memset(&mdio_ctrl, 0, sizeof(mdio_ctrl));
	mdio_ctrl.mdiostart = 1;
	ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_CTRL_OFFSET);

	debug("%s: phy_addr=%d, offset=%d, data=0x%x\n",
	      __func__, addr, offset, data);

	do {
		ca_reg_read(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
			    PER_MDIO_CTRL_OFFSET);
		if (mdio_ctrl.mdiodone) {
			ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
				     PER_MDIO_CTRL_OFFSET);
			return 0;
		}
	} while (--loop_wait);

	printf("CA NI %s: PHY write timeout!!!\n", __func__);
	return -ETIMEDOUT;
}

int ca_mdio_write(u32 addr, u32 offset, u16 data)
{
	u32 reg_addr, reg_val;
	struct NI_MDIO_OPER_T mdio_oper;

	/* supported range: 1~31 */
	if (addr < CA_MDIO_ADDR_MIN || addr > CA_MDIO_ADDR_MAX)
		return -EINVAL;

	/* PHY addr 5 and above are connected to RGMII */
	if (addr >= 5)
		return ca_mdio_write_rgmii(addr, offset, data);

	memset(&mdio_oper, 0, sizeof(mdio_oper));
	mdio_oper.reg_off = offset;
	mdio_oper.phy_addr = addr;
	mdio_oper.reg_base = CA_NI_MDIO_REG_BASE;
	reg_val = data;
	memcpy(&reg_addr, &mdio_oper, sizeof(reg_addr));
	ca_reg_write(&reg_val, (u64)reg_addr, 0);

	return 0;
}

static int ca_mdio_read_rgmii(u32 addr, u32 offset, u16 *data)
{
	u32 loop_wait = __MDIO_ACCESS_TIMEOUT;
	struct PER_MDIO_ADDR_t mdio_addr;
	struct PER_MDIO_CTRL_t mdio_ctrl;
	struct PER_MDIO_RDDATA_t read_data;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	memset(&mdio_addr, 0, sizeof(mdio_addr));
	mdio_addr.mdio_addr = addr;
	mdio_addr.mdio_offset = offset;
	mdio_addr.mdio_rd_wr = __MDIO_RD_FLAG;
	ca_reg_write(&mdio_addr, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_ADDR_OFFSET);

	memset(&mdio_ctrl, 0, sizeof(mdio_ctrl));
	mdio_ctrl.mdiostart = 1;
	ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_CTRL_OFFSET);

	do {
		ca_reg_read(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
			    PER_MDIO_CTRL_OFFSET);
		if (mdio_ctrl.mdiodone) {
			ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
				     PER_MDIO_CTRL_OFFSET);
			ca_reg_read(&read_data, (u64)priv->per_mdio_base_addr,
				    PER_MDIO_RDDATA_OFFSET);
			*data = read_data.mdio_rddata;
			return 0;
		}
	} while (--loop_wait);

	printf("CA NI %s: TIMEOUT!!\n", __func__);
	return -ETIMEDOUT;
}

int ca_mdio_read(u32 addr, u32 offset, u16 *data)
{
	u32 reg_addr, reg_val;
	struct NI_MDIO_OPER_T mdio_oper;

	if (!data)
		return -EINVAL;

	/* supported range: 1~31 */
	if (addr < CA_MDIO_ADDR_MIN || addr > CA_MDIO_ADDR_MAX)
		return -EINVAL;

	/* PHY addr 5 and above are connected to RGMII */
	if (addr >= 5)
		return ca_mdio_read_rgmii(addr, offset, data);

	memset(&mdio_oper, 0, sizeof(mdio_oper));
	mdio_oper.reg_off = offset;
	mdio_oper.phy_addr = addr;
	mdio_oper.reg_base = CA_NI_MDIO_REG_BASE;
	reg_val = *data;
	memcpy(&reg_addr, &mdio_oper, sizeof(reg_addr));
	ca_reg_read(&reg_val, (u64)reg_addr, 0);
	*data = reg_val;
	return 0;
}

int ca_miiphy_read(const char *devname, u8 addr, u8 reg, u16 *value)
{
	return ca_mdio_read(addr, reg, value);
}

int ca_miiphy_write(const char *devname, u8 addr, u8 reg, u16 value)
{
	return ca_mdio_write(addr, reg, value);
}

static int cortina_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	u16 data;

	ca_mdio_read(addr, reg, &data);
	return data;
}

static int cortina_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			      u16 val)
{
	return ca_mdio_write(addr, reg, val);
}
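
/*
 * Illustrative sketch only (kept out of the build): reading the standard
 * MII PHY ID registers through the raw accessors above. The PHY address 5
 * used here is an assumption for the RGMII-attached PHY.
 */
#if 0
static void ca_mdio_read_phy_id_example(void)
{
	u16 id1 = 0, id2 = 0;

	if (!ca_mdio_read(5, MII_PHYSID1, &id1) &&
	    !ca_mdio_read(5, MII_PHYSID2, &id2))
		printf("PHY ID: 0x%04x%04x\n", id1, id2);
}
#endif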

static void ca_ni_setup_mac_addr(void)
{
	u8 mac[6];
	struct NI_HV_GLB_MAC_ADDR_CFG0_t mac_addr_cfg0;
	struct NI_HV_GLB_MAC_ADDR_CFG1_t mac_addr_cfg1;
	struct NI_HV_PT_PORT_STATIC_CFG_t port_static_cfg;
	struct NI_HV_XRAM_CPUXRAM_CFG_t cpuxram_cfg;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	/* Parse ethaddr and program it into the NI registers. */
	if (eth_env_get_enetaddr("ethaddr", mac)) {
		/* The complete MAC address consists of
		 * {MAC_ADDR0_mac_addr0[0-3], MAC_ADDR1_mac_addr1[4],
		 * PT_PORT_STATIC_CFG_mac_addr6[5]}.
		 */
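		/*
		 * For example, ethaddr 00:11:22:33:44:55 is programmed as
		 * mac_addr0 = 0x00112233, mac_addr1 = 0x44, mac_addr6 = 0x55.
		 */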
		mac_addr_cfg0.mac_addr0 = (mac[0] << 24) + (mac[1] << 16) +
					  (mac[2] << 8) + mac[3];
		ca_reg_write(&mac_addr_cfg0, (u64)priv->ni_hv_base_addr,
			     NI_HV_GLB_MAC_ADDR_CFG0_OFFSET);

		memset(&mac_addr_cfg1, 0, sizeof(mac_addr_cfg1));
		mac_addr_cfg1.mac_addr1 = mac[4];
		ca_reg_write(&mac_addr_cfg1, (u64)priv->ni_hv_base_addr,
			     NI_HV_GLB_MAC_ADDR_CFG1_OFFSET);

		ca_reg_read(&port_static_cfg, (u64)priv->ni_hv_base_addr,
			    NI_HV_PT_PORT_STATIC_CFG_OFFSET +
			    (APB0_NI_HV_PT_STRIDE * priv->active_port));

		port_static_cfg.mac_addr6 = mac[5];
		ca_reg_write(&port_static_cfg, (u64)priv->ni_hv_base_addr,
			     NI_HV_PT_PORT_STATIC_CFG_OFFSET +
			     (APB0_NI_HV_PT_STRIDE * priv->active_port));

		/* Receive only broadcast and address-matched packets */
		ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
			    NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
		cpuxram_cfg.xram_mgmt_promisc_mode = 0;
		cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
		cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
		ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
			     NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
	} else {
		/* Receive all packets (promiscuous mode) */
		ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
			    NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
		cpuxram_cfg.xram_mgmt_promisc_mode = 3;
		cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
		cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
		ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
			     NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
	}
}

static void ca_ni_enable_tx_rx(void)
{
	struct NI_HV_PT_RXMAC_CFG_t rxmac_cfg;
	struct NI_HV_PT_TXMAC_CFG_t txmac_cfg;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	/* Enable TX and RX functions */
	ca_reg_read(&rxmac_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_PT_RXMAC_CFG_OFFSET +
		    (APB0_NI_HV_PT_STRIDE * priv->active_port));
	rxmac_cfg.rx_en = 1;
	ca_reg_write(&rxmac_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_PT_RXMAC_CFG_OFFSET +
		     (APB0_NI_HV_PT_STRIDE * priv->active_port));

	ca_reg_read(&txmac_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_PT_TXMAC_CFG_OFFSET +
		    (APB0_NI_HV_PT_STRIDE * priv->active_port));
	txmac_cfg.tx_en = 1;
	ca_reg_write(&txmac_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_PT_TXMAC_CFG_OFFSET +
		     (APB0_NI_HV_PT_STRIDE * priv->active_port));
}

#define AUTO_SCAN_TIMEOUT 3000 /* 3 seconds */
static int ca_ni_auto_scan_active_port(struct cortina_ni_priv *priv)
{
	u8 i;
	u16 data;
	u32 start_time;

	start_time = get_timer(0);
	while (get_timer(start_time) < AUTO_SCAN_TIMEOUT) {
		for (i = 0; i < priv->valid_port_num; i++) {
			if (!priv->port_map[i].phy_addr)
				continue;

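			/* MII BMSR (register 1): bit 2 is the link-status bit */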
			ca_mdio_read(priv->port_map[i].phy_addr, 1, &data);
			if (data & 0x04) {
				priv->active_port = priv->port_map[i].port;
				return 0;
			}
		}
	}

	printf("CA NI %s: auto scan active_port timeout.\n", __func__);
	return -1;
}

static void ca_ni_led(int port, int status)
{
	char label[10];
	struct udevice *led_dev;

	if (IS_ENABLED(CONFIG_LED_CORTINA)) {
		snprintf(label, sizeof(label), "led%d", port);
		debug("%s: set port %d led %s.\n",
		      __func__, port, status ? "on" : "off");
		led_get_by_label(label, &led_dev);
		led_set_state(led_dev, status);
	}
}

static void ca_ni_reset(void)
{
	int i;
	struct NI_HV_GLB_INIT_DONE_t init_done;
	struct NI_HV_GLB_INTF_RST_CONFIG_t intf_rst_config;
	struct NI_HV_GLB_STATIC_CFG_t static_cfg;
	struct GLOBAL_BLOCK_RESET_t glb_blk_reset;
	struct cortina_ni_priv *priv = dev_get_priv(curr_dev);

	/* NI global resets */
	ca_reg_read(&glb_blk_reset, (u64)priv->glb_base_addr,
		    GLOBAL_BLOCK_RESET_OFFSET);
	glb_blk_reset.reset_ni = 1;
	ca_reg_write(&glb_blk_reset, (u64)priv->glb_base_addr,
		     GLOBAL_BLOCK_RESET_OFFSET);
	/* Remove resets */
	glb_blk_reset.reset_ni = 0;
	ca_reg_write(&glb_blk_reset, (u64)priv->glb_base_addr,
		     GLOBAL_BLOCK_RESET_OFFSET);

	/* check the ready bit of the NI module */
	for (i = 0; i < NI_READ_POLL_COUNT; i++) {
		ca_reg_read(&init_done, (u64)priv->ni_hv_base_addr,
			    NI_HV_GLB_INIT_DONE_OFFSET);
		if (init_done.ni_init_done)
			break;
	}
	if (i == NI_READ_POLL_COUNT) {
		printf("CA NI %s: NI init done not ready, init_done=0x%x!!!\n",
		       __func__, init_done.ni_init_done);
	}

	ca_reg_read(&intf_rst_config, (u64)priv->ni_hv_base_addr,
		    NI_HV_GLB_INTF_RST_CONFIG_OFFSET);
	switch (priv->active_port) {
	case NI_PORT_0:
		intf_rst_config.intf_rst_p0 = 0;
		intf_rst_config.mac_rx_rst_p0 = 0;
		intf_rst_config.mac_tx_rst_p0 = 0;
		break;
	case NI_PORT_1:
		intf_rst_config.intf_rst_p1 = 0;
		intf_rst_config.mac_rx_rst_p1 = 0;
		intf_rst_config.mac_tx_rst_p1 = 0;
		break;
	case NI_PORT_2:
		intf_rst_config.intf_rst_p2 = 0;
		intf_rst_config.mac_rx_rst_p2 = 0;
		intf_rst_config.mac_tx_rst_p2 = 0;
		break;
	case NI_PORT_3:
		intf_rst_config.intf_rst_p3 = 0;
		intf_rst_config.mac_tx_rst_p3 = 0;
		intf_rst_config.mac_rx_rst_p3 = 0;
		break;
	case NI_PORT_4:
		intf_rst_config.intf_rst_p4 = 0;
		intf_rst_config.mac_tx_rst_p4 = 0;
		intf_rst_config.mac_rx_rst_p4 = 0;
		break;
	}

	ca_reg_write(&intf_rst_config, (u64)priv->ni_hv_base_addr,
		     NI_HV_GLB_INTF_RST_CONFIG_OFFSET);

	/* Only one GMAC can connect to the CPU */
	ca_reg_read(&static_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_GLB_STATIC_CFG_OFFSET);
	static_cfg.port_to_cpu = priv->active_port;
	static_cfg.txmib_mode = 1;
	static_cfg.rxmib_mode = 1;

	ca_reg_write(&static_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_GLB_STATIC_CFG_OFFSET);
}

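/*
 * Write the calibration values taken from the device tree to each of the
 * four internal GPHYs; the per-PHY register banks are spaced 0x80 apart.
 */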
static void ca_internal_gphy_cal(struct cortina_ni_priv *priv)
{
	int i, port, num;
	u32 reg_off, value;

	num = priv->gphy_num;
	for (port = 0; port < 4; port++) {
		for (i = 0; i < num; i++) {
			reg_off = priv->gphy_values[i].reg_off + (port * 0x80);
			value = priv->gphy_values[i].value;
			ca_reg_write(&value, reg_off, 0);
			mdelay(50);
		}
	}
}

static int ca_mdio_register(struct udevice *dev)
{
	int ret;
	struct cortina_ni_priv *priv = dev_get_priv(dev);
	struct mii_dev *mdio_bus = mdio_alloc();

	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->read = cortina_mdio_read;
	mdio_bus->write = cortina_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);

	mdio_bus->priv = (void *)priv;

	ret = mdio_register(mdio_bus);
	if (ret)
		return ret;

	priv->mdio_bus = mdio_bus;
	return 0;
}

static void ca_rgmii_init(struct cortina_ni_priv *priv)
{
	struct GLOBAL_GLOBAL_CONFIG_t glb_config;
	struct GLOBAL_IO_DRIVE_CONTROL_t io_drive_control;

	/* Generate the 25 MHz reference clock for the switch */
	ca_reg_read(&glb_config, (u64)priv->glb_base_addr,
		    GLOBAL_GLOBAL_CONFIG_OFFSET);
	glb_config.refclk_sel = 0x01;
	glb_config.ext_reset = 0x01;
	ca_reg_write(&glb_config, (u64)priv->glb_base_addr,
		     GLOBAL_GLOBAL_CONFIG_OFFSET);

	mdelay(20);

	/* Do external reset */
	ca_reg_read(&glb_config, (u64)priv->glb_base_addr,
		    GLOBAL_GLOBAL_CONFIG_OFFSET);
	glb_config.ext_reset = 0x0;
	ca_reg_write(&glb_config, (u64)priv->glb_base_addr,
		     GLOBAL_GLOBAL_CONFIG_OFFSET);

	ca_reg_read(&io_drive_control, (u64)priv->glb_base_addr,
		    GLOBAL_IO_DRIVE_CONTROL_OFFSET);
	io_drive_control.gmac_mode = 2;
	io_drive_control.gmac_dn = 1;
	io_drive_control.gmac_dp = 1;
	ca_reg_write(&io_drive_control, (u64)priv->glb_base_addr,
		     GLOBAL_IO_DRIVE_CONTROL_OFFSET);
}

static int ca_phy_probe(struct udevice *dev)
{
	int auto_scan_active_port = 0, tmp_port;
	char *buf;
	struct cortina_ni_priv *priv = dev_get_priv(dev);
	struct phy_device *int_phydev, *ext_phydev;

	/* Initialize the internal PHY device */
	int_phydev = phy_connect(priv->mdio_bus,
				 priv->port_map[NI_PORT_3].phy_addr,
				 dev, priv->phy_interface);
	if (int_phydev) {
		int_phydev->supported &= PHY_GBIT_FEATURES;
		int_phydev->advertising = int_phydev->supported;
		phy_config(int_phydev);
	} else {
		printf("CA NI %s: There is no internal phy device\n", __func__);
	}

	/* Initialize the external PHY device */
	ext_phydev = phy_connect(priv->mdio_bus,
				 priv->port_map[NI_PORT_4].phy_addr,
				 dev, priv->phy_interface);
	if (ext_phydev) {
		ext_phydev->supported &= PHY_GBIT_FEATURES;
		ext_phydev->advertising = ext_phydev->supported;
		phy_config(ext_phydev);
	} else {
		printf("CA NI %s: There is no external phy device\n", __func__);
	}

	/* auto-scan for the first link-up port and use it as active_port */
	buf = env_get("auto_scan_active_port");
	if (buf != 0) {
		auto_scan_active_port = simple_strtoul(buf, NULL, 0);
		printf("CA NI %s: auto_scan_active_port=%d\n", __func__,
		       auto_scan_active_port);
	}

	if (auto_scan_active_port) {
		ca_ni_auto_scan_active_port(priv);
	} else {
		buf = env_get("active_port");
		if (buf != 0) {
			tmp_port = simple_strtoul(buf, NULL, 0);
			if (tmp_port < 0 ||
			    !(priv->valid_port_map & BIT(tmp_port))) {
				printf("CA NI ERROR: unsupported port.\n");
				free(dev);
				free(priv);
				return 1;
			}

			priv->active_port = tmp_port;
		}
	}

	printf("CA NI %s: active_port=%d\n", __func__, priv->active_port);
	if (priv->active_port == NI_PORT_4)
		priv->phydev = ext_phydev;
	else
		priv->phydev = int_phydev;

	return 0;
}

static int cortina_eth_start(struct udevice *dev)
{
	int ret;
	struct NI_HV_XRAM_CPUXRAM_ADRCFG_RX_t cpuxram_adrcfg_rx;
	struct NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_t cpuxram_adrcfg_tx;
	struct NI_HV_XRAM_CPUXRAM_CFG_t cpuxram_cfg;
	struct NI_HV_PT_PORT_STATIC_CFG_t port_static_cfg;
	struct NI_HV_PT_PORT_GLB_CFG_t port_glb_cfg;
	struct cortina_ni_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev = priv->phydev;

	ret = phy_startup(priv->phydev);
	if (ret) {
		ca_ni_led(priv->active_port, CA_LED_OFF);
		printf("CA NI Could not initialize PHY %s, active_port=%d\n",
		       priv->phydev->dev->name, priv->active_port);
		return ret;
	}

	if (!priv->phydev->link) {
		printf("CA NI %s: link down.\n", priv->phydev->dev->name);
		return 0;
	}

	ca_ni_led(priv->active_port, CA_LED_ON);
	printf("CA NI PHY ID 0x%08X %dMbps %s duplex\n",
	       phydev->phy_id, phydev->speed,
	       phydev->duplex == DUPLEX_HALF ? "half" : "full");

	/* RX XRAM ADDRESS CONFIG (start and end address) */
	memset(&cpuxram_adrcfg_rx, 0, sizeof(cpuxram_adrcfg_rx));
	cpuxram_adrcfg_rx.rx_top_addr = RX_TOP_ADDR;
	cpuxram_adrcfg_rx.rx_base_addr = RX_BASE_ADDR;
	ca_reg_write(&cpuxram_adrcfg_rx, (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_ADRCFG_RX_OFFSET);

	/* TX XRAM ADDRESS CONFIG (start and end address) */
	memset(&cpuxram_adrcfg_tx, 0, sizeof(cpuxram_adrcfg_tx));
	cpuxram_adrcfg_tx.tx_top_addr = TX_TOP_ADDR;
	cpuxram_adrcfg_tx.tx_base_addr = TX_BASE_ADDR;
	ca_reg_write(&cpuxram_adrcfg_tx, (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_OFFSET);

	/*
	 * Configuration for Management Ethernet Interface:
	 * - RGMII 1000 mode or RGMII 100 mode
	 * - MAC mode
	 */
	ca_reg_read(&port_static_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_PT_PORT_STATIC_CFG_OFFSET +
		    (APB0_NI_HV_PT_STRIDE * priv->active_port));
	if (phydev->speed == SPEED_1000) {
		/* port 4 connects to RGMII PHY */
		if (phydev->addr == 5)
			port_static_cfg.int_cfg = GE_MAC_INTF_RGMII_1000;
		else
			port_static_cfg.int_cfg = GE_MAC_INTF_GMII;
	} else {
		/* port 4 connects to RGMII PHY */
		if (phydev->addr == 5)
			port_static_cfg.int_cfg = GE_MAC_INTF_RGMII_100;
		else
			port_static_cfg.int_cfg = GE_MAC_INTF_MII;
	}

	ca_reg_write(&port_static_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_PT_PORT_STATIC_CFG_OFFSET +
		     (APB0_NI_HV_PT_STRIDE * priv->active_port));

	ca_reg_read(&port_glb_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_PT_PORT_GLB_CFG_OFFSET +
		    (APB0_NI_HV_PT_STRIDE * priv->active_port));
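	/* Encoding used below: speed 1 = 10 Mbit/s, 0 = 100/1000; duplex 1 = half */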
	port_glb_cfg.speed = phydev->speed == SPEED_10 ? 1 : 0;
	port_glb_cfg.duplex = phydev->duplex == DUPLEX_HALF ? 1 : 0;
	ca_reg_write(&port_glb_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_PT_PORT_GLB_CFG_OFFSET +
		     (APB0_NI_HV_PT_STRIDE * priv->active_port));

	/* Need to toggle the tx and rx cpu_pkt_dis bit
	 * after changing Address config register.
	 */
	ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
	cpuxram_cfg.rx_0_cpu_pkt_dis = 1;
	cpuxram_cfg.tx_0_cpu_pkt_dis = 1;
	ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_CFG_OFFSET);

	ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
	cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
	cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
	ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_CFG_OFFSET);

	ca_ni_enable_tx_rx();

	return 0;
}

/*********************************************
 * Packet receive routine from Management FE
 * Expects a previously allocated buffer and
 * fills the length
 * Returns 0 on success, -1 on failure
 *******************************************/
static int cortina_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	u8 *ptr;
	u32 next_link, pktlen = 0;
	u32 sw_rx_rd_ptr, hw_rx_wr_ptr, *rx_xram_ptr, *data_ptr;
	int loop, index = 0, blk_num;
	struct cortina_ni_priv *priv = dev_get_priv(dev);
	struct NI_HEADER_X_T header_x;
	struct NI_PACKET_STATUS packet_status;
	struct NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_t cpuxram_cpu_sta_rx;
	struct NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_t cpuxram_cpu_cfg_rx;

	/* get the hw write pointer */
	memset(&cpuxram_cpu_sta_rx, 0, sizeof(cpuxram_cpu_sta_rx));
	ca_reg_read(&cpuxram_cpu_sta_rx, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
	hw_rx_wr_ptr = cpuxram_cpu_sta_rx.pkt_wr_ptr;

	/* get the sw read pointer */
	memset(&cpuxram_cpu_cfg_rx, 0, sizeof(cpuxram_cpu_cfg_rx));
	ca_reg_read(&cpuxram_cpu_cfg_rx, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
	sw_rx_rd_ptr = cpuxram_cpu_cfg_rx.pkt_rd_ptr;

	debug("%s: NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0 = 0x%p, ", __func__,
	      priv->ni_hv_base_addr + NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
	debug("NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0 = 0x%p\n",
	      priv->ni_hv_base_addr + NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
	debug("%s : RX hw_wr_ptr = %d, sw_rd_ptr = %d\n",
	      __func__, hw_rx_wr_ptr, sw_rx_rd_ptr);

	while (sw_rx_rd_ptr != hw_rx_wr_ptr) {
		/* Point to the absolute memory address of XRAM
		 * where read pointer is
		 */
		rx_xram_ptr = (u32 *)
			      ((unsigned long)priv->ni_xram_base
			      + sw_rx_rd_ptr * 8);

		/* Wrap around if required */
		if (rx_xram_ptr >= (u32 *)(unsigned long)priv->rx_xram_end_adr)
			rx_xram_ptr = (u32 *)
				      (unsigned long)priv->rx_xram_base_adr;

		/* Checking header XR. Do not update the read pointer yet */
		/* skip unused 32-bit in Header XR */
		rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
						 priv->rx_xram_base_adr,
						 priv->rx_xram_end_adr);

		memcpy(&header_x, rx_xram_ptr, sizeof(header_x));
		next_link = header_x.next_link;
		/* Header XR [31:0] */

		if (*rx_xram_ptr == 0xffffffff)
			printf("CA NI %s: XRAM Error !\n", __func__);

		debug("%s : RX next link 0x%x\n", __func__, next_link);
		debug("%s : bytes_valid %x\n", __func__, header_x.bytes_valid);

		if (header_x.ownership == 0) {
			/* point to Packet status [31:0] */
			rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
							 priv->rx_xram_base_adr,
							 priv->rx_xram_end_adr);

			memcpy(&packet_status, rx_xram_ptr,
			       sizeof(*rx_xram_ptr));

			if (packet_status.valid == 0) {
				debug("%s: Invalid Packet !!, ", __func__);
				debug("next_link=%d\n", next_link);

				/* Update the software read pointer */
				ca_reg_write(&next_link,
					     (u64)priv->ni_hv_base_addr,
					     NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
				return 0;
			}

			if (packet_status.drop ||
			    packet_status.runt ||
			    packet_status.oversize ||
			    packet_status.jabber ||
			    packet_status.crc_error ||
			    packet_status.jumbo) {
				debug("%s: Error Packet!!, ", __func__);
				debug("next_link=%d\n", next_link);

				/* Update the software read pointer */
				ca_reg_write(&next_link,
					     (u64)priv->ni_hv_base_addr,
					     NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
				return 0;
			}

			/* check whether the packet size is larger than 1518 */
			if (packet_status.packet_size > 1518) {
				debug("%s: Error Packet !! Packet size=%d, ",
				      __func__, packet_status.packet_size);
				debug("larger than 1518, next_link=%d\n",
				      next_link);

				/* Update the software read pointer */
				ca_reg_write(&next_link,
					     (u64)priv->ni_hv_base_addr,
					     NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
				return 0;
			}

			rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
							 priv->rx_xram_base_adr,
							 priv->rx_xram_end_adr);

			pktlen = packet_status.packet_size;

			debug("%s : rx packet length = %d\n",
			      __func__, packet_status.packet_size);

			rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
							 priv->rx_xram_base_adr,
							 priv->rx_xram_end_adr);

			data_ptr = (u32 *)net_rx_packets[index];

			/* Read out the packet */
			/* Data is in little endian form in the XRAM */

			/* Send the packet to upper layer */

			debug("%s: packet data[]=", __func__);

			for (loop = 0; loop <= pktlen / 4; loop++) {
				ptr = (u8 *)rx_xram_ptr;
				if (loop < 10)
					debug("[0x%x]-[0x%x]-[0x%x]-[0x%x]",
					      ptr[0], ptr[1], ptr[2], ptr[3]);
				*data_ptr++ = *rx_xram_ptr++;
				/* Wrap around if required */
				if (rx_xram_ptr >= (u32 *)
				    (unsigned long)priv->rx_xram_end_adr) {
					rx_xram_ptr = (u32 *)(unsigned long)
						      (priv->rx_xram_base_adr);
				}
			}

			debug("\n");
			net_process_received_packet(net_rx_packets[index],
						    pktlen);
			if (++index >= PKTBUFSRX)
				index = 0;
			blk_num = net_rx_packets[index][0x2c] * 255 +
				  net_rx_packets[index][0x2d];
			debug("%s: tftp block number=%d\n", __func__, blk_num);

			/* Update the software read pointer */
			ca_reg_write(&next_link,
				     (u64)priv->ni_hv_base_addr,
				     NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
		}

		/* get the hw write pointer */
		ca_reg_read(&cpuxram_cpu_sta_rx, (u64)priv->ni_hv_base_addr,
			    NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
		hw_rx_wr_ptr = cpuxram_cpu_sta_rx.pkt_wr_ptr;

		/* get the sw read pointer */
		ca_reg_read(&sw_rx_rd_ptr, (u64)priv->ni_hv_base_addr,
			    NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
	}
	return 0;
}

static int cortina_eth_send(struct udevice *dev, void *packet, int length)
{
	u32 hw_tx_rd_ptr = 0, sw_tx_wr_ptr = 0;
	u32 loop, new_pkt_len, ca_crc32;
	u32 *tx_xram_ptr, *data_ptr;
	u16 next_link = 0;
	u8 *ptr, *pkt_buf_ptr, valid_bytes = 0;
	int pad = 0;
	static u8 pkt_buf[2048];
	struct NI_HEADER_X_T hdr_xt;
	struct NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_t cpuxram_cpu_cfg_tx;
	struct cortina_ni_priv *priv = dev_get_priv(dev);

	if (!packet || length > 2032)
		return -1;

	/* Get the hardware read pointer */
	ca_reg_read(&hw_tx_rd_ptr, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET);

	/* Get the software write pointer */
	ca_reg_read(&sw_tx_wr_ptr, (u64)priv->ni_hv_base_addr,
		    NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET);

	debug("%s: NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0=0x%p, ",
	      __func__,
	      KSEG1_ATU_XLAT(priv->ni_hv_base_addr +
			     NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET));
	debug("NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0=0x%p\n",
	      KSEG1_ATU_XLAT(priv->ni_hv_base_addr +
			     NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET));
	debug("%s : hw_tx_rd_ptr = %d\n", __func__, hw_tx_rd_ptr);
	debug("%s : sw_tx_wr_ptr = %d\n", __func__, sw_tx_wr_ptr);

	if (hw_tx_rd_ptr != sw_tx_wr_ptr) {
		printf("CA NI %s: Tx FIFO is not available!\n", __func__);
		return 1;
	}

	/* a workaround on 2015/10/01
	 * the packet size + CRC must be 8-byte aligned
	 */
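	/*
	 * e.g. length 42: (42 + 4) % 8 = 6, so 2 bytes are appended and the
	 * frame plus CRC becomes 48 bytes, a multiple of 8.
	 */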
	if (((length + 4) % 8) != 0)
		length += (8 - ((length + 4) % 8));

	memset(pkt_buf, 0x00, sizeof(pkt_buf));

	/* add 8-byte header_A at the beginning of packet */
	memcpy(&pkt_buf[HEADER_A_SIZE], (const void *)packet, length);

	pad = 64 - (length + 4); /* if packet length < 60 */
	pad = (pad < 0) ? 0 : pad;
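	/* i.e. pad short frames so that frame + CRC meets the 64-byte minimum */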

	debug("%s: length=%d, pad=%d\n", __func__, length, pad);

	new_pkt_len = length + pad; /* new packet length */

	pkt_buf_ptr = (u8 *)pkt_buf;

	/* Calculate the CRC32, skip 8-byte header_A */
	ca_crc32 = crc32(0, (u8 *)(pkt_buf_ptr + HEADER_A_SIZE), new_pkt_len);

	debug("%s: crc32 is 0x%x\n", __func__, ca_crc32);
	debug("%s: ~crc32 is 0x%x\n", __func__, ~ca_crc32);
	debug("%s: pkt len %d\n", __func__, new_pkt_len);
	/* should add the 8-byte header_A */
	/* CRC will be re-calculated by hardware */
	memcpy((pkt_buf_ptr + new_pkt_len + HEADER_A_SIZE),
	       (u8 *)(&ca_crc32), sizeof(ca_crc32));
	new_pkt_len = new_pkt_len + 4; /* add CRC */

	valid_bytes = new_pkt_len % 8;
	valid_bytes = valid_bytes ? valid_bytes : 0;
	debug("%s: valid_bytes %d\n", __func__, valid_bytes);

	/* should add 8-byte header_A */
	next_link = sw_tx_wr_ptr +
		    (new_pkt_len + 7 + HEADER_A_SIZE) / 8; /* for header XT */
	/* add header */
	next_link = next_link + 1;
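	/*
	 * e.g. a 64-byte frame (including CRC) plus the 8-byte header_A takes
	 * (64 + 8 + 7) / 8 = 9 XRAM cells, plus one more cell for header XT.
	 */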
	/* Wrap around if required */
	if (next_link > priv->tx_xram_end) {
		next_link = priv->tx_xram_start +
			    (next_link - (priv->tx_xram_end + 1));
	}

	debug("%s: TX next_link %x\n", __func__, next_link);
	memset(&hdr_xt, 0, sizeof(hdr_xt));
	hdr_xt.ownership = 1;
	hdr_xt.bytes_valid = valid_bytes;
	hdr_xt.next_link = next_link;

	tx_xram_ptr = (u32 *)((unsigned long)priv->ni_xram_base
			      + sw_tx_wr_ptr * 8);

	/* Wrap around if required */
	if (tx_xram_ptr >= (u32 *)(unsigned long)priv->tx_xram_end_adr)
		tx_xram_ptr = (u32 *)(unsigned long)priv->tx_xram_base_adr;

	tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
					 priv->tx_xram_base_adr,
					 priv->tx_xram_end_adr);

	memcpy(tx_xram_ptr, &hdr_xt, sizeof(*tx_xram_ptr));

	tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
					 priv->tx_xram_base_adr,
					 priv->tx_xram_end_adr);

	/* Now to copy the data. The first byte on the line goes first */
	data_ptr = (u32 *)pkt_buf_ptr;
	debug("%s: packet data[]=", __func__);

	/* copy header_A to XRAM */
	for (loop = 0; loop <= (new_pkt_len + HEADER_A_SIZE) / 4; loop++) {
		ptr = (u8 *)data_ptr;
		if ((loop % 4) == 0)
			debug("\n");
		debug("[0x%x]-[0x%x]-[0x%x]-[0x%x]-",
		      ptr[0], ptr[1], ptr[2], ptr[3]);

		*tx_xram_ptr = *data_ptr++;
		tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
						 priv->tx_xram_base_adr,
						 priv->tx_xram_end_adr);
	}
	debug("\n");

	/* Publish the software write pointer */
	cpuxram_cpu_cfg_tx.pkt_wr_ptr = next_link;
	ca_reg_write(&cpuxram_cpu_cfg_tx,
		     (u64)priv->ni_hv_base_addr,
		     NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET);

	return 0;
}

static void cortina_eth_stop(struct udevice *netdev)
{
	/* Nothing to do for now. */
}

static int cortina_eth_probe(struct udevice *dev)
{
	int ret, reg_value;
	struct cortina_ni_priv *priv;

	priv = dev_get_priv(dev);
	priv->rx_xram_base_adr = priv->ni_xram_base + (RX_BASE_ADDR * 8);
	priv->rx_xram_end_adr = priv->ni_xram_base + ((RX_TOP_ADDR + 1) * 8);
	priv->rx_xram_start = RX_BASE_ADDR;
	priv->rx_xram_end = RX_TOP_ADDR;
	priv->tx_xram_base_adr = priv->ni_xram_base + (TX_BASE_ADDR * 8);
	priv->tx_xram_end_adr = priv->ni_xram_base + ((TX_TOP_ADDR + 1) * 8);
	priv->tx_xram_start = TX_BASE_ADDR;
	priv->tx_xram_end = TX_TOP_ADDR;

	curr_dev = dev;
	debug("%s: rx_base_addr:%x\t rx_top_addr %x\n",
	      __func__, priv->rx_xram_start, priv->rx_xram_end);
	debug("%s: tx_base_addr:%x\t tx_top_addr %x\n",
	      __func__, priv->tx_xram_start, priv->tx_xram_end);
	debug("%s: rx physical start address = %x end address = %x\n",
	      __func__, priv->rx_xram_base_adr, priv->rx_xram_end_adr);
	debug("%s: tx physical start address = %x end address = %x\n",
	      __func__, priv->tx_xram_base_adr, priv->tx_xram_end_adr);

	/* Register the MDIO bus */
	ret = ca_mdio_register(dev);
	if (ret)
		return ret;

	/* set the MDIO pre-scale value */
	ca_reg_read(&reg_value, (u64)priv->per_mdio_base_addr,
		    PER_MDIO_CFG_OFFSET);
	reg_value = reg_value | 0x00280000;
	ca_reg_write(&reg_value, (u64)priv->per_mdio_base_addr,
		     PER_MDIO_CFG_OFFSET);

	ca_phy_probe(dev);
	priv->phydev->addr = priv->port_map[priv->active_port].phy_addr;

	ca_ni_led(priv->active_port, CA_LED_ON);

	ca_ni_reset();

	printf("CA NI %s: active_port=%d, phy_addr=%d\n",
	       __func__, priv->active_port, priv->phydev->addr);
	printf("CA NI %s: phy_id=0x%x, phy_id & PHY_ID_MASK=0x%x\n", __func__,
	       priv->phydev->phy_id, priv->phydev->phy_id & 0xFFFFFFF0);

	/* Parse ethaddr and program it into the NI registers. */
	ca_ni_setup_mac_addr();

#ifdef MIIPHY_REGISTER
	/* ca_miiphy_read and ca_miiphy_write must match the
	 * prototypes expected by miiphy_register()
	 */
	miiphy_register(dev->name, ca_miiphy_read, ca_miiphy_write);
#endif

	if (priv->init_rgmii) {
		/* hardware settings for the RGMII port */
		ca_rgmii_init(priv);
	}

	if (priv->gphy_num > 0) {
		/* do internal gphy calibration */
		ca_internal_gphy_cal(priv);
	}
	return 0;
}

static int ca_ni_of_to_plat(struct udevice *dev)
{
	int i, ret;
	struct cortina_ni_priv *priv = dev_get_priv(dev);

	memset(priv, 0, sizeof(struct cortina_ni_priv));
	priv->glb_base_addr = dev_remap_addr_index(dev, 0);
	if (!priv->glb_base_addr)
		return -ENOENT;
	printf("CA NI %s: priv->glb_base_addr for index 0 is 0x%p\n",
	       __func__, priv->glb_base_addr);

	priv->per_mdio_base_addr = dev_remap_addr_index(dev, 1);
	if (!priv->per_mdio_base_addr)
		return -ENOENT;
	printf("CA NI %s: priv->per_mdio_base_addr for index 1 is 0x%p\n",
	       __func__, priv->per_mdio_base_addr);

	priv->ni_hv_base_addr = dev_remap_addr_index(dev, 2);
	if (!priv->ni_hv_base_addr)
		return -ENOENT;
	printf("CA NI %s: priv->ni_hv_base_addr for index 2 is 0x%p\n",
	       __func__, priv->ni_hv_base_addr);

	priv->valid_port_map = dev_read_u32_default(dev, "valid-port-map", 1);
	priv->valid_port_num = dev_read_u32_default(dev, "valid-port-num", 1);

	for (i = 0; i < priv->valid_port_num; i++) {
		ret = dev_read_u32_index(dev, "valid-ports", i * 2,
					 &priv->port_map[i].phy_addr);
		ret = dev_read_u32_index(dev, "valid-ports", (i * 2) + 1,
					 &priv->port_map[i].port);
	}

	priv->gphy_num = dev_read_u32_default(dev, "inter-gphy-num", 1);
	for (i = 0; i < priv->gphy_num; i++) {
		ret = dev_read_u32_index(dev, "inter-gphy-val", i * 2,
					 &priv->gphy_values[i].reg_off);
		ret = dev_read_u32_index(dev, "inter-gphy-val", (i * 2) + 1,
					 &priv->gphy_values[i].value);
	}

	priv->active_port = dev_read_u32_default(dev, "def-active-port", 1);
	priv->init_rgmii = dev_read_u32_default(dev, "init-rgmii", 1);
	priv->ni_xram_base = dev_read_u32_default(dev, "ni-xram-base", 1);
	return 0;
}
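
/*
 * Device tree properties consumed above (the values shown are placeholders
 * for illustration, not a real board configuration):
 *
 *	compatible = "eth_cortina";
 *	reg = <glb>, <per_mdio>, <ni_hv>;	three register regions
 *	valid-port-num = <2>;
 *	valid-ports = <phy_addr port>, ...;	one pair per valid port
 *	valid-port-map = <0x18>;		bitmap checked in ca_phy_probe()
 *	inter-gphy-num / inter-gphy-val		internal GPHY calibration pairs
 *	def-active-port, init-rgmii, ni-xram-base
 */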

static const struct eth_ops cortina_eth_ops = {
	.start = cortina_eth_start,
	.send = cortina_eth_send,
	.recv = cortina_eth_recv,
	.stop = cortina_eth_stop,
};

static const struct udevice_id cortina_eth_ids[] = {
	{ .compatible = "eth_cortina" },
	{ }
};

U_BOOT_DRIVER(eth_cortina) = {
	.name = "eth_cortina",
	.id = UCLASS_ETH,
	.of_match = cortina_eth_ids,
	.probe = cortina_eth_probe,
	.ops = &cortina_eth_ops,
	.priv_auto = sizeof(struct cortina_ni_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.of_to_plat = ca_ni_of_to_plat,
};