/* drivers/net/zynq_gem.c - Xilinx Zynq Gigabit Ethernet MAC (GEM) driver */
1 /*
2 * (C) Copyright 2011 Michal Simek
3 *
4 * Michal SIMEK <monstr@monstr.eu>
5 *
6 * Based on Xilinx gmac driver:
7 * (C) Copyright 2011 Xilinx
8 *
9 * SPDX-License-Identifier: GPL-2.0+
10 */
11
12 #include <common.h>
13 #include <net.h>
14 #include <netdev.h>
15 #include <config.h>
16 #include <fdtdec.h>
17 #include <libfdt.h>
18 #include <malloc.h>
19 #include <asm/io.h>
20 #include <phy.h>
21 #include <miiphy.h>
22 #include <watchdog.h>
23 #include <asm/system.h>
24 #include <asm/arch/hardware.h>
25 #include <asm/arch/sys_proto.h>
26 #include <asm-generic/errno.h>
27
28 #if !defined(CONFIG_PHYLIB)
29 # error XILINX_GEM_ETHERNET requires PHYLIB
30 #endif
31
32 /* Bit/mask specification */
33 #define ZYNQ_GEM_PHYMNTNC_OP_MASK 0x40020000 /* operation mask bits */
34 #define ZYNQ_GEM_PHYMNTNC_OP_R_MASK 0x20000000 /* read operation */
35 #define ZYNQ_GEM_PHYMNTNC_OP_W_MASK 0x10000000 /* write operation */
36 #define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK 23 /* Shift bits for PHYAD */
37 #define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK 18 /* Shift bits for PHREG */
38
39 #define ZYNQ_GEM_RXBUF_EOF_MASK 0x00008000 /* End of frame. */
40 #define ZYNQ_GEM_RXBUF_SOF_MASK 0x00004000 /* Start of frame. */
41 #define ZYNQ_GEM_RXBUF_LEN_MASK 0x00003FFF /* Mask for length field */
42
43 #define ZYNQ_GEM_RXBUF_WRAP_MASK 0x00000002 /* Wrap bit, last BD */
44 #define ZYNQ_GEM_RXBUF_NEW_MASK 0x00000001 /* Used bit.. */
45 #define ZYNQ_GEM_RXBUF_ADD_MASK 0xFFFFFFFC /* Mask for address */
46
47 /* Wrap bit, last descriptor */
48 #define ZYNQ_GEM_TXBUF_WRAP_MASK 0x40000000
49 #define ZYNQ_GEM_TXBUF_LAST_MASK 0x00008000 /* Last buffer */
50 #define ZYNQ_GEM_TXBUF_USED_MASK 0x80000000 /* Used by Hw */
51
52 #define ZYNQ_GEM_NWCTRL_TXEN_MASK 0x00000008 /* Enable transmit */
53 #define ZYNQ_GEM_NWCTRL_RXEN_MASK 0x00000004 /* Enable receive */
54 #define ZYNQ_GEM_NWCTRL_MDEN_MASK 0x00000010 /* Enable MDIO port */
55 #define ZYNQ_GEM_NWCTRL_STARTTX_MASK 0x00000200 /* Start tx (tx_go) */
56
57 #define ZYNQ_GEM_NWCFG_SPEED100 0x000000001 /* 100 Mbps operation */
58 #define ZYNQ_GEM_NWCFG_SPEED1000 0x000000400 /* 1Gbps operation */
59 #define ZYNQ_GEM_NWCFG_FDEN 0x000000002 /* Full Duplex mode */
60 #define ZYNQ_GEM_NWCFG_FSREM 0x000020000 /* FCS removal */
61 #define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x0000c0000 /* Div pclk by 48, max 120MHz */
62
63 #ifdef CONFIG_ARM64
64 # define ZYNQ_GEM_DBUS_WIDTH (1 << 21) /* 64 bit bus */
65 #else
66 # define ZYNQ_GEM_DBUS_WIDTH (0 << 21) /* 32 bit bus */
67 #endif
68
69 #define ZYNQ_GEM_NWCFG_INIT (ZYNQ_GEM_DBUS_WIDTH | \
70 ZYNQ_GEM_NWCFG_FDEN | \
71 ZYNQ_GEM_NWCFG_FSREM | \
72 ZYNQ_GEM_NWCFG_MDCCLKDIV)
73
74 #define ZYNQ_GEM_NWSR_MDIOIDLE_MASK 0x00000004 /* PHY management idle */
75
76 #define ZYNQ_GEM_DMACR_BLENGTH 0x00000004 /* INCR4 AHB bursts */
77 /* Use full configured addressable space (8 Kb) */
78 #define ZYNQ_GEM_DMACR_RXSIZE 0x00000300
79 /* Use full configured addressable space (4 Kb) */
80 #define ZYNQ_GEM_DMACR_TXSIZE 0x00000400
81 /* Set with binary 00011000 to use 1536 byte(1*max length frame/buffer) */
82 #define ZYNQ_GEM_DMACR_RXBUF 0x00180000
83
84 #define ZYNQ_GEM_DMACR_INIT (ZYNQ_GEM_DMACR_BLENGTH | \
85 ZYNQ_GEM_DMACR_RXSIZE | \
86 ZYNQ_GEM_DMACR_TXSIZE | \
87 ZYNQ_GEM_DMACR_RXBUF)
88
89 #define ZYNQ_GEM_TSR_DONE 0x00000020 /* Tx done mask */
90
91 /* Use MII register 1 (MII status register) to detect PHY */
92 #define PHY_DETECT_REG 1
93
94 /* Mask used to verify certain PHY features (or register contents)
95 * in the register above:
96 * 0x1000: 10Mbps full duplex support
97 * 0x0800: 10Mbps half duplex support
98 * 0x0008: Auto-negotiation support
99 */
100 #define PHY_DETECT_MASK 0x1808
101
102 /* TX BD status masks */
103 #define ZYNQ_GEM_TXBUF_FRMLEN_MASK 0x000007ff
104 #define ZYNQ_GEM_TXBUF_EXHAUSTED 0x08000000
105 #define ZYNQ_GEM_TXBUF_UNDERRUN 0x10000000
106
107 /* Clock frequencies for different speeds */
108 #define ZYNQ_GEM_FREQUENCY_10 2500000UL
109 #define ZYNQ_GEM_FREQUENCY_100 25000000UL
110 #define ZYNQ_GEM_FREQUENCY_1000 125000000UL
111
112 /* Device registers */
/*
 * GEM register file, mapped at the controller base address.
 * Field offsets (noted per member) must match the hardware layout
 * exactly — do not reorder or resize members.
 */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - Phy Maintaince reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
	u32 match[4]; /* 0xa8 - Type ID1 Match reg */
	u32 reserved6[18];
#define STAT_SIZE	44
	u32 stat[STAT_SIZE]; /* 0x100 - Octects transmitted Low reg */
	u32 reserved7[164];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
	u32 reserved8[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
};
142
143 /* BD descriptors */
/*
 * DMA buffer descriptor, shared with the GEM hardware.
 * 'addr' holds the buffer address plus control bits (wrap/used on RX);
 * 'status' holds length and frame flags (SOF/EOF on RX, used/last on TX).
 */
struct emac_bd {
	u32 addr; /* Next descriptor pointer */
	u32 status;
};
148
149 #define RX_BUF 32
150 /* Page table entries are set to 1MB, or multiples of 1MB
151 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
152 */
153 #define BD_SPACE 0x100000
154 /* BD separation space */
155 #define BD_SEPRN_SPACE (RX_BUF * sizeof(struct emac_bd))
156
157 /* Setup the first free TX descriptor */
158 #define TX_FREE_DESC 2
159
160 /* Initialized, rxbd_current, rx_first_buf must be 0 after init */
/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;		/* TX descriptor area (in BD space) */
	struct emac_bd *rx_bd;		/* RX descriptor ring (RX_BUF entries) */
	char *rxbuffers;		/* RX packet buffers, DMA-aligned */
	u32 rxbd_current;		/* index of next RX BD to poll */
	u32 rx_first_buf;		/* index of SOF BD of frame in progress */
	int phyaddr;			/* PHY address, -1 = autodetect */
	u32 emio;			/* nonzero if PHY is wired through EMIO */
	int init;			/* one-time hardware init done flag */
	struct zynq_gem_regs *iobase;	/* controller register base */
	phy_interface_t interface;
	struct phy_device *phydev;
	struct mii_dev *bus;
};
175
176 static inline int mdio_wait(struct zynq_gem_regs *regs)
177 {
178 u32 timeout = 20000;
179
180 /* Wait till MDIO interface is ready to accept a new transaction. */
181 while (--timeout) {
182 if (readl(&regs->nwsr) & ZYNQ_GEM_NWSR_MDIOIDLE_MASK)
183 break;
184 WATCHDOG_RESET();
185 }
186
187 if (!timeout) {
188 printf("%s: Timeout\n", __func__);
189 return 1;
190 }
191
192 return 0;
193 }
194
195 static u32 phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
196 u32 op, u16 *data)
197 {
198 u32 mgtcr;
199 struct zynq_gem_regs *regs = priv->iobase;
200
201 if (mdio_wait(regs))
202 return 1;
203
204 /* Construct mgtcr mask for the operation */
205 mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
206 (phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
207 (regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;
208
209 /* Write mgtcr and wait for completion */
210 writel(mgtcr, &regs->phymntnc);
211
212 if (mdio_wait(regs))
213 return 1;
214
215 if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
216 *data = readl(&regs->phymntnc);
217
218 return 0;
219 }
220
221 static u32 phyread(struct zynq_gem_priv *priv, u32 phy_addr,
222 u32 regnum, u16 *val)
223 {
224 u32 ret;
225
226 ret = phy_setup_op(priv, phy_addr, regnum,
227 ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);
228
229 if (!ret)
230 debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
231 phy_addr, regnum, *val);
232
233 return ret;
234 }
235
236 static u32 phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
237 u32 regnum, u16 data)
238 {
239 debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
240 regnum, data);
241
242 return phy_setup_op(priv, phy_addr, regnum,
243 ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
244 }
245
246 static int phy_detection(struct eth_device *dev)
247 {
248 int i;
249 u16 phyreg;
250 struct zynq_gem_priv *priv = dev->priv;
251
252 if (priv->phyaddr != -1) {
253 phyread(priv, priv->phyaddr, PHY_DETECT_REG, &phyreg);
254 if ((phyreg != 0xFFFF) &&
255 ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
256 /* Found a valid PHY address */
257 debug("Default phy address %d is valid\n",
258 priv->phyaddr);
259 return 0;
260 } else {
261 debug("PHY address is not setup correctly %d\n",
262 priv->phyaddr);
263 priv->phyaddr = -1;
264 }
265 }
266
267 debug("detecting phy address\n");
268 if (priv->phyaddr == -1) {
269 /* detect the PHY address */
270 for (i = 31; i >= 0; i--) {
271 phyread(priv, i, PHY_DETECT_REG, &phyreg);
272 if ((phyreg != 0xFFFF) &&
273 ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
274 /* Found a valid PHY address */
275 priv->phyaddr = i;
276 debug("Found valid phy address, %d\n", i);
277 return 0;
278 }
279 }
280 }
281 printf("PHY is not detected\n");
282 return -1;
283 }
284
285 static int zynq_gem_setup_mac(struct eth_device *dev)
286 {
287 u32 i, macaddrlow, macaddrhigh;
288 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
289
290 /* Set the MAC bits [31:0] in BOT */
291 macaddrlow = dev->enetaddr[0];
292 macaddrlow |= dev->enetaddr[1] << 8;
293 macaddrlow |= dev->enetaddr[2] << 16;
294 macaddrlow |= dev->enetaddr[3] << 24;
295
296 /* Set MAC bits [47:32] in TOP */
297 macaddrhigh = dev->enetaddr[4];
298 macaddrhigh |= dev->enetaddr[5] << 8;
299
300 for (i = 0; i < 4; i++) {
301 writel(0, &regs->laddr[i][LADDR_LOW]);
302 writel(0, &regs->laddr[i][LADDR_HIGH]);
303 /* Do not use MATCHx register */
304 writel(0, &regs->match[i]);
305 }
306
307 writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
308 writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);
309
310 return 0;
311 }
312
313 static int zynq_phy_init(struct eth_device *dev)
314 {
315 int ret;
316 struct zynq_gem_priv *priv = dev->priv;
317 const u32 supported = SUPPORTED_10baseT_Half |
318 SUPPORTED_10baseT_Full |
319 SUPPORTED_100baseT_Half |
320 SUPPORTED_100baseT_Full |
321 SUPPORTED_1000baseT_Half |
322 SUPPORTED_1000baseT_Full;
323
324 ret = phy_detection(dev);
325 if (ret) {
326 printf("GEM PHY init failed\n");
327 return ret;
328 }
329
330 priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
331 priv->interface);
332 if (!priv->phydev)
333 return -ENODEV;
334
335 priv->phydev->supported = supported | ADVERTISED_Pause |
336 ADVERTISED_Asym_Pause;
337 priv->phydev->advertising = priv->phydev->supported;
338 phy_config(priv->phydev);
339
340 return 0;
341 }
342
343 static int zynq_gem_init(struct eth_device *dev, bd_t *bis)
344 {
345 u32 i;
346 int ret;
347 unsigned long clk_rate = 0;
348 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
349 struct zynq_gem_priv *priv = dev->priv;
350 struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
351 struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];
352
353 if (!priv->init) {
354 /* Disable all interrupts */
355 writel(0xFFFFFFFF, &regs->idr);
356
357 /* Disable the receiver & transmitter */
358 writel(0, &regs->nwctrl);
359 writel(0, &regs->txsr);
360 writel(0, &regs->rxsr);
361 writel(0, &regs->phymntnc);
362
363 /* Clear the Hash registers for the mac address
364 * pointed by AddressPtr
365 */
366 writel(0x0, &regs->hashl);
367 /* Write bits [63:32] in TOP */
368 writel(0x0, &regs->hashh);
369
370 /* Clear all counters */
371 for (i = 0; i < STAT_SIZE; i++)
372 readl(&regs->stat[i]);
373
374 /* Setup RxBD space */
375 memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));
376
377 for (i = 0; i < RX_BUF; i++) {
378 priv->rx_bd[i].status = 0xF0000000;
379 priv->rx_bd[i].addr =
380 ((ulong)(priv->rxbuffers) +
381 (i * PKTSIZE_ALIGN));
382 }
383 /* WRAP bit to last BD */
384 priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
385 /* Write RxBDs to IP */
386 writel((ulong)priv->rx_bd, &regs->rxqbase);
387
388 /* Setup for DMA Configuration register */
389 writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);
390
391 /* Setup for Network Control register, MDIO, Rx and Tx enable */
392 setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);
393
394 /* Disable the second priority queue */
395 dummy_tx_bd->addr = 0;
396 dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
397 ZYNQ_GEM_TXBUF_LAST_MASK|
398 ZYNQ_GEM_TXBUF_USED_MASK;
399
400 dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
401 ZYNQ_GEM_RXBUF_NEW_MASK;
402 dummy_rx_bd->status = 0;
403 flush_dcache_range((ulong)&dummy_tx_bd, (ulong)&dummy_tx_bd +
404 sizeof(dummy_tx_bd));
405 flush_dcache_range((ulong)&dummy_rx_bd, (ulong)&dummy_rx_bd +
406 sizeof(dummy_rx_bd));
407
408 writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
409 writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);
410
411 priv->init++;
412 }
413
414 ret = zynq_phy_init(dev);
415 if (ret)
416 return ret;
417
418 phy_startup(priv->phydev);
419
420 if (!priv->phydev->link) {
421 printf("%s: No link.\n", priv->phydev->dev->name);
422 return -1;
423 }
424
425 switch (priv->phydev->speed) {
426 case SPEED_1000:
427 writel(ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED1000,
428 &regs->nwcfg);
429 clk_rate = ZYNQ_GEM_FREQUENCY_1000;
430 break;
431 case SPEED_100:
432 writel(ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED100,
433 &regs->nwcfg);
434 clk_rate = ZYNQ_GEM_FREQUENCY_100;
435 break;
436 case SPEED_10:
437 clk_rate = ZYNQ_GEM_FREQUENCY_10;
438 break;
439 }
440
441 /* Change the rclk and clk only not using EMIO interface */
442 if (!priv->emio)
443 zynq_slcr_gem_clk_setup(dev->iobase !=
444 ZYNQ_GEM_BASEADDR0, clk_rate);
445
446 setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
447 ZYNQ_GEM_NWCTRL_TXEN_MASK);
448
449 return 0;
450 }
451
452 static int wait_for_bit(const char *func, u32 *reg, const u32 mask,
453 bool set, unsigned int timeout)
454 {
455 u32 val;
456 unsigned long start = get_timer(0);
457
458 while (1) {
459 val = readl(reg);
460
461 if (!set)
462 val = ~val;
463
464 if ((val & mask) == mask)
465 return 0;
466
467 if (get_timer(start) > timeout)
468 break;
469
470 udelay(1);
471 }
472
473 debug("%s: Timeout (reg=%p mask=%08x wait_set=%i)\n",
474 func, reg, mask, set);
475
476 return -ETIMEDOUT;
477 }
478
/*
 * Transmit one frame of @len bytes at @ptr.
 * Builds a two-descriptor chain (real BD + terminating dummy), flushes
 * the frame and RX buffers from the data cache so DMA sees current
 * data, then triggers TX and waits for the done bit in txsr.
 * Returns 0 on success, -ETIMEDOUT if TX never completes.
 */
static int zynq_gem_send(struct eth_device *dev, void *ptr, int len)
{
	u32 addr, size;
	struct zynq_gem_priv *priv = dev->priv;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = (ulong)ptr;
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			       ZYNQ_GEM_TXBUF_LAST_MASK;
	/* Dummy descriptor to mark it as the last in descriptor chain */
	current_bd->addr = 0x0;
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK|
			     ZYNQ_GEM_TXBUF_USED_MASK;

	/* setup BD */
	writel((ulong)priv->tx_bd, &regs->txqbase);

	/* Flush the frame data, rounded out to cache-line boundaries,
	 * so the DMA engine reads what the CPU just wrote.
	 */
	addr = (ulong) ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);

	/* Also flush the RX buffer area before enabling DMA activity */
	addr = (ulong)priv->rxbuffers;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup((RX_BUF * PKTSIZE_ALIGN), ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return wait_for_bit(__func__, &regs->txsr, ZYNQ_GEM_TSR_DONE,
			    true, 20000);
}
522
523 /* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
/*
 * Poll the current RX buffer descriptor for a received frame.
 * If a frame is present, hand it to the network stack, recycle the
 * descriptor(s) back to the hardware (clearing the NEW bit), and
 * advance the ring index.
 * Returns the frame length, or 0 when nothing (valid) was received.
 */
static int zynq_gem_recv(struct eth_device *dev)
{
	int frame_len;
	struct zynq_gem_priv *priv = dev->priv;
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;

	/* NEW bit clear means the hardware still owns this BD */
	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return 0;

	if (!(current_bd->status &
	     (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return 0;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (frame_len) {
		/* Strip the control bits to recover the buffer address */
		u32 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
		addr &= ~(ARCH_DMA_MINALIGN - 1);

		net_process_received_packet((u8 *)(ulong)addr, frame_len);

		/* Remember where a multi-BD frame starts; for non-SOF BDs
		 * return ownership to the hardware immediately.
		 */
		if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK)
			priv->rx_first_buf = priv->rxbd_current;
		else {
			current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
			current_bd->status = 0xF0000000; /* FIXME */
		}

		/* On EOF, also release the frame's first descriptor */
		if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
			first_bd = &priv->rx_bd[priv->rx_first_buf];
			first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
			first_bd->status = 0xF0000000;
		}

		/* Advance around the ring */
		if ((++priv->rxbd_current) >= RX_BUF)
			priv->rxbd_current = 0;
	}

	return frame_len;
}
566
567 static void zynq_gem_halt(struct eth_device *dev)
568 {
569 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
570
571 clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
572 ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
573 }
574
575 static int zynq_gem_miiphyread(const char *devname, uchar addr,
576 uchar reg, ushort *val)
577 {
578 struct eth_device *dev = eth_get_dev();
579 struct zynq_gem_priv *priv = dev->priv;
580 int ret;
581
582 ret = phyread(priv, addr, reg, val);
583 debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, *val);
584 return ret;
585 }
586
587 static int zynq_gem_miiphy_write(const char *devname, uchar addr,
588 uchar reg, ushort val)
589 {
590 struct eth_device *dev = eth_get_dev();
591 struct zynq_gem_priv *priv = dev->priv;
592
593 debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val);
594 return phywrite(priv, addr, reg, val);
595 }
596
597 int zynq_gem_initialize(bd_t *bis, phys_addr_t base_addr,
598 int phy_addr, u32 emio)
599 {
600 struct eth_device *dev;
601 struct zynq_gem_priv *priv;
602 void *bd_space;
603
604 dev = calloc(1, sizeof(*dev));
605 if (dev == NULL)
606 return -1;
607
608 dev->priv = calloc(1, sizeof(struct zynq_gem_priv));
609 if (dev->priv == NULL) {
610 free(dev);
611 return -1;
612 }
613 priv = dev->priv;
614
615 /* Align rxbuffers to ARCH_DMA_MINALIGN */
616 priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
617 memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
618
619 /* Align bd_space to MMU_SECTION_SHIFT */
620 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
621 mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
622 BD_SPACE, DCACHE_OFF);
623
624 /* Initialize the bd spaces for tx and rx bd's */
625 priv->tx_bd = (struct emac_bd *)bd_space;
626 priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);
627
628 priv->phyaddr = phy_addr;
629 priv->emio = emio;
630
631 #ifndef CONFIG_ZYNQ_GEM_INTERFACE
632 priv->interface = PHY_INTERFACE_MODE_MII;
633 #else
634 priv->interface = CONFIG_ZYNQ_GEM_INTERFACE;
635 #endif
636
637 sprintf(dev->name, "Gem.%lx", base_addr);
638
639 dev->iobase = base_addr;
640 priv->iobase = (struct zynq_gem_regs *)base_addr;
641
642 dev->init = zynq_gem_init;
643 dev->halt = zynq_gem_halt;
644 dev->send = zynq_gem_send;
645 dev->recv = zynq_gem_recv;
646 dev->write_hwaddr = zynq_gem_setup_mac;
647
648 eth_register(dev);
649
650 miiphy_register(dev->name, zynq_gem_miiphyread, zynq_gem_miiphy_write);
651 priv->bus = miiphy_get_dev_by_name(dev->name);
652
653 return 1;
654 }
655
656 #if CONFIG_IS_ENABLED(OF_CONTROL)
657 int zynq_gem_of_init(const void *blob)
658 {
659 int offset = 0;
660 u32 ret = 0;
661 u32 reg, phy_reg;
662
663 debug("ZYNQ GEM: Initialization\n");
664
665 do {
666 offset = fdt_node_offset_by_compatible(blob, offset,
667 "xlnx,ps7-ethernet-1.00.a");
668 if (offset != -1) {
669 reg = fdtdec_get_addr(blob, offset, "reg");
670 if (reg != FDT_ADDR_T_NONE) {
671 offset = fdtdec_lookup_phandle(blob, offset,
672 "phy-handle");
673 if (offset != -1)
674 phy_reg = fdtdec_get_addr(blob, offset,
675 "reg");
676 else
677 phy_reg = 0;
678
679 debug("ZYNQ GEM: addr %x, phyaddr %x\n",
680 reg, phy_reg);
681
682 ret |= zynq_gem_initialize(NULL, reg,
683 phy_reg, 0);
684
685 } else {
686 debug("ZYNQ GEM: Can't get base address\n");
687 return -1;
688 }
689 }
690 } while (offset != -1);
691
692 return ret;
693 }
694 #endif