]> git.ipfire.org Git - people/ms/u-boot.git/blob - drivers/net/cpsw.c
treewide: replace with error() with pr_err()
[people/ms/u-boot.git] / drivers / net / cpsw.c
1 /*
2 * CPSW Ethernet Switch Driver
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <common.h>
17 #include <command.h>
18 #include <net.h>
19 #include <miiphy.h>
20 #include <malloc.h>
21 #include <net.h>
22 #include <netdev.h>
23 #include <cpsw.h>
24 #include <linux/errno.h>
25 #include <asm/gpio.h>
26 #include <asm/io.h>
27 #include <phy.h>
28 #include <asm/arch/cpu.h>
29 #include <dm.h>
30 #include <fdt_support.h>
31
32 DECLARE_GLOBAL_DATA_PTR;
33
34 #define BITMASK(bits) (BIT(bits) - 1)
35 #define PHY_REG_MASK 0x1f
36 #define PHY_ID_MASK 0x1f
37 #define NUM_DESCS (PKTBUFSRX * 2)
38 #define PKT_MIN 60
39 #define PKT_MAX (1500 + 14 + 4 + 4)
40 #define CLEAR_BIT 1
41 #define GIGABITEN BIT(7)
42 #define FULLDUPLEXEN BIT(0)
43 #define MIIEN BIT(15)
44
45 /* reg offset */
46 #define CPSW_HOST_PORT_OFFSET 0x108
47 #define CPSW_SLAVE0_OFFSET 0x208
48 #define CPSW_SLAVE1_OFFSET 0x308
49 #define CPSW_SLAVE_SIZE 0x100
50 #define CPSW_CPDMA_OFFSET 0x800
51 #define CPSW_HW_STATS 0x900
52 #define CPSW_STATERAM_OFFSET 0xa00
53 #define CPSW_CPTS_OFFSET 0xc00
54 #define CPSW_ALE_OFFSET 0xd00
55 #define CPSW_SLIVER0_OFFSET 0xd80
56 #define CPSW_SLIVER1_OFFSET 0xdc0
57 #define CPSW_BD_OFFSET 0x2000
58 #define CPSW_MDIO_DIV 0xff
59
60 #define AM335X_GMII_SEL_OFFSET 0x630
61
62 /* DMA Registers */
63 #define CPDMA_TXCONTROL 0x004
64 #define CPDMA_RXCONTROL 0x014
65 #define CPDMA_SOFTRESET 0x01c
66 #define CPDMA_RXFREE 0x0e0
67 #define CPDMA_TXHDP_VER1 0x100
68 #define CPDMA_TXHDP_VER2 0x200
69 #define CPDMA_RXHDP_VER1 0x120
70 #define CPDMA_RXHDP_VER2 0x220
71 #define CPDMA_TXCP_VER1 0x140
72 #define CPDMA_TXCP_VER2 0x240
73 #define CPDMA_RXCP_VER1 0x160
74 #define CPDMA_RXCP_VER2 0x260
75
76 /* Descriptor mode bits */
77 #define CPDMA_DESC_SOP BIT(31)
78 #define CPDMA_DESC_EOP BIT(30)
79 #define CPDMA_DESC_OWNER BIT(29)
80 #define CPDMA_DESC_EOQ BIT(28)
81
82 /*
83 * This timeout definition is a worst-case ultra defensive measure against
84 * unexpected controller lock ups. Ideally, we should never ever hit this
85 * scenario in practice.
86 */
87 #define MDIO_TIMEOUT 100 /* msecs */
88 #define CPDMA_TIMEOUT 100 /* msecs */
89
/*
 * MDIO controller register map (memory mapped). Accessed via the
 * file-scope mdio_regs pointer set up in cpsw_mdio_init().
 */
struct cpsw_mdio_regs {
	u32 version;
	u32 control;
#define CONTROL_IDLE BIT(31)	/* state machine is idle */
#define CONTROL_ENABLE BIT(30)	/* enable the MDIO state machine */

	u32 alive;
	u32 link;
	u32 linkintraw;
	u32 linkintmasked;
	u32 __reserved_0[2];
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclr;
	u32 __reserved_1[20];

	/* per-user access register pairs; [0] is a GNU zero-length array
	 * (only user[0] is used by this driver) */
	struct {
		u32 access;
		u32 physel;
#define USERACCESS_GO BIT(31)		/* start/busy flag */
#define USERACCESS_WRITE BIT(30)
#define USERACCESS_ACK BIT(29)		/* PHY acknowledged the access */
#define USERACCESS_READ (0)
#define USERACCESS_DATA (0xffff)	/* 16-bit data field mask */
	} user[0];
};
117
/* CPSW subsystem-level registers (mapped at cpsw_base) */
struct cpsw_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;		/* self-clearing soft reset */
	u32 stat_port_en;	/* per-port statistics enable mask */
	u32 ptype;		/* priority elevation control */
};
125
/* Per-slave-port registers; mid-struct layout varies by SoC generation */
struct cpsw_slave_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
#ifdef CONFIG_AM33XX
	u32 gap_thresh;
#elif defined(CONFIG_TI814X)
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
#endif
	u32 sa_lo;	/* source MAC, low 16 bits (see cpsw_set_slave_mac) */
	u32 sa_hi;	/* source MAC, high 32 bits */
};
142
/* Host (CPU-facing) port registers */
struct cpsw_host_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 cpdma_tx_pri_map;	/* CPDMA tx channel priority map */
	u32 cpdma_rx_chan_map;	/* CPDMA rx channel mapping */
};
152
/* Per-slave MAC ("sliver") registers */
struct cpsw_sliver_regs {
	u32 id_ver;
	u32 mac_control;	/* speed/duplex bits, see GIGABITEN etc. */
	u32 mac_status;
	u32 soft_reset;		/* self-clearing soft reset */
	u32 rx_maxlen;		/* maximum accepted rx frame length */
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
};
165
166 #define ALE_ENTRY_BITS 68
167 #define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
168
169 /* ALE Registers */
170 #define ALE_CONTROL 0x08
171 #define ALE_UNKNOWNVLAN 0x18
172 #define ALE_TABLE_CONTROL 0x20
173 #define ALE_TABLE 0x34
174 #define ALE_PORTCTL 0x40
175
176 #define ALE_TABLE_WRITE BIT(31)
177
178 #define ALE_TYPE_FREE 0
179 #define ALE_TYPE_ADDR 1
180 #define ALE_TYPE_VLAN 2
181 #define ALE_TYPE_VLAN_ADDR 3
182
183 #define ALE_UCAST_PERSISTANT 0
184 #define ALE_UCAST_UNTOUCHED 1
185 #define ALE_UCAST_OUI 2
186 #define ALE_UCAST_TOUCHED 3
187
188 #define ALE_MCAST_FWD 0
189 #define ALE_MCAST_BLOCK_LEARN_FWD 1
190 #define ALE_MCAST_FWD_LEARN 2
191 #define ALE_MCAST_FWD_2 3
192
/* Per-port forwarding state, 2-bit field set via cpsw_ale_port_state() */
enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE = 0x00,
	ALE_PORT_STATE_BLOCK = 0x01,
	ALE_PORT_STATE_LEARN = 0x02,
	ALE_PORT_STATE_FORWARD = 0x03,
};
199
200 /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
201 #define ALE_SECURE 1
202 #define ALE_BLOCKED 2
203
/* Software state for one slave port */
struct cpsw_slave {
	struct cpsw_slave_regs *regs;	/* per-port register window */
	struct cpsw_sliver_regs *sliver;	/* per-port MAC register window */
	int slave_num;
	u32 mac_control;	/* last value written to sliver mac_control */
	struct cpsw_slave_data *data;	/* platform data for this slave */
};
211
/*
 * CPDMA buffer descriptor, laid out in BD RAM as the DMA engine expects.
 * The sw_* fields shadow the submitted buffer/length because the hw_*
 * fields may not survive completion (cpdma_process() reads sw_buffer).
 */
struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;	/* SOP/EOP/OWNER/EOQ flags plus packet length */
	/* software fields */
	u32 sw_buffer;
	u32 sw_len;
};
222
/*
 * One CPDMA channel: software queue (head/tail of in-flight descriptors)
 * plus the channel's head-pointer, completion-pointer and rx-free
 * register addresses (rxfree stays NULL for the tx channel).
 */
struct cpdma_chan {
	struct cpdma_desc *head, *tail;
	void *hdp, *cp, *rxfree;
};
227
228 /* AM33xx SoC specific definitions for the CONTROL port */
229 #define AM33XX_GMII_SEL_MODE_MII 0
230 #define AM33XX_GMII_SEL_MODE_RMII 1
231 #define AM33XX_GMII_SEL_MODE_RGMII 2
232
233 #define AM33XX_GMII_SEL_RGMII1_IDMODE BIT(4)
234 #define AM33XX_GMII_SEL_RGMII2_IDMODE BIT(5)
235 #define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
236 #define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
237
238 #define GMII_SEL_MODE_MASK 0x3
239
240 #define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
241 #define desc_read(desc, fld) __raw_readl(&(desc)->fld)
242 #define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))
243
244 #define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
245 #define chan_read(chan, fld) __raw_readl((chan)->fld)
246 #define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))
247
248 #define for_active_slave(slave, priv) \
249 slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
250 #define for_each_slave(slave, priv) \
251 for (slave = (priv)->slaves; slave != (priv)->slaves + \
252 (priv)->data.slaves; slave++)
253
/* Per-device driver state, shared by the DM and legacy eth code paths */
struct cpsw_priv {
#ifdef CONFIG_DM_ETH
	struct udevice *dev;
#else
	struct eth_device *dev;
#endif
	struct cpsw_platform_data data;	/* copied board/platform config */
	int host_port;			/* switch port number of the CPU port */

	struct cpsw_regs *regs;			/* subsystem registers */
	void *dma_regs;				/* CPDMA register base */
	struct cpsw_host_regs *host_port_regs;
	void *ale_regs;				/* address lookup engine base */

	struct cpdma_desc *descs;	/* descriptor pool in BD RAM */
	struct cpdma_desc *desc_free;	/* head of the free-descriptor list */
	struct cpdma_chan rx_chan, tx_chan;

	struct cpsw_slave *slaves;	/* array of data.slaves entries */
	struct phy_device *phydev;	/* PHY of the active slave */
	struct mii_dev *bus;

	u32 phy_mask;	/* bitmask of PHY addresses in use */
};
278
279 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
280 {
281 int idx;
282
283 idx = start / 32;
284 start -= idx * 32;
285 idx = 2 - idx; /* flip */
286 return (ale_entry[idx] >> start) & BITMASK(bits);
287 }
288
289 static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
290 u32 value)
291 {
292 int idx;
293
294 value &= BITMASK(bits);
295 idx = start / 32;
296 start -= idx * 32;
297 idx = 2 - idx; /* flip */
298 ale_entry[idx] &= ~(BITMASK(bits) << start);
299 ale_entry[idx] |= (value << start);
300 }
301
/* Generate get/set accessors for a named bitfield of an ALE entry */
#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
	return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
	cpsw_ale_set_field(ale_entry, start, bits, value); \
}

/* Field positions within the 68-bit ALE table entry */
DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(mcast, 40, 1)
320
321 /* The MAC address field in the ALE entry cannot be macroized as above */
322 static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
323 {
324 int i;
325
326 for (i = 0; i < 6; i++)
327 addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
328 }
329
330 static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
331 {
332 int i;
333
334 for (i = 0; i < 6; i++)
335 cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
336 }
337
/*
 * Read ALE table entry @idx into @ale_entry (ALE_ENTRY_WORDS words).
 * Writing the index to ALE_TABLE_CONTROL latches the entry into the
 * ALE_TABLE window registers, which are then read out. Returns @idx.
 */
static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}
349
/*
 * Write @ale_entry into ALE table slot @idx: load the window registers
 * first, then commit with the WRITE bit in ALE_TABLE_CONTROL.
 * Returns @idx.
 */
static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}
361
362 static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
363 {
364 u32 ale_entry[ALE_ENTRY_WORDS];
365 int type, idx;
366
367 for (idx = 0; idx < priv->data.ale_entries; idx++) {
368 u8 entry_addr[6];
369
370 cpsw_ale_read(priv, idx, ale_entry);
371 type = cpsw_ale_get_entry_type(ale_entry);
372 if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
373 continue;
374 cpsw_ale_get_addr(ale_entry, entry_addr);
375 if (memcmp(entry_addr, addr, 6) == 0)
376 return idx;
377 }
378 return -ENOENT;
379 }
380
381 static int cpsw_ale_match_free(struct cpsw_priv *priv)
382 {
383 u32 ale_entry[ALE_ENTRY_WORDS];
384 int type, idx;
385
386 for (idx = 0; idx < priv->data.ale_entries; idx++) {
387 cpsw_ale_read(priv, idx, ale_entry);
388 type = cpsw_ale_get_entry_type(ale_entry);
389 if (type == ALE_TYPE_FREE)
390 return idx;
391 }
392 return -ENOENT;
393 }
394
395 static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
396 {
397 u32 ale_entry[ALE_ENTRY_WORDS];
398 int type, idx;
399
400 for (idx = 0; idx < priv->data.ale_entries; idx++) {
401 cpsw_ale_read(priv, idx, ale_entry);
402 type = cpsw_ale_get_entry_type(ale_entry);
403 if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
404 continue;
405 if (cpsw_ale_get_mcast(ale_entry))
406 continue;
407 type = cpsw_ale_get_ucast_type(ale_entry);
408 if (type != ALE_UCAST_PERSISTANT &&
409 type != ALE_UCAST_OUI)
410 return idx;
411 }
412 return -ENOENT;
413 }
414
415 static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
416 int port, int flags)
417 {
418 u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
419 int idx;
420
421 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
422 cpsw_ale_set_addr(ale_entry, addr);
423 cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
424 cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
425 cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
426 cpsw_ale_set_port_num(ale_entry, port);
427
428 idx = cpsw_ale_match_addr(priv, addr);
429 if (idx < 0)
430 idx = cpsw_ale_match_free(priv);
431 if (idx < 0)
432 idx = cpsw_ale_find_ageable(priv);
433 if (idx < 0)
434 return -ENOMEM;
435
436 cpsw_ale_write(priv, idx, ale_entry);
437 return 0;
438 }
439
/*
 * Add (or extend) the multicast entry for @addr, OR-ing @port_mask into
 * any existing entry's port mask so earlier members are preserved.
 * Returns 0, or -ENOMEM when the table is full and nothing is ageable.
 */
static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
			      int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	/* read-modify-write: fetch the current entry if one exists */
	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	/* merge the new ports into whatever mask was already present */
	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}
468
469 static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
470 {
471 u32 tmp, mask = BIT(bit);
472
473 tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
474 tmp &= ~mask;
475 tmp |= val ? mask : 0;
476 __raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
477 }
478
479 #define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
480 #define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
481 #define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)
482
483 static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
484 int val)
485 {
486 int offset = ALE_PORTCTL + 4 * port;
487 u32 tmp, mask = 0x3;
488
489 tmp = __raw_readl(priv->ale_regs + offset);
490 tmp &= ~mask;
491 tmp |= val & mask;
492 __raw_writel(tmp, priv->ale_regs + offset);
493 }
494
495 static struct cpsw_mdio_regs *mdio_regs;
496
497 /* wait until hardware is ready for another user access */
498 static inline u32 wait_for_user_access(void)
499 {
500 u32 reg = 0;
501 int timeout = MDIO_TIMEOUT;
502
503 while (timeout-- &&
504 ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
505 udelay(10);
506
507 if (timeout == -1) {
508 printf("wait_for_user_access Timeout\n");
509 return -ETIMEDOUT;
510 }
511 return reg;
512 }
513
514 /* wait until hardware state machine is idle */
515 static inline void wait_for_idle(void)
516 {
517 int timeout = MDIO_TIMEOUT;
518
519 while (timeout-- &&
520 ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
521 udelay(10);
522
523 if (timeout == -1)
524 printf("wait_for_idle Timeout\n");
525 }
526
/*
 * Read PHY register @phy_reg of PHY @phy_id via the MDIO user access
 * register. @dev_addr (clause-45 device address) is unused.
 * Returns the 16-bit value, -1 if the PHY did not ACK, or -EINVAL for
 * an out-of-range phy/register number.
 */
static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
				int dev_addr, int phy_reg)
{
	int data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	/* REGADR at bit 21, PHYADR at bit 16, GO starts the transaction */
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	/* only trust the data field when the PHY acknowledged */
	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}
545
546 static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
547 int phy_reg, u16 data)
548 {
549 u32 reg;
550
551 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
552 return -EINVAL;
553
554 wait_for_user_access();
555 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
556 (phy_id << 16) | (data & USERACCESS_DATA));
557 __raw_writel(reg, &mdio_regs->user[0].access);
558 wait_for_user_access();
559
560 return 0;
561 }
562
563 static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
564 {
565 struct mii_dev *bus = mdio_alloc();
566
567 mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
568
569 /* set enable and clock divider */
570 __raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
571
572 /*
573 * wait for scan logic to settle:
574 * the scan time consists of (a) a large fixed component, and (b) a
575 * small component that varies with the mii bus frequency. These
576 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
577 * silicon. Since the effect of (b) was found to be largely
578 * negligible, we keep things simple here.
579 */
580 udelay(1000);
581
582 bus->read = cpsw_mdio_read;
583 bus->write = cpsw_mdio_write;
584 strcpy(bus->name, name);
585
586 mdio_register(bus);
587 }
588
/*
 * Set a self-clearing bit in a register, and wait for it to clear.
 * NOTE(review): this poll is unbounded — it relies on the hardware
 * always clearing the bit eventually.
 */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}
596
597 #define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
598 ((mac)[2] << 16) | ((mac)[3] << 24))
599 #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
600
/*
 * Program the slave port's source MAC address registers from the
 * device's enetaddr (DM and legacy variants differ only in where the
 * address is stored).
 */
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
#ifdef CONFIG_DM_ETH
	struct eth_pdata *pdata = dev_get_platdata(priv->dev);

	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
#else
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
#endif
}
614
/*
 * Start the PHY on @slave and program the sliver's mac_control to match
 * the negotiated link (gigabit/duplex/MII enable bits). If @link is
 * non-NULL the current link state is stored there.
 * Returns 0 on success, or a negative error when there is no PHY or
 * phy_startup() fails.
 */
static int cpsw_slave_update_link(struct cpsw_slave *slave,
				  struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy;
	u32 mac_control = 0;
	int ret = -ENODEV;

	phy = priv->phydev;
	if (!phy)
		goto out;

	ret = phy_startup(phy);
	if (ret)
		goto out;

	if (link)
		*link = phy->link;

	if (phy->link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	/* nothing to do if the hardware already matches this state */
	if (mac_control == slave->mac_control)
		goto out;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;	/* cache for the check above */

out:
	return ret;
}
660
/*
 * Update the link state of the active slave.
 * Returns the result of cpsw_slave_update_link(), or -ENODEV if there
 * is no active slave.
 */
static int cpsw_update_link(struct cpsw_priv *priv)
{
	int ret = -ENODEV;
	struct cpsw_slave *slave;

	for_active_slave(slave, priv)
		ret = cpsw_slave_update_link(slave, priv, NULL);

	return ret;
}
671
672 static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
673 {
674 if (priv->host_port == 0)
675 return slave_num + 1;
676 else
677 return slave_num;
678 }
679
/*
 * Reset and configure one slave port: priority maps, maximum rx frame
 * length, source MAC, ALE forwarding state and broadcast membership.
 * Also records the slave's PHY address in priv->phy_mask.
 */
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	/* soft-reset the sliver before touching its registers */
	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0; /* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	/* make sure broadcasts reach this port too */
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_addr;
}
704
705 static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
706 {
707 struct cpdma_desc *desc = priv->desc_free;
708
709 if (desc)
710 priv->desc_free = desc_read_ptr(desc, hw_next);
711 return desc;
712 }
713
714 static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
715 {
716 if (desc) {
717 desc_write(desc, hw_next, priv->desc_free);
718 priv->desc_free = desc;
719 }
720 }
721
/*
 * Queue @buffer (@len bytes, padded up to PKT_MIN) on @chan for DMA.
 * Appends to the software descriptor queue and kicks the channel's head
 * pointer when the queue was empty or the hardware had already hit
 * end-of-queue. Returns 0, or -ENOMEM when no descriptor is free.
 */
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	/* hardware requires a minimum frame length */
	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	/* keep software copies — see struct cpdma_desc */
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	/* tell rx channels one more buffer is available */
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}
766
/*
 * Reap the oldest descriptor on @chan. On completion, returns 0 with
 * *buffer/*len filled in and the descriptor acknowledged and recycled.
 * Returns -ENOENT when the queue is empty, or -EBUSY while the
 * descriptor is still owned by the hardware (restarting the channel if
 * it stalled at end-of-queue in the meantime).
 */
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	/* low 11 bits of hw_mode hold the (completed) buffer length */
	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		/* still owned by hardware; re-kick a stalled channel */
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	/* advance the queue and acknowledge completion to the hardware */
	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}
799
/*
 * Bring the switch up ready for tx/rx with MAC address @enetaddr:
 * reset the core, initialize the ALE, configure host and active slave
 * ports, bring the link up, build the descriptor free list, set up the
 * DMA channels and prime the rx queue.
 * Returns 0 on success or a negative error (e.g. from link bring-up).
 */
static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
{
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
	/*
	 * NOTE(review): this immediately overwrites the write above and
	 * enables statistics on ports 0-2, contradicting the comment —
	 * confirm which behavior is intended.
	 */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	/* our MAC as a secure unicast entry; broadcasts go to the host */
	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);

	for_active_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	ret = cpsw_update_link(priv);
	if (ret)
		goto out;

	/* init descriptor pool: chain every BD-RAM descriptor into a free list */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels: register offsets differ per IP version */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp       = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp        = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree    = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp       = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp        = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	/* zero head/free/completion pointers for every channel */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
					* i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
					* i);

		}
	}

	/* start both DMA engines */
	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

out:
	return ret;
}
912
/*
 * Quiesce the controller: stop tx/rx DMA, then soft-reset both the
 * switch core and the CPDMA engine.
 */
static void _cpsw_halt(struct cpsw_priv *priv)
{
	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

}
925
/*
 * Transmit one packet: flush its cache lines for DMA, reap any already
 * completed tx descriptors (bounded by CPDMA_TIMEOUT), then queue the
 * new buffer. Returns 0 on success or a negative error.
 */
static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
{
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	/* make the packet visible to the DMA engine */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}
947
948 static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
949 {
950 void *buffer;
951 int len;
952 int ret = -EAGAIN;
953
954 ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
955 if (ret < 0)
956 return ret;
957
958 invalidate_dcache_range((unsigned long)buffer,
959 (unsigned long)buffer + PKTSIZE_ALIGN);
960 *pkt = buffer;
961
962 return len;
963 }
964
965 static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
966 struct cpsw_priv *priv)
967 {
968 void *regs = priv->regs;
969 struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
970 slave->slave_num = slave_num;
971 slave->data = data;
972 slave->regs = regs + data->slave_reg_ofs;
973 slave->sliver = regs + data->sliver_reg_ofs;
974 }
975
/*
 * Connect to and configure the PHY for @slave, capping the advertised
 * features at gigabit-and-below.
 * Returns 1 on success and -1 if phy_connect() fails.
 * NOTE(review): the success value 1 (not 0) is currently ignored by all
 * callers.
 */
static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
{
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;

	phydev = phy_connect(priv->bus,
			     slave->data->phy_addr,
			     priv->dev,
			     slave->data->phy_if);

	if (!phydev)
		return -1;

	/* advertise no more than we can support */
	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

#ifdef CONFIG_DM_ETH
	/* let the PHY driver find its DT node for extra configuration */
	if (slave->data->phy_of_handle)
		dev_set_of_offset(phydev->dev, slave->data->phy_of_handle);
#endif

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}
1002
1003 int _cpsw_register(struct cpsw_priv *priv)
1004 {
1005 struct cpsw_slave *slave;
1006 struct cpsw_platform_data *data = &priv->data;
1007 void *regs = (void *)data->cpsw_base;
1008
1009 priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
1010 if (!priv->slaves) {
1011 return -ENOMEM;
1012 }
1013
1014 priv->host_port = data->host_port_num;
1015 priv->regs = regs;
1016 priv->host_port_regs = regs + data->host_port_reg_ofs;
1017 priv->dma_regs = regs + data->cpdma_reg_ofs;
1018 priv->ale_regs = regs + data->ale_reg_ofs;
1019 priv->descs = (void *)regs + data->bd_ram_ofs;
1020
1021 int idx = 0;
1022
1023 for_each_slave(slave, priv) {
1024 cpsw_slave_setup(slave, idx, priv);
1025 idx = idx + 1;
1026 }
1027
1028 cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
1029 priv->bus = miiphy_get_dev_by_name(priv->dev->name);
1030 for_active_slave(slave, priv)
1031 cpsw_phy_init(priv, slave);
1032
1033 return 0;
1034 }
1035
1036 #ifndef CONFIG_DM_ETH
/* eth_device .init hook: bring up the switch with the device's MAC. */
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_init(priv, dev->enetaddr);
}
1043
/* eth_device .halt hook: stop DMA and reset the controller. */
static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_halt(priv);
}
1050
/* eth_device .send hook: transmit one packet. */
static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_send(priv, packet, length);
}
1057
/* eth_device .recv hook: process one rx packet and recycle its buffer. */
static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	uchar *pkt = NULL;
	int len;

	len = _cpsw_recv(priv, &pkt);

	if (len > 0) {
		/* hand to the stack, then resubmit the buffer for rx */
		net_process_received_packet(pkt, len);
		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
	}

	return len;
}
1073
1074 int cpsw_register(struct cpsw_platform_data *data)
1075 {
1076 struct cpsw_priv *priv;
1077 struct eth_device *dev;
1078 int ret;
1079
1080 dev = calloc(sizeof(*dev), 1);
1081 if (!dev)
1082 return -ENOMEM;
1083
1084 priv = calloc(sizeof(*priv), 1);
1085 if (!priv) {
1086 free(dev);
1087 return -ENOMEM;
1088 }
1089
1090 priv->dev = dev;
1091 priv->data = *data;
1092
1093 strcpy(dev->name, "cpsw");
1094 dev->iobase = 0;
1095 dev->init = cpsw_init;
1096 dev->halt = cpsw_halt;
1097 dev->send = cpsw_send;
1098 dev->recv = cpsw_recv;
1099 dev->priv = priv;
1100
1101 eth_register(dev);
1102
1103 ret = _cpsw_register(priv);
1104 if (ret < 0) {
1105 eth_unregister(dev);
1106 free(dev);
1107 free(priv);
1108 return ret;
1109 }
1110
1111 return 1;
1112 }
1113 #else
/* DM .start op: bring up the switch with the device's MAC address. */
static int cpsw_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_init(priv, pdata->enetaddr);
}
1121
/* DM .send op: transmit one packet. */
static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_send(priv, packet, length);
}
1128
/* DM .recv op: return one received packet (buffer recycled via .free_pkt). */
static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_recv(priv, packetp);
}
1135
/* DM .free_pkt op: hand a consumed rx buffer back to the DMA engine. */
static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
				   int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
}
1143
/* DM .stop op: stop DMA and reset the controller. */
static void cpsw_eth_stop(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_halt(priv);
}
1150
1151
/* DM probe: remember our udevice and run the common registration path. */
static int cpsw_eth_probe(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	/* _cpsw_register() uses priv->dev->name for the MDIO bus lookup */
	priv->dev = dev;

	return _cpsw_register(priv);
}
1160
/* driver-model ethernet operations */
static const struct eth_ops cpsw_eth_ops = {
	.start		= cpsw_eth_start,
	.send		= cpsw_eth_send,
	.recv		= cpsw_eth_recv,
	.free_pkt	= cpsw_eth_free_pkt,
	.stop		= cpsw_eth_stop,
};
1168
/* Read the first "reg" address of @node from the FDT blob. */
static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
{
	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL,
						  false);
}
1174
1175 static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv,
1176 phy_interface_t phy_mode)
1177 {
1178 u32 reg;
1179 u32 mask;
1180 u32 mode = 0;
1181 bool rgmii_id = false;
1182 int slave = priv->data.active_slave;
1183
1184 reg = readl(priv->data.gmii_sel);
1185
1186 switch (phy_mode) {
1187 case PHY_INTERFACE_MODE_RMII:
1188 mode = AM33XX_GMII_SEL_MODE_RMII;
1189 break;
1190
1191 case PHY_INTERFACE_MODE_RGMII:
1192 mode = AM33XX_GMII_SEL_MODE_RGMII;
1193 break;
1194 case PHY_INTERFACE_MODE_RGMII_ID:
1195 case PHY_INTERFACE_MODE_RGMII_RXID:
1196 case PHY_INTERFACE_MODE_RGMII_TXID:
1197 mode = AM33XX_GMII_SEL_MODE_RGMII;
1198 rgmii_id = true;
1199 break;
1200
1201 case PHY_INTERFACE_MODE_MII:
1202 default:
1203 mode = AM33XX_GMII_SEL_MODE_MII;
1204 break;
1205 };
1206
1207 mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
1208 mode <<= slave * 2;
1209
1210 if (priv->data.rmii_clock_external) {
1211 if (slave == 0)
1212 mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
1213 else
1214 mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
1215 }
1216
1217 if (rgmii_id) {
1218 if (slave == 0)
1219 mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
1220 else
1221 mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
1222 }
1223
1224 reg &= ~mask;
1225 reg |= mode;
1226
1227 writel(reg, priv->data.gmii_sel);
1228 }
1229
1230 static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv,
1231 phy_interface_t phy_mode)
1232 {
1233 u32 reg;
1234 u32 mask;
1235 u32 mode = 0;
1236 int slave = priv->data.active_slave;
1237
1238 reg = readl(priv->data.gmii_sel);
1239
1240 switch (phy_mode) {
1241 case PHY_INTERFACE_MODE_RMII:
1242 mode = AM33XX_GMII_SEL_MODE_RMII;
1243 break;
1244
1245 case PHY_INTERFACE_MODE_RGMII:
1246 case PHY_INTERFACE_MODE_RGMII_ID:
1247 case PHY_INTERFACE_MODE_RGMII_RXID:
1248 case PHY_INTERFACE_MODE_RGMII_TXID:
1249 mode = AM33XX_GMII_SEL_MODE_RGMII;
1250 break;
1251
1252 case PHY_INTERFACE_MODE_MII:
1253 default:
1254 mode = AM33XX_GMII_SEL_MODE_MII;
1255 break;
1256 };
1257
1258 switch (slave) {
1259 case 0:
1260 mask = GMII_SEL_MODE_MASK;
1261 break;
1262 case 1:
1263 mask = GMII_SEL_MODE_MASK << 4;
1264 mode <<= 4;
1265 break;
1266 default:
1267 dev_err(priv->dev, "invalid slave number...\n");
1268 return;
1269 }
1270
1271 if (priv->data.rmii_clock_external)
1272 dev_err(priv->dev, "RMII External clock is not supported\n");
1273
1274 reg &= ~mask;
1275 reg |= mode;
1276
1277 writel(reg, priv->data.gmii_sel);
1278 }
1279
1280 static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat,
1281 phy_interface_t phy_mode)
1282 {
1283 if (!strcmp(compat, "ti,am3352-cpsw-phy-sel"))
1284 cpsw_gmii_sel_am3352(priv, phy_mode);
1285 if (!strcmp(compat, "ti,am43xx-cpsw-phy-sel"))
1286 cpsw_gmii_sel_am3352(priv, phy_mode);
1287 else if (!strcmp(compat, "ti,dra7xx-cpsw-phy-sel"))
1288 cpsw_gmii_sel_dra7xx(priv, phy_mode);
1289 }
1290
1291 static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
1292 {
1293 struct eth_pdata *pdata = dev_get_platdata(dev);
1294 struct cpsw_priv *priv = dev_get_priv(dev);
1295 struct gpio_desc *mode_gpios;
1296 const char *phy_mode;
1297 const char *phy_sel_compat = NULL;
1298 const void *fdt = gd->fdt_blob;
1299 int node = dev_of_offset(dev);
1300 int subnode;
1301 int slave_index = 0;
1302 int active_slave;
1303 int num_mode_gpios;
1304 int ret;
1305
1306 pdata->iobase = devfdt_get_addr(dev);
1307 priv->data.version = CPSW_CTRL_VERSION_2;
1308 priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
1309 priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
1310 priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
1311 priv->data.mdio_div = CPSW_MDIO_DIV;
1312 priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET,
1313
1314 pdata->phy_interface = -1;
1315
1316 priv->data.cpsw_base = pdata->iobase;
1317 priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
1318 if (priv->data.channels <= 0) {
1319 printf("error: cpdma_channels not found in dt\n");
1320 return -ENOENT;
1321 }
1322
1323 priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
1324 if (priv->data.slaves <= 0) {
1325 printf("error: slaves not found in dt\n");
1326 return -ENOENT;
1327 }
1328 priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
1329 priv->data.slaves);
1330
1331 priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
1332 if (priv->data.ale_entries <= 0) {
1333 printf("error: ale_entries not found in dt\n");
1334 return -ENOENT;
1335 }
1336
1337 priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
1338 if (priv->data.bd_ram_ofs <= 0) {
1339 printf("error: bd_ram_size not found in dt\n");
1340 return -ENOENT;
1341 }
1342
1343 priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
1344 if (priv->data.mac_control <= 0) {
1345 printf("error: ale_entries not found in dt\n");
1346 return -ENOENT;
1347 }
1348
1349 num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
1350 if (num_mode_gpios > 0) {
1351 mode_gpios = malloc(sizeof(struct gpio_desc) *
1352 num_mode_gpios);
1353 gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
1354 num_mode_gpios, GPIOD_IS_OUT);
1355 free(mode_gpios);
1356 }
1357
1358 active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
1359 priv->data.active_slave = active_slave;
1360
1361 fdt_for_each_subnode(subnode, fdt, node) {
1362 int len;
1363 const char *name;
1364
1365 name = fdt_get_name(fdt, subnode, &len);
1366 if (!strncmp(name, "mdio", 4)) {
1367 u32 mdio_base;
1368
1369 mdio_base = cpsw_get_addr_by_node(fdt, subnode);
1370 if (mdio_base == FDT_ADDR_T_NONE) {
1371 pr_err("Not able to get MDIO address space\n");
1372 return -ENOENT;
1373 }
1374 priv->data.mdio_base = mdio_base;
1375 }
1376
1377 if (!strncmp(name, "slave", 5)) {
1378 u32 phy_id[2];
1379
1380 if (slave_index >= priv->data.slaves)
1381 continue;
1382 phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
1383 if (phy_mode)
1384 priv->data.slave_data[slave_index].phy_if =
1385 phy_get_interface_by_name(phy_mode);
1386
1387 priv->data.slave_data[slave_index].phy_of_handle =
1388 fdtdec_lookup_phandle(fdt, subnode,
1389 "phy-handle");
1390
1391 if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
1392 priv->data.slave_data[slave_index].phy_addr =
1393 fdtdec_get_int(gd->fdt_blob,
1394 priv->data.slave_data[slave_index].phy_of_handle,
1395 "reg", -1);
1396 } else {
1397 fdtdec_get_int_array(fdt, subnode, "phy_id",
1398 phy_id, 2);
1399 priv->data.slave_data[slave_index].phy_addr =
1400 phy_id[1];
1401 }
1402 slave_index++;
1403 }
1404
1405 if (!strncmp(name, "cpsw-phy-sel", 12)) {
1406 priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
1407 subnode);
1408
1409 if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
1410 pr_err("Not able to get gmii_sel reg address\n");
1411 return -ENOENT;
1412 }
1413
1414 if (fdt_get_property(fdt, subnode, "rmii-clock-ext",
1415 NULL))
1416 priv->data.rmii_clock_external = true;
1417
1418 phy_sel_compat = fdt_getprop(fdt, subnode, "compatible",
1419 NULL);
1420 if (!phy_sel_compat) {
1421 pr_err("Not able to get gmii_sel compatible\n");
1422 return -ENOENT;
1423 }
1424 }
1425 }
1426
1427 priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
1428 priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;
1429
1430 if (priv->data.slaves == 2) {
1431 priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
1432 priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
1433 }
1434
1435 ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
1436 if (ret < 0) {
1437 pr_err("cpsw read efuse mac failed\n");
1438 return ret;
1439 }
1440
1441 pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
1442 if (pdata->phy_interface == -1) {
1443 debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
1444 return -EINVAL;
1445 }
1446
1447 /* Select phy interface in control module */
1448 cpsw_phy_sel(priv, phy_sel_compat, pdata->phy_interface);
1449
1450 return 0;
1451 }
1452
1453
1454 static const struct udevice_id cpsw_eth_ids[] = {
1455 { .compatible = "ti,cpsw" },
1456 { .compatible = "ti,am335x-cpsw" },
1457 { }
1458 };
1459
/* Driver-model registration of the CPSW Ethernet switch driver */
U_BOOT_DRIVER(eth_cpsw) = {
	.name = "eth_cpsw",
	.id = UCLASS_ETH,
	.of_match = cpsw_eth_ids,
	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
	.probe = cpsw_eth_probe,
	.ops = &cpsw_eth_ops,
	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	/* priv holds DMA'd RX buffers, so allocate it DMA-aligned */
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
1471 #endif /* CONFIG_DM_ETH */