/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <netdev.h>
#include <cpsw.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <phy.h>

#define BITMASK(bits) (BIT(bits) - 1)
#define PHY_REG_MASK 0x1f
#define PHY_ID_MASK 0x1f
#define NUM_DESCS (PKTBUFSRX * 2)
#define PKT_MIN 60
#define PKT_MAX (1500 + 14 + 4 + 4)
#define CLEAR_BIT 1
#define GIGABITEN BIT(7)
#define FULLDUPLEXEN BIT(0)
#define MIIEN BIT(15)

/* DMA Registers */
#define CPDMA_TXCONTROL 0x004
#define CPDMA_RXCONTROL 0x014
#define CPDMA_SOFTRESET 0x01c
#define CPDMA_RXFREE 0x0e0
#define CPDMA_TXHDP_VER1 0x100
#define CPDMA_TXHDP_VER2 0x200
#define CPDMA_RXHDP_VER1 0x120
#define CPDMA_RXHDP_VER2 0x220
#define CPDMA_TXCP_VER1 0x140
#define CPDMA_TXCP_VER2 0x240
#define CPDMA_RXCP_VER1 0x160
#define CPDMA_RXCP_VER2 0x260

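/*
 * Base of the descriptor pool used by this driver.  The hardcoded address
 * below matches the switch-internal buffer descriptor RAM on AM335x-class
 * SoCs (CPSW base + 0x2000); boards with a different memory map would need
 * to adjust it.
 */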
#define CPDMA_RAM_ADDR 0x4a102000

/* Descriptor mode bits */
#define CPDMA_DESC_SOP BIT(31)
#define CPDMA_DESC_EOP BIT(30)
#define CPDMA_DESC_OWNER BIT(29)
#define CPDMA_DESC_EOQ BIT(28)

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT 100 /* msecs */
#define CPDMA_TIMEOUT 100 /* msecs */

struct cpsw_mdio_regs {
	u32 version;
	u32 control;
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)

	u32 alive;
	u32 link;
	u32 linkintraw;
	u32 linkintmasked;
	u32 __reserved_0[2];
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclr;
	u32 __reserved_1[20];

	struct {
		u32 access;
		u32 physel;
#define USERACCESS_GO BIT(31)
#define USERACCESS_WRITE BIT(30)
#define USERACCESS_ACK BIT(29)
#define USERACCESS_READ (0)
#define USERACCESS_DATA (0xffff)
	} user[0];
};

struct cpsw_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
};

struct cpsw_slave_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 gap_thresh;
	u32 sa_lo;
	u32 sa_hi;
};

struct cpsw_host_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 cpdma_tx_pri_map;
	u32 cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
};

#define ALE_ENTRY_BITS 68
#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL 0x08
#define ALE_UNKNOWNVLAN 0x18
#define ALE_TABLE_CONTROL 0x20
#define ALE_TABLE 0x34
#define ALE_PORTCTL 0x40

#define ALE_TABLE_WRITE BIT(31)

#define ALE_TYPE_FREE 0
#define ALE_TYPE_ADDR 1
#define ALE_TYPE_VLAN 2
#define ALE_TYPE_VLAN_ADDR 3

#define ALE_UCAST_PERSISTANT 0
#define ALE_UCAST_UNTOUCHED 1
#define ALE_UCAST_OUI 2
#define ALE_UCAST_TOUCHED 3

#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
#define ALE_MCAST_FWD_LEARN 2
#define ALE_MCAST_FWD_2 3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE = 0x00,
	ALE_PORT_STATE_BLOCK = 0x01,
	ALE_PORT_STATE_LEARN = 0x02,
	ALE_PORT_STATE_FORWARD = 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE 1
#define ALE_BLOCKED 2

struct cpsw_slave {
	struct cpsw_slave_regs *regs;
	struct cpsw_sliver_regs *sliver;
	int slave_num;
	u32 mac_control;
	struct cpsw_slave_data *data;
};

struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;
	/* software fields */
	u32 sw_buffer;
	u32 sw_len;
};

struct cpdma_chan {
	struct cpdma_desc *head, *tail;
	void *hdp, *cp, *rxfree;
};

#define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld) __raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))

#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
			(priv)->data.slaves; slave++)

struct cpsw_priv {
	struct eth_device *dev;
	struct cpsw_platform_data data;
	int host_port;

	struct cpsw_regs *regs;
	void *dma_regs;
	struct cpsw_host_regs *host_port_regs;
	void *ale_regs;

	struct cpdma_desc *descs;
	struct cpdma_desc *desc_free;
	struct cpdma_chan rx_chan, tx_chan;

	struct cpsw_slave *slaves;
	struct phy_device *phydev;
	struct mii_dev *bus;
};

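/*
 * ALE table entries are ALE_ENTRY_BITS (68) bits wide and are transferred
 * as three 32-bit table words.  The helpers below extract and update bit
 * fields within such an entry; the "2 - idx" flip matches the word order
 * used by cpsw_ale_read()/cpsw_ale_write(), so field offsets can be given
 * as plain bit positions within the 68-bit entry.
 */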
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |= (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
	return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
	cpsw_ale_set_field(ale_entry, start, bits, value); \
}

DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(mcast, 40, 1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}

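/*
 * MDIO transactions go through the USERACCESS register: GO (bit 31) starts
 * a transfer and clears when it completes, WRITE (bit 30) selects the
 * direction, the register address sits at bits 25:21, the PHY address at
 * bits 20:16, and DATA occupies bits 15:0.  ACK (bit 29) reports whether a
 * read was answered by the PHY.
 */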
static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	unsigned short data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}

static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
{
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency. These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon. Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	udelay(1000);

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	strcpy(bus->name, name);

	mdio_register(bus);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}

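/*
 * The port MAC address registers hold the first four bytes of the address
 * in sa_hi and the remaining two in sa_lo, packed byte-by-byte by the
 * macros below.
 */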
#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
		     ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
}

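/*
 * Translate the PHY's negotiated state into MAC_CONTROL bits: gigabit and
 * full-duplex enables plus the MII enable used for 100 Mbit operation.
 * The register is only rewritten when the computed value changes, so
 * repeated polling stays cheap.
 */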
static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int link = 0;
	struct cpsw_slave *slave;

	for_each_slave(slave, priv)
		cpsw_slave_update_link(slave, priv, &link);

	return link;
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0; /* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);
}

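/*
 * Descriptors not currently queued on a channel are kept on a simple
 * singly-linked free list, reusing the hw_next field as the link pointer.
 */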
static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}

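/*
 * Queue a buffer on a CPDMA channel.  The descriptor is marked
 * SOP | EOP | OWNER and appended to the channel's software list; if the
 * hardware had already reached end-of-queue on the previous tail, the
 * channel is restarted by writing the new descriptor to the head pointer.
 * For the receive channel, the RXFREE count is also incremented.
 */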
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}

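/*
 * Reap the descriptor at the head of a channel.  If the hardware still owns
 * it, -EBUSY is returned (after restarting a stalled channel whose head
 * pointer has gone idle); otherwise the buffer and length are handed back,
 * completion is acknowledged through the CP register and the descriptor is
 * returned to the free list.
 */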
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}

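/*
 * Bring the switch up: reset the controller, program the ALE and host port,
 * initialize each slave port, build the descriptor free list in the
 * dedicated descriptor RAM, point the CPDMA channel registers at the
 * version-specific offsets, enable TX/RX DMA and pre-queue the receive
 * buffers.
 */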
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
			   ALE_SECURE);
	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);

	for_each_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 +
				     4 * i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 +
				     4 * i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

	return 0;
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	priv->data.control(0);
}

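/*
 * Transmit path: refuse to send while no slave reports link, flush the
 * packet out of the data cache so the DMA sees it, reclaim any completed
 * transmit descriptors, then queue the new buffer.
 */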
static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	if (!cpsw_update_link(priv))
		return -EIO;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + length);

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}

static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;

	cpsw_update_link(priv);

	while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + PKTSIZE_ALIGN);
		NetReceive(buffer, len);
		cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
	}

	return 0;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void *regs = priv->regs;
	struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data = data;
	slave->regs = regs + data->slave_reg_ofs;
	slave->sliver = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
{
	struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
	struct phy_device *phydev;
	u32 supported = (SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Full);

	phydev = phy_connect(priv->bus,
			     CONFIG_PHY_ADDR,
			     dev,
			     slave->data->phy_if);

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

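/*
 * cpsw_register() is the board-level entry point: it takes a filled-in
 * struct cpsw_platform_data, allocates the eth_device and driver state,
 * maps the register blocks from the supplied offsets, registers the device
 * and its MDIO bus, and connects a PHY for each slave.
 *
 * A minimal board-file sketch is given below.  The field values and the
 * cpsw_control() callback name are illustrative assumptions only; the real
 * offsets and base addresses must come from the SoC documentation or the
 * existing board files.
 *
 *	static struct cpsw_slave_data slave_data[] = {
 *		{ .slave_reg_ofs = 0x208, .sliver_reg_ofs = 0xd80,
 *		  .phy_if = PHY_INTERFACE_MODE_RGMII },
 *	};
 *
 *	static struct cpsw_platform_data cpsw_data = {
 *		.mdio_base = 0x4a101000,
 *		.cpsw_base = 0x4a100000,
 *		.mdio_div = 0xff,
 *		.channels = 8,
 *		.cpdma_reg_ofs = 0x800,
 *		.slaves = 1,
 *		.slave_data = slave_data,
 *		.ale_reg_ofs = 0xd00,
 *		.ale_entries = 1024,
 *		.host_port_reg_ofs = 0x108,
 *		.host_port_num = 0,
 *		.mac_control = (1 << 5),
 *		.control = cpsw_control,
 *		.version = CPSW_CTRL_VERSION_2,
 *	};
 *
 *	rc = cpsw_register(&cpsw_data);
 */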
int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct cpsw_slave *slave;
	void *regs = (void *)data->cpsw_base;
	struct eth_device *dev;
	int idx = 0;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->data = *data;
	priv->dev = dev;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves) {
		free(dev);
		free(priv);
		return -ENOMEM;
	}

	priv->descs = (void *)CPDMA_RAM_ADDR;
	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	strcpy(dev->name, "cpsw");
	dev->iobase = 0;
	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;
	dev->priv = priv;

	eth_register(dev);

	cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(dev->name);
	for_each_slave(slave, priv)
		cpsw_phy_init(dev, slave);

	return 1;
}