/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <netdev.h>
#include <cpsw.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>

#define BITMASK(bits)		(BIT(bits) - 1)
#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f
#define NUM_DESCS		(PKTBUFSRX * 2)
#define PKT_MIN			60
#define PKT_MAX			(1500 + 14 + 4 + 4)
#define CLEAR_BIT		1
#define GIGABITEN		BIT(7)
#define FULLDUPLEXEN		BIT(0)
#define MIIEN			BIT(15)

/* DMA Registers */
#define CPDMA_TXCONTROL		0x004
#define CPDMA_RXCONTROL		0x014
#define CPDMA_SOFTRESET		0x01c
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP_VER1	0x100
#define CPDMA_TXHDP_VER2	0x200
#define CPDMA_RXHDP_VER1	0x120
#define CPDMA_RXHDP_VER2	0x220
#define CPDMA_TXCP_VER1		0x140
#define CPDMA_TXCP_VER2		0x240
#define CPDMA_RXCP_VER1		0x160
#define CPDMA_RXCP_VER2		0x260
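/* Base of the on-chip descriptor (CPPI) RAM used below as the cpdma descriptor pool */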
#define CPDMA_RAM_ADDR		0x4a102000

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT		100 /* msecs */
#define CPDMA_TIMEOUT		100 /* msecs */

struct cpsw_mdio_regs {
	u32	version;
	u32	control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)

	u32	alive;
	u32	link;
	u32	linkintraw;
	u32	linkintmasked;
	u32	__reserved_0[2];
	u32	userintraw;
	u32	userintmasked;
	u32	userintmaskset;
	u32	userintmaskclr;
	u32	__reserved_1[20];

	struct {
		u32	access;
		u32	physel;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
	} user[0];
};

struct cpsw_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
};

struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	gap_thresh;
	u32	sa_lo;
	u32	sa_hi;
};

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL		0x08
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

#define ALE_TABLE_WRITE		BIT(31)

#define ALE_TYPE_FREE		0
#define ALE_TYPE_ADDR		1
#define ALE_TYPE_VLAN		2
#define ALE_TYPE_VLAN_ADDR	3

#define ALE_UCAST_PERSISTANT	0
#define ALE_UCAST_UNTOUCHED	1
#define ALE_UCAST_OUI		2
#define ALE_UCAST_TOUCHED	3

#define ALE_MCAST_FWD		0
#define ALE_MCAST_BLOCK_LEARN_FWD	1
#define ALE_MCAST_FWD_LEARN	2
#define ALE_MCAST_FWD_2		3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE	1
#define ALE_BLOCKED	2

struct cpsw_slave {
	struct cpsw_slave_regs		*regs;
	struct cpsw_sliver_regs		*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
};

struct cpdma_desc {
	/* hardware fields */
	u32	hw_next;
	u32	hw_buffer;
	u32	hw_len;
	u32	hw_mode;
	/* software fields */
	u32	sw_buffer;
	u32	sw_len;
};

struct cpdma_chan {
	struct cpdma_desc	*head, *tail;
	void			*hdp, *cp, *rxfree;
};

#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))

#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
				(priv)->data.slaves; slave++)

struct cpsw_priv {
	struct eth_device		*dev;
	struct cpsw_platform_data	data;
	int				host_port;

	struct cpsw_regs		*regs;
	void				*dma_regs;
	struct cpsw_host_regs		*host_port_regs;
	void				*ale_regs;

	struct cpdma_desc		*descs;
	struct cpdma_desc		*desc_free;
	struct cpdma_chan		rx_chan, tx_chan;

	struct cpsw_slave		*slaves;
	struct phy_device		*phydev;
	struct mii_dev			*bus;

	u32				mdio_link;
	u32				phy_mask;
};
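/*
 * ALE table entries are 68 bits wide and are read/written through the
 * ALE_TABLE registers as ALE_ENTRY_WORDS (three) 32-bit words.  The helpers
 * below extract and update bit fields within such an entry; note the word
 * index "flip" matching the order the words are transferred in
 * cpsw_ale_read()/cpsw_ale_write().
 */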
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |= (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
	return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
	cpsw_ale_set_field(ale_entry, start, bits, value); \
}

DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(mcast, 40, 1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}
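/*
 * Install a persistent unicast entry for 'addr' on 'port'.  If an entry for
 * the address already exists its slot is reused; otherwise a free slot is
 * taken, falling back to recycling an ageable (non-persistent, non-OUI)
 * entry.
 */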
static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}
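/*
 * MDIO user access: the register number goes in bits 25:21, the PHY address
 * in bits 20:16 and the data in bits 15:0 of the USERACCESS register; GO
 * starts the transaction and ACK flags a valid read.
 */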
static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	unsigned short data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}

static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
{
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency. These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon. Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	udelay(1000);

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	strcpy(bus->name, name);

	mdio_register(bus);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) | \
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
}

static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int link = 0;
	struct cpsw_slave *slave;

	for_each_slave(slave, priv)
		cpsw_slave_update_link(slave, priv, &link);
	priv->mdio_link = readl(&mdio_regs->link);
	return link;
}
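/*
 * Fast link check: if the MDIO link status for our PHYs is unchanged since
 * the last cpsw_update_link(), skip the (slow) PHY poll.
 */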
static int cpsw_check_link(struct cpsw_priv *priv)
{
	u32 link = 0;

	link = __raw_readl(&mdio_regs->link) & priv->phy_mask;
	if ((link) && (link == priv->mdio_link))
		return 1;

	return cpsw_update_link(priv);
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_id;
}

static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}
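/*
 * Queue a buffer on a DMA channel: fill a descriptor, chain it to the tail
 * of the channel's list and, if the channel had stopped (first packet, or
 * EOQ already raised on the previous tail), restart it by writing the head
 * descriptor pointer.
 */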
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}
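/*
 * Reap the descriptor at the head of a channel.  Returns -ENOENT if the
 * queue is empty and -EBUSY if the hardware still owns the head descriptor;
 * otherwise the buffer and length are handed back, the descriptor is
 * acknowledged via the completion pointer and recycled to the free list.
 */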
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}
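/*
 * Bring up the switch: reset the controller, program the ALE and host port,
 * initialize each slave, set up the descriptor pool and DMA channels, and
 * pre-submit the receive buffers.
 */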
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
			   ALE_SECURE);
	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);

	for_each_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 +
				     4 * i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 +
				     4 * i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

	return 0;
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	priv->data.control(0);
}
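/*
 * eth_device .send hook: make sure the link is up, flush the packet out of
 * the data cache so the DMA sees it, reap any completed tx descriptors,
 * then queue this one.
 */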
static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	if (!cpsw_check_link(priv))
		return -EIO;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + length);

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}
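/*
 * eth_device .recv hook: hand every completed rx buffer to the network
 * stack (after invalidating its cache lines) and immediately resubmit it
 * to the rx channel.
 */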
static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;

	cpsw_update_link(priv);

	while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + PKTSIZE_ALIGN);
		NetReceive(buffer, len);
		cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
	}

	return 0;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void *regs = priv->regs;
	struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data = data;
	slave->regs = regs + data->slave_reg_ofs;
	slave->sliver = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
{
	struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
	struct phy_device *phydev;
	u32 supported = (SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Full);

	phydev = phy_connect(priv->bus,
			     CONFIG_PHY_ADDR,
			     dev,
			     slave->data->phy_if);

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct cpsw_slave *slave;
	void *regs = (void *)data->cpsw_base;
	struct eth_device *dev;
	int idx = 0;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->data = *data;
	priv->dev = dev;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves) {
		free(dev);
		free(priv);
		return -ENOMEM;
	}

	priv->descs = (void *)CPDMA_RAM_ADDR;
	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	strcpy(dev->name, "cpsw");
	dev->iobase = 0;
	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;
	dev->priv = priv;

	eth_register(dev);

	cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(dev->name);
	for_each_slave(slave, priv)
		cpsw_phy_init(dev, slave);

	return 1;
}