]>
Commit | Line | Data |
---|---|---|
7737d5c6 DL |
1 | /* |
2 | * Copyright (C) 2006 Freescale Semiconductor, Inc. | |
3 | * | |
4 | * Dave Liu <daveliu@freescale.com> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License as | |
8 | * published by the Free Software Foundation; either version 2 of | |
9 | * the License, or (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | |
19 | * MA 02111-1307 USA | |
20 | */ | |
21 | ||
22 | #include "common.h" | |
23 | #include "net.h" | |
24 | #include "malloc.h" | |
25 | #include "asm/errno.h" | |
26 | #include "asm/io.h" | |
27 | #include "asm/immap_qe.h" | |
28 | #include "qe.h" | |
29 | #include "uccf.h" | |
30 | #include "uec.h" | |
31 | #include "uec_phy.h" | |
32 | ||
33 | #if defined(CONFIG_QE) | |
34 | ||
#ifdef CONFIG_UEC_ETH1
/*
 * Static configuration for the first UEC Ethernet controller.
 * UCC number, clocks, PHY address and interface mode come from the
 * board configuration (CFG_UEC1_*); both directions use 4 RISC threads
 * spread across RISC1 and RISC2, with 16 buffer descriptors per ring.
 */
static uec_info_t eth1_uec_info = {
	.uf_info		= {
		.ucc_num	= CFG_UEC1_UCC_NUM,
		.rx_clock	= CFG_UEC1_RX_CLK,
		.tx_clock	= CFG_UEC1_TX_CLK,
		.eth_type	= CFG_UEC1_ETH_TYPE,
	},
	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.tx_bd_ring_len		= 16,	/* Tx BD ring length */
	.rx_bd_ring_len		= 16,	/* Rx BD ring length */
	.phy_address		= CFG_UEC1_PHY_ADDR,
	.enet_interface		= CFG_UEC1_INTERFACE_MODE,
};
#endif
#ifdef CONFIG_UEC_ETH2
/*
 * Static configuration for the second UEC Ethernet controller;
 * identical layout to eth1_uec_info but driven by the CFG_UEC2_*
 * board-configuration macros.
 */
static uec_info_t eth2_uec_info = {
	.uf_info		= {
		.ucc_num	= CFG_UEC2_UCC_NUM,
		.rx_clock	= CFG_UEC2_RX_CLK,
		.tx_clock	= CFG_UEC2_TX_CLK,
		.eth_type	= CFG_UEC2_ETH_TYPE,
	},
	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.tx_bd_ring_len		= 16,	/* Tx BD ring length */
	.rx_bd_ring_len		= 16,	/* Rx BD ring length */
	.phy_address		= CFG_UEC2_PHY_ADDR,
	.enet_interface		= CFG_UEC2_INTERFACE_MODE,
};
#endif
71 | ||
72 | static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode) | |
73 | { | |
74 | uec_t *uec_regs; | |
75 | u32 maccfg1; | |
76 | ||
77 | if (!uec) { | |
78 | printf("%s: uec not initial\n", __FUNCTION__); | |
79 | return -EINVAL; | |
80 | } | |
81 | uec_regs = uec->uec_regs; | |
82 | ||
83 | maccfg1 = in_be32(&uec_regs->maccfg1); | |
84 | ||
85 | if (mode & COMM_DIR_TX) { | |
86 | maccfg1 |= MACCFG1_ENABLE_TX; | |
87 | out_be32(&uec_regs->maccfg1, maccfg1); | |
88 | uec->mac_tx_enabled = 1; | |
89 | } | |
90 | ||
91 | if (mode & COMM_DIR_RX) { | |
92 | maccfg1 |= MACCFG1_ENABLE_RX; | |
93 | out_be32(&uec_regs->maccfg1, maccfg1); | |
94 | uec->mac_rx_enabled = 1; | |
95 | } | |
96 | ||
97 | return 0; | |
98 | } | |
99 | ||
100 | static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode) | |
101 | { | |
102 | uec_t *uec_regs; | |
103 | u32 maccfg1; | |
104 | ||
105 | if (!uec) { | |
106 | printf("%s: uec not initial\n", __FUNCTION__); | |
107 | return -EINVAL; | |
108 | } | |
109 | uec_regs = uec->uec_regs; | |
110 | ||
111 | maccfg1 = in_be32(&uec_regs->maccfg1); | |
112 | ||
113 | if (mode & COMM_DIR_TX) { | |
114 | maccfg1 &= ~MACCFG1_ENABLE_TX; | |
115 | out_be32(&uec_regs->maccfg1, maccfg1); | |
116 | uec->mac_tx_enabled = 0; | |
117 | } | |
118 | ||
119 | if (mode & COMM_DIR_RX) { | |
120 | maccfg1 &= ~MACCFG1_ENABLE_RX; | |
121 | out_be32(&uec_regs->maccfg1, maccfg1); | |
122 | uec->mac_rx_enabled = 0; | |
123 | } | |
124 | ||
125 | return 0; | |
126 | } | |
127 | ||
/*
 * Gracefully stop the Tx path.
 *
 * Clears any stale GRA event in UCCE, issues the QE GRACEFUL_STOP_TX
 * host command for this UCC and busy-waits until the controller raises
 * the UCCE_GRA acknowledge bit, then records the stopped state.
 *
 * Returns 0 on success, -EINVAL on a bad handle.
 *
 * NOTE(review): the acknowledge poll below is unbounded -- if the QE
 * never raises UCCE_GRA this loop spins forever. Consider a timeout.
 */
static int uec_graceful_stop_tx(uec_private_t *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	uf_regs = uec->uccf->uf_regs;

	/* Clear the grace stop event (write-1-to-clear) */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (! (ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}
159 | ||
/*
 * Gracefully stop the Rx path.
 *
 * Clears the acknowledge bit in the Rx global parameter RAM, then
 * repeatedly issues the QE GRACEFUL_STOP_RX host command until the
 * microcode sets GRACEFUL_STOP_ACKNOWLEDGE_RX, and records the stopped
 * state.
 *
 * Returns 0 on success, -EINVAL on a bad handle or when the Rx global
 * parameter RAM has not been initialised yet.
 *
 * NOTE(review): the command/acknowledge loop below is unbounded -- if
 * the microcode never acknowledges, this spins forever. Consider a
 * timeout.
 */
static int uec_graceful_stop_rx(uec_private_t *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	if (!uec->p_rx_glbl_pram) {
		printf("%s: No init rx global parameter\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing cmd and checking ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
				(u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (! (ack & GRACEFUL_STOP_ACKNOWLEDGE_RX ));

	uec->grace_stopped_rx = 1;

	return 0;
}
194 | ||
195 | static int uec_restart_tx(uec_private_t *uec) | |
196 | { | |
197 | u32 cecr_subblock; | |
198 | ||
199 | if (!uec || !uec->uec_info) { | |
200 | printf("%s: No handle passed.\n", __FUNCTION__); | |
201 | return -EINVAL; | |
202 | } | |
203 | ||
204 | cecr_subblock = | |
205 | ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num); | |
206 | qe_issue_cmd(QE_RESTART_TX, cecr_subblock, | |
207 | (u8)QE_CR_PROTOCOL_ETHERNET, 0); | |
208 | ||
209 | uec->grace_stopped_tx = 0; | |
210 | ||
211 | return 0; | |
212 | } | |
213 | ||
214 | static int uec_restart_rx(uec_private_t *uec) | |
215 | { | |
216 | u32 cecr_subblock; | |
217 | ||
218 | if (!uec || !uec->uec_info) { | |
219 | printf("%s: No handle passed.\n", __FUNCTION__); | |
220 | return -EINVAL; | |
221 | } | |
222 | ||
223 | cecr_subblock = | |
224 | ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num); | |
225 | qe_issue_cmd(QE_RESTART_RX, cecr_subblock, | |
226 | (u8)QE_CR_PROTOCOL_ETHERNET, 0); | |
227 | ||
228 | uec->grace_stopped_rx = 0; | |
229 | ||
230 | return 0; | |
231 | } | |
232 | ||
233 | static int uec_open(uec_private_t *uec, comm_dir_e mode) | |
234 | { | |
235 | ucc_fast_private_t *uccf; | |
236 | ||
237 | if (!uec || !uec->uccf) { | |
238 | printf("%s: No handle passed.\n", __FUNCTION__); | |
239 | return -EINVAL; | |
240 | } | |
241 | uccf = uec->uccf; | |
242 | ||
243 | /* check if the UCC number is in range. */ | |
244 | if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) { | |
245 | printf("%s: ucc_num out of range.\n", __FUNCTION__); | |
246 | return -EINVAL; | |
247 | } | |
248 | ||
249 | /* Enable MAC */ | |
250 | uec_mac_enable(uec, mode); | |
251 | ||
252 | /* Enable UCC fast */ | |
253 | ucc_fast_enable(uccf, mode); | |
254 | ||
255 | /* RISC microcode start */ | |
256 | if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) { | |
257 | uec_restart_tx(uec); | |
258 | } | |
259 | if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) { | |
260 | uec_restart_rx(uec); | |
261 | } | |
262 | ||
263 | return 0; | |
264 | } | |
265 | ||
266 | static int uec_stop(uec_private_t *uec, comm_dir_e mode) | |
267 | { | |
268 | ucc_fast_private_t *uccf; | |
269 | ||
270 | if (!uec || !uec->uccf) { | |
271 | printf("%s: No handle passed.\n", __FUNCTION__); | |
272 | return -EINVAL; | |
273 | } | |
274 | uccf = uec->uccf; | |
275 | ||
276 | /* check if the UCC number is in range. */ | |
277 | if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) { | |
278 | printf("%s: ucc_num out of range.\n", __FUNCTION__); | |
279 | return -EINVAL; | |
280 | } | |
281 | /* Stop any transmissions */ | |
282 | if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) { | |
283 | uec_graceful_stop_tx(uec); | |
284 | } | |
285 | /* Stop any receptions */ | |
286 | if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) { | |
287 | uec_graceful_stop_rx(uec); | |
288 | } | |
289 | ||
290 | /* Disable the UCC fast */ | |
291 | ucc_fast_disable(uec->uccf, mode); | |
292 | ||
293 | /* Disable the MAC */ | |
294 | uec_mac_disable(uec, mode); | |
295 | ||
296 | return 0; | |
297 | } | |
298 | ||
299 | static int uec_set_mac_duplex(uec_private_t *uec, int duplex) | |
300 | { | |
301 | uec_t *uec_regs; | |
302 | u32 maccfg2; | |
303 | ||
304 | if (!uec) { | |
305 | printf("%s: uec not initial\n", __FUNCTION__); | |
306 | return -EINVAL; | |
307 | } | |
308 | uec_regs = uec->uec_regs; | |
309 | ||
310 | if (duplex == DUPLEX_HALF) { | |
311 | maccfg2 = in_be32(&uec_regs->maccfg2); | |
312 | maccfg2 &= ~MACCFG2_FDX; | |
313 | out_be32(&uec_regs->maccfg2, maccfg2); | |
314 | } | |
315 | ||
316 | if (duplex == DUPLEX_FULL) { | |
317 | maccfg2 = in_be32(&uec_regs->maccfg2); | |
318 | maccfg2 |= MACCFG2_FDX; | |
319 | out_be32(&uec_regs->maccfg2, maccfg2); | |
320 | } | |
321 | ||
322 | return 0; | |
323 | } | |
324 | ||
325 | static int uec_set_mac_if_mode(uec_private_t *uec, enet_interface_e if_mode) | |
326 | { | |
327 | enet_interface_e enet_if_mode; | |
328 | uec_info_t *uec_info; | |
329 | uec_t *uec_regs; | |
330 | u32 upsmr; | |
331 | u32 maccfg2; | |
332 | ||
333 | if (!uec) { | |
334 | printf("%s: uec not initial\n", __FUNCTION__); | |
335 | return -EINVAL; | |
336 | } | |
337 | ||
338 | uec_info = uec->uec_info; | |
339 | uec_regs = uec->uec_regs; | |
340 | enet_if_mode = if_mode; | |
341 | ||
342 | maccfg2 = in_be32(&uec_regs->maccfg2); | |
343 | maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; | |
344 | ||
345 | upsmr = in_be32(&uec->uccf->uf_regs->upsmr); | |
346 | upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM); | |
347 | ||
348 | switch (enet_if_mode) { | |
349 | case ENET_100_MII: | |
350 | case ENET_10_MII: | |
351 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; | |
352 | break; | |
353 | case ENET_1000_GMII: | |
354 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; | |
355 | break; | |
356 | case ENET_1000_TBI: | |
357 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; | |
358 | upsmr |= UPSMR_TBIM; | |
359 | break; | |
360 | case ENET_1000_RTBI: | |
361 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; | |
362 | upsmr |= (UPSMR_RPM | UPSMR_TBIM); | |
363 | break; | |
364 | case ENET_1000_RGMII: | |
365 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; | |
366 | upsmr |= UPSMR_RPM; | |
367 | break; | |
368 | case ENET_100_RGMII: | |
369 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; | |
370 | upsmr |= UPSMR_RPM; | |
371 | break; | |
372 | case ENET_10_RGMII: | |
373 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; | |
374 | upsmr |= (UPSMR_RPM | UPSMR_R10M); | |
375 | break; | |
376 | case ENET_100_RMII: | |
377 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; | |
378 | upsmr |= UPSMR_RMM; | |
379 | break; | |
380 | case ENET_10_RMII: | |
381 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; | |
382 | upsmr |= (UPSMR_R10M | UPSMR_RMM); | |
383 | break; | |
384 | default: | |
385 | return -EINVAL; | |
386 | break; | |
387 | } | |
388 | out_be32(&uec_regs->maccfg2, maccfg2); | |
389 | out_be32(&uec->uccf->uf_regs->upsmr, upsmr); | |
390 | ||
391 | return 0; | |
392 | } | |
393 | ||
394 | static int init_mii_management_configuration(uec_t *uec_regs) | |
395 | { | |
396 | uint timeout = 0x1000; | |
397 | u32 miimcfg = 0; | |
398 | ||
399 | miimcfg = in_be32(&uec_regs->miimcfg); | |
400 | miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE; | |
401 | out_be32(&uec_regs->miimcfg, miimcfg); | |
402 | ||
403 | /* Wait until the bus is free */ | |
404 | while ((in_be32(&uec_regs->miimcfg) & MIIMIND_BUSY) && timeout--); | |
405 | if (timeout <= 0) { | |
406 | printf("%s: The MII Bus is stuck!", __FUNCTION__); | |
407 | return -ETIMEDOUT; | |
408 | } | |
409 | ||
410 | return 0; | |
411 | } | |
412 | ||
/*
 * Discover and initialise the PHY attached to this UEC.
 *
 * Allocates a uec_mii_info descriptor (advertising all 10/100/1000
 * modes with autonegotiation enabled), configures the MII management
 * interface, looks up the PHY driver and runs its init hook.
 *
 * Returns 0 on success; -ENOMEM if the descriptor allocation fails,
 * and a negative value on MII-bus, PHY-discovery or PHY-init failure.
 * mii_info is freed on every failure path (uec->mii_info is then left
 * dangling -- callers must treat a non-zero return as fatal).
 */
static int init_phy(struct eth_device *dev)
{
	uec_private_t *uec;
	uec_t *uec_regs;
	struct uec_mii_info *mii_info;
	struct phy_info *curphy;
	int err;

	uec = (uec_private_t *)dev->priv;
	uec_regs = uec->uec_regs;

	/* No link state known yet */
	uec->oldlink = 0;
	uec->oldspeed = 0;
	uec->oldduplex = -1;

	mii_info = malloc(sizeof(*mii_info));
	if (!mii_info) {
		printf("%s: Could not allocate mii_info", dev->name);
		return -ENOMEM;
	}
	memset(mii_info, 0, sizeof(*mii_info));

	/* Optimistic defaults until autonegotiation reports real values */
	mii_info->speed = SPEED_1000;
	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;
	mii_info->mii_id = uec->uec_info->phy_address;
	mii_info->dev = dev;

	/* MDIO accessors used by the generic PHY code */
	mii_info->mdio_read = &read_phy_reg;
	mii_info->mdio_write = &write_phy_reg;

	uec->mii_info = mii_info;

	if (init_mii_management_configuration(uec_regs)) {
		printf("%s: The MII Bus is stuck!", dev->name);
		err = -1;
		goto bus_fail;
	}

	/* get info for this PHY */
	curphy = get_phy_info(uec->mii_info);
	if (!curphy) {
		printf("%s: No PHY found", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(uec->mii_info);
		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	free(mii_info);
	return err;
}
485 | ||
486 | static void adjust_link(struct eth_device *dev) | |
487 | { | |
488 | uec_private_t *uec = (uec_private_t *)dev->priv; | |
489 | uec_t *uec_regs; | |
490 | struct uec_mii_info *mii_info = uec->mii_info; | |
491 | ||
492 | extern void change_phy_interface_mode(struct eth_device *dev, | |
493 | enet_interface_e mode); | |
494 | uec_regs = uec->uec_regs; | |
495 | ||
496 | if (mii_info->link) { | |
497 | /* Now we make sure that we can be in full duplex mode. | |
498 | * If not, we operate in half-duplex mode. */ | |
499 | if (mii_info->duplex != uec->oldduplex) { | |
500 | if (!(mii_info->duplex)) { | |
501 | uec_set_mac_duplex(uec, DUPLEX_HALF); | |
502 | printf("%s: Half Duplex\n", dev->name); | |
503 | } else { | |
504 | uec_set_mac_duplex(uec, DUPLEX_FULL); | |
505 | printf("%s: Full Duplex\n", dev->name); | |
506 | } | |
507 | uec->oldduplex = mii_info->duplex; | |
508 | } | |
509 | ||
510 | if (mii_info->speed != uec->oldspeed) { | |
511 | switch (mii_info->speed) { | |
512 | case 1000: | |
513 | break; | |
514 | case 100: | |
515 | printf ("switching to rgmii 100\n"); | |
516 | /* change phy to rgmii 100 */ | |
517 | change_phy_interface_mode(dev, | |
518 | ENET_100_RGMII); | |
519 | /* change the MAC interface mode */ | |
520 | uec_set_mac_if_mode(uec,ENET_100_RGMII); | |
521 | break; | |
522 | case 10: | |
523 | printf ("switching to rgmii 10\n"); | |
524 | /* change phy to rgmii 10 */ | |
525 | change_phy_interface_mode(dev, | |
526 | ENET_10_RGMII); | |
527 | /* change the MAC interface mode */ | |
528 | uec_set_mac_if_mode(uec,ENET_10_RGMII); | |
529 | break; | |
530 | default: | |
531 | printf("%s: Ack,Speed(%d)is illegal\n", | |
532 | dev->name, mii_info->speed); | |
533 | break; | |
534 | } | |
535 | ||
536 | printf("%s: Speed %dBT\n", dev->name, mii_info->speed); | |
537 | uec->oldspeed = mii_info->speed; | |
538 | } | |
539 | ||
540 | if (!uec->oldlink) { | |
541 | printf("%s: Link is up\n", dev->name); | |
542 | uec->oldlink = 1; | |
543 | } | |
544 | ||
545 | } else { /* if (mii_info->link) */ | |
546 | if (uec->oldlink) { | |
547 | printf("%s: Link is down\n", dev->name); | |
548 | uec->oldlink = 0; | |
549 | uec->oldspeed = 0; | |
550 | uec->oldduplex = -1; | |
551 | } | |
552 | } | |
553 | } | |
554 | ||
555 | static void phy_change(struct eth_device *dev) | |
556 | { | |
557 | uec_private_t *uec = (uec_private_t *)dev->priv; | |
558 | uec_t *uec_regs; | |
559 | int result = 0; | |
560 | ||
561 | uec_regs = uec->uec_regs; | |
562 | ||
563 | /* Delay 5s to give the PHY a chance to change the register state */ | |
564 | udelay(5000000); | |
565 | ||
566 | /* Update the link, speed, duplex */ | |
567 | result = uec->mii_info->phyinfo->read_status(uec->mii_info); | |
568 | ||
569 | /* Adjust the interface according to speed */ | |
570 | if ((0 == result) || (uec->mii_info->link == 0)) { | |
571 | adjust_link(dev); | |
572 | } | |
573 | } | |
574 | ||
575 | static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr) | |
576 | { | |
577 | uec_t *uec_regs; | |
578 | u32 mac_addr1; | |
579 | u32 mac_addr2; | |
580 | ||
581 | if (!uec) { | |
582 | printf("%s: uec not initial\n", __FUNCTION__); | |
583 | return -EINVAL; | |
584 | } | |
585 | ||
586 | uec_regs = uec->uec_regs; | |
587 | ||
588 | /* if a station address of 0x12345678ABCD, perform a write to | |
589 | MACSTNADDR1 of 0xCDAB7856, | |
590 | MACSTNADDR2 of 0x34120000 */ | |
591 | ||
592 | mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \ | |
593 | (mac_addr[3] << 8) | (mac_addr[2]); | |
594 | out_be32(&uec_regs->macstnaddr1, mac_addr1); | |
595 | ||
596 | mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000; | |
597 | out_be32(&uec_regs->macstnaddr2, mac_addr2); | |
598 | ||
599 | return 0; | |
600 | } | |
601 | ||
602 | static int uec_convert_threads_num(uec_num_of_threads_e threads_num, | |
603 | int *threads_num_ret) | |
604 | { | |
605 | int num_threads_numerica; | |
606 | ||
607 | switch (threads_num) { | |
608 | case UEC_NUM_OF_THREADS_1: | |
609 | num_threads_numerica = 1; | |
610 | break; | |
611 | case UEC_NUM_OF_THREADS_2: | |
612 | num_threads_numerica = 2; | |
613 | break; | |
614 | case UEC_NUM_OF_THREADS_4: | |
615 | num_threads_numerica = 4; | |
616 | break; | |
617 | case UEC_NUM_OF_THREADS_6: | |
618 | num_threads_numerica = 6; | |
619 | break; | |
620 | case UEC_NUM_OF_THREADS_8: | |
621 | num_threads_numerica = 8; | |
622 | break; | |
623 | default: | |
624 | printf("%s: Bad number of threads value.", | |
625 | __FUNCTION__); | |
626 | return -EINVAL; | |
627 | } | |
628 | ||
629 | *threads_num_ret = num_threads_numerica; | |
630 | ||
631 | return 0; | |
632 | } | |
633 | ||
/*
 * Allocate and initialise the per-UCC global Tx parameter RAM in MURAM:
 * TEMODER, the (single) send-queue descriptor pointing at the Tx BD
 * ring, scheduler/RMON pointers (disabled), TSTATE bus-mode bits, the
 * IP-header-offset and VLAN-tag tables, and the per-thread Tx data
 * areas for num_threads_tx threads.
 *
 * Assumes uec->p_tx_bd_ring has already been set up by the caller.
 * NOTE(review): qe_muram_alloc() return values are used unchecked --
 * confirm allocation failure cannot occur at this point in boot.
 */
static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
{
	uec_info_t *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Alloc global Tx parameter RAM page */
	uec->tx_glbl_pram_offset = qe_muram_alloc(
				sizeof(uec_tx_global_pram_t),
				UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx prameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR */
	uec->send_q_mem_reg_offset = qe_muram_alloc(
				sizeof(uec_send_queue_qd_t),
				UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Setup the table with TxBDs ring; end_bd is the last BD's address */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
						* SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
				(u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
				end_bd);

	/* Scheduler Base Pointer, we have only one Tx queue, no need it */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer, TxRMON disable, we don't need it */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
	}

	/* VTAG table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
	}

	/* TQPTR (an extra 32 bytes is reserved in the single-thread case --
	 * presumably alignment slack; verify against the QE manual) */
	uec->thread_dat_tx_offset = qe_muram_alloc(
		num_threads_tx * sizeof(uec_thread_data_tx_t) +
		32 *(num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}
703 | ||
/*
 * Allocate and initialise the per-UCC global Rx parameter RAM in MURAM:
 * REMODER, the per-thread Rx data areas for num_threads_rx threads,
 * frame/DMA length limits, the Rx BD queue table pointing at the Rx BD
 * ring, VLAN defaults, and a cleared PQ2-style address-filtering table.
 *
 * Assumes uec->p_rx_bd_ring has already been set up by the caller.
 * NOTE(review): qe_muram_alloc() return values are used unchecked --
 * confirm allocation failure cannot occur at this point in boot.
 */
static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	uec_82xx_address_filtering_pram_t *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset = qe_muram_alloc(
		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero Global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));

	/* Init global Rx parameter RAM */
	/* REMODER, Extended feature mode disable, VLAN disable,
	 LossLess flow control disable, Receive firmware statisic disable,
	 Extended address parsing mode disable, One Rx queues,
	 Dynamic maximum/minimum frame length disable, IP checksum check
	 disable, IP address alignment disable
	*/
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset = qe_muram_alloc(
			num_threads_rx * sizeof(uec_thread_data_rx_t),
			UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
				sizeof(uec_rx_bd_queues_entry_t) + \
				sizeof(uec_rx_prefetched_bds_t),
				UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
				sizeof(uec_rx_prefetched_bds_t));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++) {
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
	}

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}
798 | ||
/*
 * Build the QE "init enet" command parameter block in MURAM and issue
 * the QE_INIT_TX_RX host command for this UCC.
 *
 * thread_tx / thread_rx are the numeric thread counts previously
 * derived by uec_convert_threads_num(). For each thread a serial
 * number (snum) is claimed from the QE and a per-thread parameter RAM
 * area is allocated; entry 0 of the Rx thread table deliberately uses
 * offset 0.
 *
 * Returns 0 on success, -ENOMEM if no snum could be obtained.
 *
 * NOTE(review): the Rx loop fills thread_rx + 1 entries (one more than
 * the thread count) while Tx fills exactly thread_tx -- confirm
 * against the QE reference manual.
 * NOTE(review): snums claimed before a failing qe_get_snum() call are
 * not released on the -ENOMEM paths.
 */
static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
					int thread_tx, int thread_rx)
{
	uec_init_cmd_pram_t *p_init_enet_param;
	u32 init_enet_param_offset;
	uec_info_t *uec_info;
	int i;
	int snum;
	u32 init_enet_offset;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset = qe_muram_alloc(
					sizeof(uec_init_cmd_pram_t), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	/* Encode the configured Rx/Tx thread counts */
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					 << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					 << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
						 (u32)uec_info->riscRx;

	/* Init Rx threads */
	for (i = 0; i < (thread_rx + 1); i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		if (i==0) {
			init_enet_offset = 0;
		} else {
			init_enet_offset = qe_muram_alloc(
					sizeof(uec_thread_rx_pram_t),
					UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->riscRx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
					 (u32)uec_info->riscTx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		if ((snum = qe_get_snum()) < 0)	{
			printf("%s can not get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
						UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->riscTx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	/* Ensure all parameter RAM stores complete before the command */
	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock =	ucc_fast_get_qe_cr_subblock(
				uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
				 init_enet_param_offset);

	return 0;
}
892 | ||
893 | static int uec_startup(uec_private_t *uec) | |
894 | { | |
895 | uec_info_t *uec_info; | |
896 | ucc_fast_info_t *uf_info; | |
897 | ucc_fast_private_t *uccf; | |
898 | ucc_fast_t *uf_regs; | |
899 | uec_t *uec_regs; | |
900 | int num_threads_tx; | |
901 | int num_threads_rx; | |
902 | u32 utbipar; | |
903 | enet_interface_e enet_interface; | |
904 | u32 length; | |
905 | u32 align; | |
906 | qe_bd_t *bd; | |
907 | u8 *buf; | |
908 | int i; | |
909 | ||
910 | if (!uec || !uec->uec_info) { | |
911 | printf("%s: uec or uec_info not initial\n", __FUNCTION__); | |
912 | return -EINVAL; | |
913 | } | |
914 | ||
915 | uec_info = uec->uec_info; | |
916 | uf_info = &(uec_info->uf_info); | |
917 | ||
918 | /* Check if Rx BD ring len is illegal */ | |
919 | if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \ | |
920 | (uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) { | |
921 | printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n", | |
922 | __FUNCTION__); | |
923 | return -EINVAL; | |
924 | } | |
925 | ||
926 | /* Check if Tx BD ring len is illegal */ | |
927 | if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) { | |
928 | printf("%s: Tx BD ring length must not be smaller than 2.\n", | |
929 | __FUNCTION__); | |
930 | return -EINVAL; | |
931 | } | |
932 | ||
933 | /* Check if MRBLR is illegal */ | |
934 | if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) { | |
935 | printf("%s: max rx buffer length must be mutliple of 128.\n", | |
936 | __FUNCTION__); | |
937 | return -EINVAL; | |
938 | } | |
939 | ||
940 | /* Both Rx and Tx are stopped */ | |
941 | uec->grace_stopped_rx = 1; | |
942 | uec->grace_stopped_tx = 1; | |
943 | ||
944 | /* Init UCC fast */ | |
945 | if (ucc_fast_init(uf_info, &uccf)) { | |
946 | printf("%s: failed to init ucc fast\n", __FUNCTION__); | |
947 | return -ENOMEM; | |
948 | } | |
949 | ||
950 | /* Save uccf */ | |
951 | uec->uccf = uccf; | |
952 | ||
953 | /* Convert the Tx threads number */ | |
954 | if (uec_convert_threads_num(uec_info->num_threads_tx, | |
955 | &num_threads_tx)) { | |
956 | return -EINVAL; | |
957 | } | |
958 | ||
959 | /* Convert the Rx threads number */ | |
960 | if (uec_convert_threads_num(uec_info->num_threads_rx, | |
961 | &num_threads_rx)) { | |
962 | return -EINVAL; | |
963 | } | |
964 | ||
965 | uf_regs = uccf->uf_regs; | |
966 | ||
967 | /* UEC register is following UCC fast registers */ | |
968 | uec_regs = (uec_t *)(&uf_regs->ucc_eth); | |
969 | ||
970 | /* Save the UEC register pointer to UEC private struct */ | |
971 | uec->uec_regs = uec_regs; | |
972 | ||
973 | /* Init UPSMR, enable hardware statistics (UCC) */ | |
974 | out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE); | |
975 | ||
976 | /* Init MACCFG1, flow control disable, disable Tx and Rx */ | |
977 | out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE); | |
978 | ||
979 | /* Init MACCFG2, length check, MAC PAD and CRC enable */ | |
980 | out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE); | |
981 | ||
982 | /* Setup MAC interface mode */ | |
983 | uec_set_mac_if_mode(uec, uec_info->enet_interface); | |
984 | ||
985 | /* Setup MII master clock source */ | |
986 | qe_set_mii_clk_src(uec_info->uf_info.ucc_num); | |
987 | ||
988 | /* Setup UTBIPAR */ | |
989 | utbipar = in_be32(&uec_regs->utbipar); | |
990 | utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK; | |
991 | enet_interface = uec->uec_info->enet_interface; | |
992 | if (enet_interface == ENET_1000_TBI || | |
993 | enet_interface == ENET_1000_RTBI) { | |
994 | utbipar |= (uec_info->phy_address + uec_info->uf_info.ucc_num) | |
995 | << UTBIPAR_PHY_ADDRESS_SHIFT; | |
996 | } else { | |
997 | utbipar |= (0x10 + uec_info->uf_info.ucc_num) | |
998 | << UTBIPAR_PHY_ADDRESS_SHIFT; | |
999 | } | |
1000 | ||
1001 | out_be32(&uec_regs->utbipar, utbipar); | |
1002 | ||
1003 | /* Allocate Tx BDs */ | |
1004 | length = ((uec_info->tx_bd_ring_len * SIZEOFBD) / | |
1005 | UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) * | |
1006 | UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | |
1007 | if ((uec_info->tx_bd_ring_len * SIZEOFBD) % | |
1008 | UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) { | |
1009 | length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | |
1010 | } | |
1011 | ||
1012 | align = UEC_TX_BD_RING_ALIGNMENT; | |
1013 | uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align)); | |
1014 | if (uec->tx_bd_ring_offset != 0) { | |
1015 | uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align) | |
1016 | & ~(align - 1)); | |
1017 | } | |
1018 | ||
1019 | /* Zero all of Tx BDs */ | |
1020 | memset((void *)(uec->tx_bd_ring_offset), 0, length + align); | |
1021 | ||
1022 | /* Allocate Rx BDs */ | |
1023 | length = uec_info->rx_bd_ring_len * SIZEOFBD; | |
1024 | align = UEC_RX_BD_RING_ALIGNMENT; | |
1025 | uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align))); | |
1026 | if (uec->rx_bd_ring_offset != 0) { | |
1027 | uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align) | |
1028 | & ~(align - 1)); | |
1029 | } | |
1030 | ||
1031 | /* Zero all of Rx BDs */ | |
1032 | memset((void *)(uec->rx_bd_ring_offset), 0, length + align); | |
1033 | ||
1034 | /* Allocate Rx buffer */ | |
1035 | length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN; | |
1036 | align = UEC_RX_DATA_BUF_ALIGNMENT; | |
1037 | uec->rx_buf_offset = (u32)malloc(length + align); | |
1038 | if (uec->rx_buf_offset != 0) { | |
1039 | uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align) | |
1040 | & ~(align - 1)); | |
1041 | } | |
1042 | ||
1043 | /* Zero all of the Rx buffer */ | |
1044 | memset((void *)(uec->rx_buf_offset), 0, length + align); | |
1045 | ||
1046 | /* Init TxBD ring */ | |
1047 | bd = (qe_bd_t *)uec->p_tx_bd_ring; | |
1048 | uec->txBd = bd; | |
1049 | ||
1050 | for (i = 0; i < uec_info->tx_bd_ring_len; i++) { | |
1051 | BD_DATA_CLEAR(bd); | |
1052 | BD_STATUS_SET(bd, 0); | |
1053 | BD_LENGTH_SET(bd, 0); | |
1054 | bd ++; | |
1055 | } | |
1056 | BD_STATUS_SET((--bd), TxBD_WRAP); | |
1057 | ||
1058 | /* Init RxBD ring */ | |
1059 | bd = (qe_bd_t *)uec->p_rx_bd_ring; | |
1060 | uec->rxBd = bd; | |
1061 | buf = uec->p_rx_buf; | |
1062 | for (i = 0; i < uec_info->rx_bd_ring_len; i++) { | |
1063 | BD_DATA_SET(bd, buf); | |
1064 | BD_LENGTH_SET(bd, 0); | |
1065 | BD_STATUS_SET(bd, RxBD_EMPTY); | |
1066 | buf += MAX_RXBUF_LEN; | |
1067 | bd ++; | |
1068 | } | |
1069 | BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY); | |
1070 | ||
1071 | /* Init global Tx parameter RAM */ | |
1072 | uec_init_tx_parameter(uec, num_threads_tx); | |
1073 | ||
1074 | /* Init global Rx parameter RAM */ | |
1075 | uec_init_rx_parameter(uec, num_threads_rx); | |
1076 | ||
1077 | /* Init ethernet Tx and Rx parameter command */ | |
1078 | if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx, | |
1079 | num_threads_rx)) { | |
1080 | printf("%s issue init enet cmd failed\n", __FUNCTION__); | |
1081 | return -ENOMEM; | |
1082 | } | |
1083 | ||
1084 | return 0; | |
1085 | } | |
1086 | ||
1087 | static int uec_init(struct eth_device* dev, bd_t *bd) | |
1088 | { | |
1089 | uec_private_t *uec; | |
1090 | int err; | |
1091 | ||
1092 | uec = (uec_private_t *)dev->priv; | |
1093 | ||
1094 | if (uec->the_first_run == 0) { | |
1095 | /* Set up the MAC address */ | |
1096 | if (dev->enetaddr[0] & 0x01) { | |
1097 | printf("%s: MacAddress is multcast address\n", | |
1098 | __FUNCTION__); | |
1099 | return -EINVAL; | |
1100 | } | |
1101 | uec_set_mac_address(uec, dev->enetaddr); | |
1102 | uec->the_first_run = 1; | |
1103 | } | |
1104 | ||
1105 | err = uec_open(uec, COMM_DIR_RX_AND_TX); | |
1106 | if (err) { | |
1107 | printf("%s: cannot enable UEC device\n", dev->name); | |
1108 | return err; | |
1109 | } | |
1110 | ||
1111 | return 0; | |
1112 | } | |
1113 | ||
1114 | static void uec_halt(struct eth_device* dev) | |
1115 | { | |
1116 | uec_private_t *uec = (uec_private_t *)dev->priv; | |
1117 | uec_stop(uec, COMM_DIR_RX_AND_TX); | |
1118 | } | |
1119 | ||
/*
 * eth_device send callback: transmit one frame.
 *
 * Claims the current TxBD, points it at the caller's buffer, kicks the
 * UCC fast controller, then busy-waits for the hardware to clear
 * TxBD_READY. Returns 1 on success, 0 if no BD frees up or the
 * transmit times out (both bounded by a 0x100000-iteration spin).
 *
 * NOTE(review): on the timeout paths the BD is left as-is, so a frame
 * may still be pending in hardware -- confirm against the UCC docs.
 */
static int uec_send(struct eth_device* dev, volatile void *buf, int len)
{
	uec_private_t *uec;
	ucc_fast_private_t *uccf;
	volatile qe_bd_t *bd;
	volatile u16 status;
	int i;
	int result = 0;

	uec = (uec_private_t *)dev->priv;
	uccf = uec->uccf;
	bd = uec->txBd;

	/* Find an empty TxBD: spin until hardware releases the descriptor */
	for (i = 0; BD_STATUS(bd) & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Init TxBD: attach buffer and length, then mark it ready as a
	 * single last-fragment frame, preserving only the ring wrap bit
	 * from the previous status */
	BD_DATA_SET(bd, buf);
	BD_LENGTH_SET(bd, len);
	status = BD_STATUS(bd);
	status &= BD_WRAP;
	status |= (TxBD_READY | TxBD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for the buffer to be transmitted (hardware clears READY) */
	status = BD_STATUS(bd);
	for (i = 0; status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
		status = BD_STATUS(bd);
	}

	/* The buffer has been transmitted; advance to the next ring BD */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->txBd = bd;
	result = 1;

	return result;
}
1169 | ||
/*
 * eth_device recv callback: drain all filled RxBDs.
 *
 * Hands each good frame to the network stack via NetReceive(), drops
 * errored frames with a diagnostic, and recycles every processed BD
 * back to hardware. Always returns 1.
 */
static int uec_recv(struct eth_device* dev)
{
	uec_private_t *uec = dev->priv;
	volatile qe_bd_t *bd;
	volatile u16 status;
	u16 len;
	u8 *data;

	bd = uec->rxBd;
	status = BD_STATUS(bd);

	/* Process BDs until hitting one still owned by hardware (EMPTY) */
	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			NetReceive(data, len);
		} else {
			/* Errored frame: drop it but still recycle the BD */
			printf("%s: Rx error\n", dev->name);
		}
		/* Clear the status bits (BD_CLEAN presumably preserves the
		 * wrap bit -- confirm the mask definition), zero the length
		 * and return the BD to hardware by setting EMPTY */
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RxBD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = BD_STATUS(bd);
	}
	uec->rxBd = bd;

	return 1;
}
1199 | ||
1200 | int uec_initialize(int index) | |
1201 | { | |
1202 | struct eth_device *dev; | |
1203 | int i; | |
1204 | uec_private_t *uec; | |
1205 | uec_info_t *uec_info; | |
1206 | int err; | |
1207 | ||
1208 | dev = (struct eth_device *)malloc(sizeof(struct eth_device)); | |
1209 | if (!dev) | |
1210 | return 0; | |
1211 | memset(dev, 0, sizeof(struct eth_device)); | |
1212 | ||
1213 | /* Allocate the UEC private struct */ | |
1214 | uec = (uec_private_t *)malloc(sizeof(uec_private_t)); | |
1215 | if (!uec) { | |
1216 | return -ENOMEM; | |
1217 | } | |
1218 | memset(uec, 0, sizeof(uec_private_t)); | |
1219 | ||
1220 | /* Init UEC private struct, they come from board.h */ | |
1221 | if (index == 0) { | |
1222 | #ifdef CONFIG_UEC_ETH1 | |
1223 | uec_info = ð1_uec_info; | |
1224 | #endif | |
1225 | } else if (index == 1) { | |
1226 | #ifdef CONFIG_UEC_ETH2 | |
1227 | uec_info = ð2_uec_info; | |
1228 | #endif | |
1229 | } else { | |
1230 | printf("%s: index is illegal.\n", __FUNCTION__); | |
1231 | return -EINVAL; | |
1232 | } | |
1233 | ||
1234 | uec->uec_info = uec_info; | |
1235 | ||
1236 | sprintf(dev->name, "FSL UEC%d", index); | |
1237 | dev->iobase = 0; | |
1238 | dev->priv = (void *)uec; | |
1239 | dev->init = uec_init; | |
1240 | dev->halt = uec_halt; | |
1241 | dev->send = uec_send; | |
1242 | dev->recv = uec_recv; | |
1243 | ||
1244 | /* Clear the ethnet address */ | |
1245 | for (i = 0; i < 6; i++) | |
1246 | dev->enetaddr[i] = 0; | |
1247 | ||
1248 | eth_register(dev); | |
1249 | ||
1250 | err = uec_startup(uec); | |
1251 | if (err) { | |
1252 | printf("%s: Cannot configure net device, aborting.",dev->name); | |
1253 | return err; | |
1254 | } | |
1255 | ||
1256 | err = init_phy(dev); | |
1257 | if (err) { | |
1258 | printf("%s: Cannot initialize PHY, aborting.\n", dev->name); | |
1259 | return err; | |
1260 | } | |
1261 | ||
1262 | phy_change(dev); | |
1263 | ||
1264 | return 1; | |
1265 | } | |
1266 | #endif /* CONFIG_QE */ |