]>
Commit | Line | Data |
---|---|---|
ba94a1bb WD |
1 | /* |
2 | * (C) Copyright 2005-2006 | |
3 | * Stefan Roese, DENX Software Engineering, sr@denx.de. | |
4 | * | |
1a459660 | 5 | * SPDX-License-Identifier: GPL-2.0+ |
ba94a1bb WD |
6 | */ |
7 | ||
8 | #if 0 | |
9 | #define DEBUG /* define for debug output */ | |
10 | #endif | |
11 | ||
12 | #include <config.h> | |
13 | #include <common.h> | |
14 | #include <net.h> | |
15 | #include <miiphy.h> | |
16 | #include <malloc.h> | |
17 | #include <asm/processor.h> | |
18 | #include <asm/arch-ixp/ixp425.h> | |
19 | ||
20 | #include <IxOsal.h> | |
21 | #include <IxEthAcc.h> | |
22 | #include <IxEthDB.h> | |
23 | #include <IxNpeDl.h> | |
24 | #include <IxQMgr.h> | |
25 | #include <IxNpeMh.h> | |
26 | #include <ix_ossl.h> | |
27 | #include <IxFeatureCtrl.h> | |
28 | ||
29 | #include <npe.h> | |
30 | ||
ba94a1bb WD |
/* QMgr dispatcher entry point; set once by npe_csr_load(), used by npe_poll() */
static IxQMgrDispatcherFuncPtr qDispatcherFunc = NULL;
/* Per-port: Eth coprocessor present on this silicon (detected in npe_initialize) */
static int npe_exists[NPE_NUM_PORTS];
/* Per-port: port is enabled for use by this driver */
static int npe_used[NPE_NUM_PORTS];

/* A little extra so we can align to cacheline. */
static u8 npe_alloc_pool[NPE_MEM_POOL_SIZE + CONFIG_SYS_CACHELINE_SIZE - 1];
static u8 *npe_alloc_end;	/* one past the last usable pool byte */
static u8 *npe_alloc_free;	/* next free (cacheline-aligned) pool byte */
39 | ||
40 | static void *npe_alloc(int size) | |
41 | { | |
42 | static int count = 0; | |
43 | void *p = NULL; | |
44 | ||
6d0f6bcf | 45 | size = (size + (CONFIG_SYS_CACHELINE_SIZE-1)) & ~(CONFIG_SYS_CACHELINE_SIZE-1); |
ba94a1bb WD |
46 | count++; |
47 | ||
48 | if ((npe_alloc_free + size) < npe_alloc_end) { | |
49 | p = npe_alloc_free; | |
50 | npe_alloc_free += size; | |
51 | } else { | |
25dbe98a | 52 | printf("npe_alloc: failed (count=%d, size=%d)!\n", count, size); |
ba94a1bb WD |
53 | } |
54 | return p; | |
55 | } | |
56 | ||
57 | /* Not interrupt safe! */ | |
58 | static void mbuf_enqueue(IX_OSAL_MBUF **q, IX_OSAL_MBUF *new) | |
59 | { | |
60 | IX_OSAL_MBUF *m = *q; | |
61 | ||
62 | IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(new) = NULL; | |
63 | ||
64 | if (m) { | |
65 | while(IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m)) | |
66 | m = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m); | |
67 | IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = new; | |
68 | } else | |
69 | *q = new; | |
70 | } | |
71 | ||
72 | /* Not interrupt safe! */ | |
73 | static IX_OSAL_MBUF *mbuf_dequeue(IX_OSAL_MBUF **q) | |
74 | { | |
75 | IX_OSAL_MBUF *m = *q; | |
76 | if (m) | |
77 | *q = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m); | |
78 | return m; | |
79 | } | |
80 | ||
81 | static void reset_tx_mbufs(struct npe* p_npe) | |
82 | { | |
83 | IX_OSAL_MBUF *m; | |
84 | int i; | |
85 | ||
86 | p_npe->txQHead = NULL; | |
87 | ||
88 | for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS; i++) { | |
89 | m = &p_npe->tx_mbufs[i]; | |
90 | ||
91 | memset(m, 0, sizeof(*m)); | |
92 | ||
93 | IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->tx_pkts[i * NPE_PKT_SIZE]; | |
94 | IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE; | |
95 | mbuf_enqueue(&p_npe->txQHead, m); | |
96 | } | |
97 | } | |
98 | ||
99 | static void reset_rx_mbufs(struct npe* p_npe) | |
100 | { | |
101 | IX_OSAL_MBUF *m; | |
102 | int i; | |
103 | ||
104 | p_npe->rxQHead = NULL; | |
105 | ||
106 | HAL_DCACHE_INVALIDATE(p_npe->rx_pkts, NPE_PKT_SIZE * | |
107 | CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS); | |
108 | ||
109 | for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS; i++) { | |
110 | m = &p_npe->rx_mbufs[i]; | |
111 | ||
112 | memset(m, 0, sizeof(*m)); | |
113 | ||
114 | IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->rx_pkts[i * NPE_PKT_SIZE]; | |
115 | IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE; | |
116 | ||
117 | if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) { | |
118 | printf("ixEthAccPortRxFreeReplenish failed for port %d\n", p_npe->eth_id); | |
119 | break; | |
120 | } | |
121 | } | |
122 | } | |
123 | ||
124 | static void init_rx_mbufs(struct npe* p_npe) | |
125 | { | |
126 | p_npe->rxQHead = NULL; | |
127 | ||
128 | p_npe->rx_pkts = npe_alloc(NPE_PKT_SIZE * | |
129 | CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS); | |
130 | if (p_npe->rx_pkts == NULL) { | |
131 | printf("alloc of packets failed.\n"); | |
132 | return; | |
133 | } | |
134 | ||
135 | p_npe->rx_mbufs = (IX_OSAL_MBUF *) | |
136 | npe_alloc(sizeof(IX_OSAL_MBUF) * | |
137 | CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS); | |
138 | if (p_npe->rx_mbufs == NULL) { | |
139 | printf("alloc of mbufs failed.\n"); | |
140 | return; | |
141 | } | |
142 | ||
143 | reset_rx_mbufs(p_npe); | |
144 | } | |
145 | ||
146 | static void init_tx_mbufs(struct npe* p_npe) | |
147 | { | |
148 | p_npe->tx_pkts = npe_alloc(NPE_PKT_SIZE * | |
149 | CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS); | |
150 | if (p_npe->tx_pkts == NULL) { | |
151 | printf("alloc of packets failed.\n"); | |
152 | return; | |
153 | } | |
154 | ||
155 | p_npe->tx_mbufs = (IX_OSAL_MBUF *) | |
156 | npe_alloc(sizeof(IX_OSAL_MBUF) * | |
157 | CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS); | |
158 | if (p_npe->tx_mbufs == NULL) { | |
159 | printf("alloc of mbufs failed.\n"); | |
160 | return; | |
161 | } | |
162 | ||
163 | reset_tx_mbufs(p_npe); | |
164 | } | |
165 | ||
166 | /* Convert IX_ETH_PORT_n to IX_NPEMH_NPEID_NPEx */ | |
167 | static int __eth_to_npe(int eth_id) | |
168 | { | |
169 | switch(eth_id) { | |
170 | case IX_ETH_PORT_1: | |
171 | return IX_NPEMH_NPEID_NPEB; | |
172 | ||
173 | case IX_ETH_PORT_2: | |
174 | return IX_NPEMH_NPEID_NPEC; | |
175 | ||
176 | case IX_ETH_PORT_3: | |
177 | return IX_NPEMH_NPEID_NPEA; | |
178 | } | |
179 | return 0; | |
180 | } | |
181 | ||
182 | /* Poll the CSR machinery. */ | |
183 | static void npe_poll(int eth_id) | |
184 | { | |
185 | if (qDispatcherFunc != NULL) { | |
186 | ixNpeMhMessagesReceive(__eth_to_npe(eth_id)); | |
187 | (*qDispatcherFunc)(IX_QMGR_QUELOW_GROUP); | |
188 | } | |
189 | } | |
190 | ||
/*
 * ethAcc RX callback: invoked by the access layer for each received frame.
 * Copies the payload into U-Boot's NetRxPackets[] ring (drained later by
 * npe_rx()) and always returns the mbuf to the NPE free pool before
 * returning.
 */
static void npe_rx_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	struct npe* p_npe = (struct npe *)cbTag;

	if (IX_OSAL_MBUF_MLEN(m) > 0) {
		mbuf_enqueue(&p_npe->rxQHead, m);

		/* Ring full: write index would catch up with read index */
		if (p_npe->rx_write == ((p_npe->rx_read-1) & (PKTBUFSRX-1))) {
			debug("Rx overflow: rx_write=%d rx_read=%d\n",
			      p_npe->rx_write, p_npe->rx_read);
		} else {
			debug("Received message #%d (len=%d)\n", p_npe->rx_write,
			      IX_OSAL_MBUF_MLEN(m));
			memcpy((void *)NetRxPackets[p_npe->rx_write], IX_OSAL_MBUF_MDATA(m),
			       IX_OSAL_MBUF_MLEN(m));
			p_npe->rx_len[p_npe->rx_write] = IX_OSAL_MBUF_MLEN(m);
			p_npe->rx_write++;
			if (p_npe->rx_write == PKTBUFSRX)
				p_npe->rx_write = 0;

#ifdef CONFIG_PRINT_RX_FRAMES
			/* Debug aid: dump the first 60 bytes of the frame */
			{
				u8 *ptr = IX_OSAL_MBUF_MDATA(m);
				int i;

				for (i=0; i<60; i++) {
					debug("%02x ", *ptr++);
				}
				debug("\n");
			}
#endif
		}

		/* Take the oldest queued mbuf back off rxQHead for recycling */
		m = mbuf_dequeue(&p_npe->rxQHead);
	} else {
		debug("Received frame with length 0!!!\n");
		m = mbuf_dequeue(&p_npe->rxQHead);
	}

	/* Now return mbuf to NPE */
	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
		debug("npe_rx_callback: Error returning mbuf.\n");
	}
}
240 | ||
241 | /* ethAcc TX callback */ | |
242 | static void npe_tx_callback(u32 cbTag, IX_OSAL_MBUF *m) | |
243 | { | |
244 | struct npe* p_npe = (struct npe *)cbTag; | |
245 | ||
246 | debug("%s\n", __FUNCTION__); | |
247 | ||
248 | IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE; | |
249 | IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL; | |
250 | IX_OSAL_MBUF_FLAGS(m) = 0; | |
251 | ||
252 | mbuf_enqueue(&p_npe->txQHead, m); | |
253 | } | |
254 | ||
255 | ||
256 | static int npe_set_mac_address(struct eth_device *dev) | |
257 | { | |
258 | struct npe *p_npe = (struct npe *)dev->priv; | |
259 | IxEthAccMacAddr npeMac; | |
260 | ||
261 | debug("%s\n", __FUNCTION__); | |
262 | ||
263 | /* Set MAC address */ | |
264 | memcpy(npeMac.macAddress, dev->enetaddr, 6); | |
265 | ||
266 | if (ixEthAccPortUnicastMacAddressSet(p_npe->eth_id, &npeMac) != IX_ETH_ACC_SUCCESS) { | |
267 | printf("Error setting unicast address! %02x:%02x:%02x:%02x:%02x:%02x\n", | |
268 | npeMac.macAddress[0], npeMac.macAddress[1], | |
269 | npeMac.macAddress[2], npeMac.macAddress[3], | |
270 | npeMac.macAddress[4], npeMac.macAddress[5]); | |
271 | return 0; | |
272 | } | |
273 | ||
274 | return 1; | |
275 | } | |
276 | ||
/*
 * Boot-time CSR library initialization: bring up the queue manager and
 * NPE message handler, download firmware to each present/used NPE, then
 * initialize the ethAcc layer and configure every active port. The call
 * order follows the CSR library's required init sequence.
 * Returns 1 on success, 0 on failure.
 */
static int npe_csr_load(void)
{
	int i;

	if (ixQMgrInit() != IX_SUCCESS) {
		debug("Error initialising queue manager!\n");
		return 0;
	}

	/* Remember the dispatcher so npe_poll() can drive the queues */
	ixQMgrDispatcherLoopGet(&qDispatcherFunc);

	if(ixNpeMhInitialize(IX_NPEMH_NPEINTERRUPTS_YES) != IX_SUCCESS) {
		printf("Error initialising NPE Message handler!\n");
		return 0;
	}

	/* Firmware download only for NPEs that exist on this silicon and are used */
	if (npe_used[IX_ETH_PORT_1] && npe_exists[IX_ETH_PORT_1] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-B!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_2] && npe_exists[IX_ETH_PORT_2] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-C!\n");
		return 0;
	}

	/* don't need this for U-Boot */
	ixFeatureCtrlSwConfigurationWrite(IX_FEATURECTRL_ETH_LEARNING, false);

	if (ixEthAccInit() != IX_ETH_ACC_SUCCESS) {
		printf("Error initialising Ethernet access driver!\n");
		return 0;
	}

	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (!npe_used[i] || !npe_exists[i])
			continue;
		/* Per-port setup failures are reported but deliberately not fatal */
		if (ixEthAccPortInit(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error initialising Ethernet port%d!\n", i);
		}
		if (ixEthAccTxSchedulingDisciplineSet(i, FIFO_NO_PRIORITY) != IX_ETH_ACC_SUCCESS) {
			printf("Error setting scheduling discipline for port %d.\n", i);
		}
		/* RX: strip FCS before handing frames up; TX: hardware appends FCS */
		if (ixEthAccPortRxFrameAppendFCSDisable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error disabling RX FCS for port %d.\n", i);
		}
		if (ixEthAccPortTxFrameAppendFCSEnable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error enabling TX FCS for port %d.\n", i);
		}
	}

	return 1;
}
335 | ||
/*
 * eth_device .init hook: determine link speed/duplex (either from the
 * fixed-link config options or via MII autonegotiation), reset the NPE
 * memory pool, build the RX/TX mbuf sets, register ethAcc callbacks,
 * program the MAC address and enable the port.
 * Returns 0 on success, -1 on failure.
 */
static int npe_init(struct eth_device *dev, bd_t * bis)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;
	u16 reg_short;
	int speed;
	int duplex;

	debug("%s: 1\n", __FUNCTION__);

#ifdef CONFIG_MII_NPE0_FIXEDLINK
	if (0 == p_npe->eth_id) {
		speed = CONFIG_MII_NPE0_SPEED;
		duplex = CONFIG_MII_NPE0_FULLDUPLEX ? FULL : HALF;
	} else
#endif
#ifdef CONFIG_MII_NPE1_FIXEDLINK
	if (1 == p_npe->eth_id) {
		speed = CONFIG_MII_NPE1_SPEED;
		duplex = CONFIG_MII_NPE1_FULLDUPLEX ? FULL : HALF;
	} else
#endif
	{
		miiphy_read(dev->name, p_npe->phy_no, MII_BMSR, &reg_short);

		/*
		 * Wait if PHY is capable of autonegotiation and
		 * autonegotiation is not complete
		 */
		if ((reg_short & BMSR_ANEGCAPABLE) &&
		    !(reg_short & BMSR_ANEGCOMPLETE)) {
			puts("Waiting for PHY auto negotiation to complete");
			i = 0;
			while (!(reg_short & BMSR_ANEGCOMPLETE)) {
				/*
				 * Timeout reached ?
				 */
				if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
					puts(" TIMEOUT !\n");
					break;
				}

				/* Re-read BMSR (and print progress) once per 1000 iterations */
				if ((i++ % 1000) == 0) {
					putc('.');
					miiphy_read(dev->name, p_npe->phy_no,
						    MII_BMSR, &reg_short);
				}
				udelay(1000);	/* 1 ms */
			}
			puts(" done\n");
			/* another 500 ms (results in faster booting) */
			udelay(500000);
		}
		speed = miiphy_speed(dev->name, p_npe->phy_no);
		duplex = miiphy_duplex(dev->name, p_npe->phy_no);
	}

	/* Print the link parameters once per device */
	if (p_npe->print_speed) {
		p_npe->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	/*
	 * Reset the static pool so the mbuf/packet allocations below reuse
	 * the same memory on every (re-)init.
	 */
	npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
	npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
				 CONFIG_SYS_CACHELINE_SIZE - 1) & ~(CONFIG_SYS_CACHELINE_SIZE - 1));

	/* initialize mbuf pool */
	init_rx_mbufs(p_npe);
	init_tx_mbufs(p_npe);

	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register RX callback!\n");
		return -1;
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register TX callback!\n");
		return -1;
	}

	/* NOTE(review): failure of the MAC set is reported but not treated as fatal */
	npe_set_mac_address(dev);

	if (ixEthAccPortEnable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		printf("can't enable port!\n");
		return -1;
	}

	p_npe->active = 1;

	return 0;
}
430 | ||
#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
/* Uninitialize CSR library (reverse order of npe_csr_load). Currently unused. */
static void npe_csr_unload(void)
{
	ixEthAccUnload();
	ixEthDBUnload();
	ixNpeMhUnload();
	ixQMgrUnload();
}

/* callback which is used by ethAcc to recover RX buffers when stopping */
static void npe_rx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	debug("%s\n", __FUNCTION__);
}

/* callback which is used by ethAcc to recover TX buffers when stopping */
static void npe_tx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	debug("%s\n", __FUNCTION__);
}
#endif
453 | ||
/*
 * eth_device .halt hook: poll the CSR machinery for a while so in-flight
 * mbufs are recovered, then mark the port inactive. The full port
 * disable / NPE reset path is compiled out (#if 0) - see the inline
 * comments about handing a clean state to a booted OS.
 */
static void npe_halt(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;

	debug("%s\n", __FUNCTION__);

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(100);
	}

#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_stop_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering rx callback!\n");
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_stop_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering tx callback!\n");
	}

	if (ixEthAccPortDisable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		debug("npe_stop: Error disabling NPEB!\n");
	}

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(10000);
	}

	/*
	 * For U-Boot only, we are probably launching Linux or other OS that
	 * needs a clean slate for its NPE library.
	 */
#if 0 /* test-only */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (npe_used[i] && npe_exists[i])
			if (ixNpeDlNpeStopAndReset(__eth_to_npe(i)) != IX_SUCCESS)
				printf("Failed to stop and reset NPE B.\n");
	}
#endif

#endif
	p_npe->active = 0;
}
503 | ||
504 | ||
287e3ad4 | 505 | static int npe_send(struct eth_device *dev, void *packet, int len) |
ba94a1bb WD |
506 | { |
507 | struct npe *p_npe = (struct npe *)dev->priv; | |
508 | u8 *dest; | |
509 | int err; | |
510 | IX_OSAL_MBUF *m; | |
511 | ||
512 | debug("%s\n", __FUNCTION__); | |
513 | m = mbuf_dequeue(&p_npe->txQHead); | |
514 | dest = IX_OSAL_MBUF_MDATA(m); | |
515 | IX_OSAL_MBUF_PKT_LEN(m) = IX_OSAL_MBUF_MLEN(m) = len; | |
516 | IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = NULL; | |
517 | ||
518 | memcpy(dest, (char *)packet, len); | |
519 | ||
520 | if ((err = ixEthAccPortTxFrameSubmit(p_npe->eth_id, m, IX_ETH_ACC_TX_DEFAULT_PRIORITY)) | |
521 | != IX_ETH_ACC_SUCCESS) { | |
522 | printf("npe_send: Can't submit frame. err[%d]\n", err); | |
523 | mbuf_enqueue(&p_npe->txQHead, m); | |
524 | return 0; | |
525 | } | |
526 | ||
527 | #ifdef DEBUG_PRINT_TX_FRAMES | |
528 | { | |
529 | u8 *ptr = IX_OSAL_MBUF_MDATA(m); | |
530 | int i; | |
531 | ||
532 | for (i=0; i<IX_OSAL_MBUF_MLEN(m); i++) { | |
533 | printf("%02x ", *ptr++); | |
534 | } | |
535 | printf(" (tx-len=%d)\n", IX_OSAL_MBUF_MLEN(m)); | |
536 | } | |
537 | #endif | |
538 | ||
539 | npe_poll(p_npe->eth_id); | |
540 | ||
541 | return len; | |
542 | } | |
543 | ||
544 | static int npe_rx(struct eth_device *dev) | |
545 | { | |
546 | struct npe *p_npe = (struct npe *)dev->priv; | |
547 | ||
548 | debug("%s\n", __FUNCTION__); | |
549 | npe_poll(p_npe->eth_id); | |
550 | ||
551 | debug("%s: rx_write=%d rx_read=%d\n", __FUNCTION__, p_npe->rx_write, p_npe->rx_read); | |
552 | while (p_npe->rx_write != p_npe->rx_read) { | |
553 | debug("Reading message #%d\n", p_npe->rx_read); | |
554 | NetReceive(NetRxPackets[p_npe->rx_read], p_npe->rx_len[p_npe->rx_read]); | |
555 | p_npe->rx_read++; | |
556 | if (p_npe->rx_read == PKTBUFSRX) | |
557 | p_npe->rx_read = 0; | |
558 | } | |
559 | ||
560 | return 0; | |
561 | } | |
562 | ||
/*
 * Probe and register all NPE Ethernet ports with the U-Boot net core.
 * On the first call the silicon is inspected to see which Ethernet
 * coprocessors exist and the CSR libraries are loaded (one-time setup).
 * Ports without a MAC address in the environment are skipped.
 * Returns 1 on success, 0 when the CSR load fails, -1 on allocation
 * failure.
 */
int npe_initialize(bd_t * bis)
{
	static int virgin = 0;
	struct eth_device *dev;
	int eth_num = 0;
	struct npe *p_npe = NULL;
	uchar enetaddr[6];

	for (eth_num = 0; eth_num < CONFIG_SYS_NPE_NUMS; eth_num++) {

		/* See if we can actually bring up the interface, otherwise, skip it */
#ifdef CONFIG_HAS_ETH1
		if (eth_num == 1) {
			if (!eth_getenv_enetaddr("eth1addr", enetaddr))
				continue;
		} else
#endif
			if (!eth_getenv_enetaddr("ethaddr", enetaddr))
				continue;

		/* Allocate device structure */
		dev = (struct eth_device *)malloc(sizeof(*dev));
		if (dev == NULL) {
			printf ("%s: Cannot allocate eth_device %d\n", __FUNCTION__, eth_num);
			return -1;
		}
		memset(dev, 0, sizeof(*dev));

		/* Allocate our private use data */
		p_npe = (struct npe *)malloc(sizeof(struct npe));
		if (p_npe == NULL) {
			printf("%s: Cannot allocate private hw data for eth_device %d",
			       __FUNCTION__, eth_num);
			free(dev);
			return -1;
		}
		memset(p_npe, 0, sizeof(struct npe));

		p_npe->eth_id = eth_num;
		memcpy(dev->enetaddr, enetaddr, 6);
#ifdef CONFIG_HAS_ETH1
		if (eth_num == 1)
			p_npe->phy_no = CONFIG_PHY1_ADDR;
		else
#endif
			p_npe->phy_no = CONFIG_PHY_ADDR;

		/* Wire up the eth_device callbacks */
		sprintf(dev->name, "NPE%d", eth_num);
		dev->priv = (void *)p_npe;
		dev->init = npe_init;
		dev->halt = npe_halt;
		dev->send = npe_send;
		dev->recv = npe_rx;

		p_npe->print_speed = 1;

		/* One-time global setup, done when the first port is registered */
		if (0 == virgin) {
			virgin = 1;

			/* Detect which Eth coprocessors this silicon provides */
			if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP42X) {
				switch (ixFeatureCtrlProductIdRead() & IX_FEATURE_CTRL_SILICON_STEPPING_MASK) {
				case IX_FEATURE_CTRL_SILICON_TYPE_B0:
				default: /* newer than B0 */
					/*
					 * If it is B0 or newer Silicon, we
					 * only enable port when its
					 * corresponding Eth Coprocessor is
					 * available.
					 */
					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_1] = true;

					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_2] = true;
					break;
				case IX_FEATURE_CTRL_SILICON_TYPE_A0:
					/*
					 * If it is A0 Silicon, we enable both as both Eth Coprocessors
					 * are available.
					 */
					npe_exists[IX_ETH_PORT_1] = true;
					npe_exists[IX_ETH_PORT_2] = true;
					break;
				}
			} else if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP46X) {
				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_1] = true;

				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_2] = true;
			}

			npe_used[IX_ETH_PORT_1] = 1;
			npe_used[IX_ETH_PORT_2] = 1;

			/* Initialize the static allocation pool (cacheline aligned) */
			npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
			npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
						 CONFIG_SYS_CACHELINE_SIZE - 1)
						& ~(CONFIG_SYS_CACHELINE_SIZE - 1));

			if (!npe_csr_load())
				return 0;
		}

		eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, npe_miiphy_read, npe_miiphy_write);
#endif

	} /* end for each supported device */

	return 1;
}