/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through platform_device.  Structures which
 *  define the configuration needed by the board are defined in a
 *  board structure in arch/ppc/platforms (though I do not
 *  discount the possibility that other architectures could one
 *  day be supported).
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit.  This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
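
/*
 * Orientation sketch -- the authoritative layouts live in gianfar.h, but
 * the fields below match how this file uses them: each ring entry is a
 * small, hardware-visible buffer descriptor, roughly
 *
 *	struct txbd8 {
 *		u16	status;		TXBD_READY, TXBD_WRAP, ...
 *		u16	length;		frame length in bytes
 *		u32	bufPtr;		physical address of the data buffer
 *	};
 *
 * struct rxbd8 has the same shape, with RXBD_EMPTY marking entries the
 * controller may fill and RXBD_WRAP closing the ring.  The driver and
 * the controller chase each other around these rings: cur_tx/dirty_tx
 * track the TX producer/consumer positions, and cur_rx tracks the next
 * RX descriptor to clean.
 */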

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return (priv->vlan_enable || priv->rx_csum_enable);
}
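
/*
 * Aside (inferred from the txfcb/rxfcb handling below): the FCB is a
 * small Frame Control Block the controller expects in front of the frame
 * data whenever checksum offload or VLAN acceleration is in use.
 * gfar_add_fcb() pushes GMAC_FCB_LEN bytes ahead of the payload on
 * transmit, and gfar_get_fcb() pulls the same amount back off on receive.
 */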

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int err = 0, irq;

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (NULL == einfo) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->dev = dev;

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		irq = platform_get_irq_byname(pdev, "tx");
		if (irq < 0)
			goto regs_fail;
		priv->interruptTransmit = irq;

		irq = platform_get_irq_byname(pdev, "rx");
		if (irq < 0)
			goto regs_fail;
		priv->interruptReceive = irq;

		irq = platform_get_irq_byname(pdev, "error");
		if (irq < 0)
			goto regs_fail;
		priv->interruptError = irq;
	} else {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			goto regs_fail;
		priv->interruptTransmit = irq;
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = ioremap(r->start, sizeof (struct gfar));

	if (NULL == priv->regs) {
		err = -ENOMEM;
		goto regs_fail;
	}

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);
	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(pdev, dev);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure, */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = gfar_netpoll;
#endif
	dev->stop = gfar_close;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
		       dev->name);
		goto register_fail;
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);

	platform_set_drvdata(pdev, NULL);

	iounmap(priv->regs);
	free_netdev(dev);

	return 0;
}

#ifdef CONFIG_PM
static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(dev);

	if (netif_running(dev)) {
		spin_lock_irqsave(&priv->txlock, flags);
		spin_lock(&priv->rxlock);

		gfar_halt_nodisable(dev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&priv->regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&priv->regs->maccfg1, tempval);

		spin_unlock(&priv->rxlock);
		spin_unlock_irqrestore(&priv->txlock, flags);

		napi_disable(&priv->napi);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&priv->regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&priv->regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&priv->regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */

	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	tempval = gfar_read(&priv->regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&priv->regs->maccfg2, tempval);

	gfar_start(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	netif_device_attach(dev);

	napi_enable(&priv->napi);

	return 0;
}
#else
#define gfar_suspend NULL
#define gfar_resume NULL
#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->einfo->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_mii __iomem *regs =
			(void __iomem *)&priv->regs->gfar_mii_regs;
	int tbipa = gfar_read(&priv->regs->tbipa);
	struct mii_bus *bus = gfar_get_miibus(priv);

	if (bus)
		mutex_lock(&bus->mdio_lock);

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (gfar_local_mdio_read(regs, tbipa, MII_BMSR) & BMSR_LSTATUS)
		goto done;

	/* Single clk mode, mii mode off(for serdes communication) */
	gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);

	gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);

 done:
	if (bus)
		mutex_unlock(&bus->mdio_lock);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
}


/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
					txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}

		txbdp++;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr = 0;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
			       dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb;

		skb = gfar_new_skb(dev);

		if (!skb) {
			printk(KERN_ERR "%s: Can't allocate RX buffers\n",
			       dev->name);

			goto err_rxalloc_fail;
		}

		priv->rx_skbuff[i] = skb;

		gfar_new_rxbdp(dev, rxbdp, skb);

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
				       dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	napi_enable(&priv->napi);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_PARTIAL == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */

	eieio();
	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
			       dev->name);
		return -EINVAL;
	}

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->dev;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct net_device *dev)
{
	struct txbd8 *bdp;
	struct gfar_private *priv = netdev_priv(dev);
	int howmany = 0;

	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		howmany++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			dev->stats.collisions++;

		/* Unmap the DMA memory */
		dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
				bdp->length, DMA_TO_DEVICE);

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* Clean BD length for empty detection */
		bdp->length = 0;

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	dev->stats.tx_packets += howmany;

	return howmany;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);

	gfar_clean_tx_ring(dev);

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (likely(priv->txcoalescing)) {
		gfar_write(&priv->regs->txic, 0);
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	}

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 *status_len = (u32 *)bdp;
	u16 flags;

	bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	flags = RXBD_EMPTY | RXBD_INTERRUPT;

	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
		flags |= RXBD_WRAP;

	eieio();

	*status_len = (u32)flags << 16;
}


struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	/* We have to allocate the skb, so keep trying till we succeed */
	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	u32 tempval;

	/* support NAPI */
	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RTX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev, &priv->napi);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
			       dev->name, gfar_read(&priv->regs->ievent),
			       gfar_read(&priv->regs->imask));
	}

	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}


static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
		dev->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
			ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						       fcb->vlctl);
		} else
			ret = netif_receive_skb(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = priv->rx_skbuff[priv->skb_currx];

		dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;

			if (skb)
				dev_kfree_skb_any(skb);
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			dev->stats.rx_bytes += pkt_len;
		}

		priv->rx_skbuff[priv->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(dev, bdp, newskb);

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx + 1) &
		    RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->dev;
	int howmany;
	unsigned long flags;

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	howmany = gfar_clean_rx_ring(dev, budget);

	if (howmany < budget) {
		netif_rx_complete(dev, napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(priv->rxcoalescing)) {
			gfar_write(&priv->regs->rxic, 0);
			gfar_write(&priv->regs->rxic,
				   mk_ic_value(priv->rxcount, priv->rxtime));
		}
	}

	return howmany;
}
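
/*
 * Editor's sketch of the NAPI contract gfar_poll() follows; the
 * authoritative rules live in the core networking code, this only
 * restates them for the reader:
 *
 *	howmany = gfar_clean_rx_ring(dev, budget);
 *	if (howmany < budget) {			/- ring fully drained -/
 *		netif_rx_complete(dev, napi);	/- leave polled mode -/
 *		...re-enable RX interrupts...	/- IMASK_DEFAULT above -/
 *	}
 *	return howmany;				/- never exceeds budget -/
 *
 * Returning exactly 'budget' keeps the device on the poll list, so
 * interrupts stay masked and nothing needs to be re-armed in that
 * case.
 */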

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
}

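/*
 * Editor's note: gfar_interrupt() is the classic "read the cause
 * register once, then fan out" handler.  The IEVENT snapshot is
 * tested against the RX, TX and error masks in turn, and each
 * sub-handler acks only the IEVENT bits it owns (gfar_error() below,
 * for instance, clears just IEVENT_ERR_MASK), so an event that
 * latches between the read and the return raises a fresh interrupt
 * rather than being lost.
 */
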
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack! Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}

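/*
 * Editor's sketch of the register-update pattern above, not extra
 * driver logic: the MACCFG2 interface-mode field is always cleared
 * before the new mode is OR'd in,
 *
 *	tempval = (tempval & ~MACCFG2_IF) | MACCFG2_GMII;  /- 1000 -/
 *	tempval = (tempval & ~MACCFG2_IF) | MACCFG2_MII;   /- 100/10 -/
 *
 * so mode bits from a previous speed never linger.  10 vs. 100 is
 * carried separately in ECNTRL_R100 because, as the comment in the
 * switch says, the reduced-pin interface modes signal both speeds
 * through the same MII-type MACCFG2 setting.
 */
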
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}
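
/*
 * Editor's note: the group hash programmed above is a best-effort
 * filter.  Distinct multicast addresses can hash to the same gaddr
 * bit, so the controller may still accept frames for groups nobody
 * joined, and the network stack must discard them.  That is also why
 * the table is rebuilt from scratch on every call rather than
 * clearing individual bits: a set bit may be shared by several
 * subscribed addresses.
 */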

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash select which gaddr register to use, and the other 5 bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}
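
/*
 * Editor's worked example of the index math above, assuming the
 * 8-bit hash width used with a 256-entry table: a CRC result of
 * 0xa7000000 gives hash 0xa7 (0b10100111), so
 *
 *	whichreg = 0xa7 >> 5	 = 5	-> hash_regs[5]
 *	whichbit = 0xa7 & 0x1f	 = 7	-> bit 7, IBM numbering
 *	value	 = 1 << (31 - 7) = 0x01000000
 *
 * i.e. entry 0xa7 of the flat table, counting the first register's
 * MSB as entry 0.  With the 9-bit width of the extended hash the
 * same math spreads the entries across all 16 registers.
 */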

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the num'th pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	/* Pad to two full 32-bit words so the reads below never run
	 * past the six address bytes */
	char tmpbuf[2 * sizeof(u32)] = { 0 };
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Copy the address into the registers backwards, since the
	 * controller expects the bytes in reversed (little-endian) order */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
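
/*
 * Editor's worked example of the reversal above, using the made-up
 * address 00:04:9f:00:12:34: after the copy loop tmpbuf holds
 *
 *	{ 0x34, 0x12, 0x00, 0x9f, 0x04, 0x00, 0x00, 0x00 }
 *
 * so the first gfar_write() stores address bytes 5..2 in the first
 * register of the pair and the second stores bytes 1..0 (plus zero
 * padding) in the second, which is the byte order the station and
 * exact-match address registers expect.
 */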

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Report the error, if debugging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:fsl-gianfar");

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.driver	= {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
	},
};

static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = platform_driver_register(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
	gfar_mdio_exit();
}

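/*
 * Editor's note on the ordering above: gfar_init() registers the MDIO
 * bus driver before the ethernet driver, presumably so the bus is
 * available by the time a gfar device tries to attach its PHY, and it
 * unwinds the MDIO registration if the second step fails.  gfar_exit()
 * tears the two down in the reverse order.
 */
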
module_init(gfar_init);
module_exit(gfar_exit);