]> git.ipfire.org Git - people/arne_f/kernel.git/blame - drivers/net/usb/lan78xx.c
lan78xx: Avoid spurious kevent 4 "error"
[people/arne_f/kernel.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
55d7de9d
WH
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
c6e970a0 32#include <linux/phy.h>
55d7de9d 33#include <net/ip6_checksum.h>
d6502fc2 34#include <net/vxlan.h>
cc89c323
WH
35#include <linux/interrupt.h>
36#include <linux/irqdomain.h>
37#include <linux/irq.h>
38#include <linux/irqchip/chained_irq.h>
bdfba55e 39#include <linux/microchipphy.h>
8c56ea41 40#include <linux/phy.h>
a4977f3e 41#include <linux/of_net.h>
55d7de9d
WH
42#include "lan78xx.h"
43
44#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
45#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46#define DRIVER_NAME "lan78xx"
02dc1f3d 47#define DRIVER_VERSION "1.0.6"
55d7de9d
WH
48
49#define TX_TIMEOUT_JIFFIES (5 * HZ)
50#define THROTTLE_JIFFIES (HZ / 8)
51#define UNLINK_TIMEOUT_MS 3
52
53#define RX_MAX_QUEUE_MEMORY (60 * 1518)
54
55#define SS_USB_PKT_SIZE (1024)
56#define HS_USB_PKT_SIZE (512)
57#define FS_USB_PKT_SIZE (64)
58
59#define MAX_RX_FIFO_SIZE (12 * 1024)
60#define MAX_TX_FIFO_SIZE (12 * 1024)
61#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
62#define DEFAULT_BULK_IN_DELAY (0x0800)
63#define MAX_SINGLE_PACKET_SIZE (9000)
64#define DEFAULT_TX_CSUM_ENABLE (true)
65#define DEFAULT_RX_CSUM_ENABLE (true)
66#define DEFAULT_TSO_CSUM_ENABLE (true)
67#define DEFAULT_VLAN_FILTER_ENABLE (true)
55d7de9d
WH
68#define TX_OVERHEAD (8)
69#define RXW_PADDING 2
70
71#define LAN78XX_USB_VENDOR_ID (0x0424)
72#define LAN7800_USB_PRODUCT_ID (0x7800)
73#define LAN7850_USB_PRODUCT_ID (0x7850)
02dc1f3d 74#define LAN7801_USB_PRODUCT_ID (0x7801)
55d7de9d
WH
75#define LAN78XX_EEPROM_MAGIC (0x78A5)
76#define LAN78XX_OTP_MAGIC (0x78F3)
77
78#define MII_READ 1
79#define MII_WRITE 0
80
81#define EEPROM_INDICATOR (0xA5)
82#define EEPROM_MAC_OFFSET (0x01)
83#define MAX_EEPROM_SIZE 512
84#define OTP_INDICATOR_1 (0xF3)
85#define OTP_INDICATOR_2 (0xF7)
86
87#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
88 WAKE_MCAST | WAKE_BCAST | \
89 WAKE_ARP | WAKE_MAGIC)
90
91/* USB related defines */
92#define BULK_IN_PIPE 1
93#define BULK_OUT_PIPE 2
94
95/* default autosuspend delay (mSec)*/
96#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
97
20ff5565
WH
98/* statistic update interval (mSec) */
99#define STAT_UPDATE_TIMER (1 * 1000)
100
cc89c323
WH
101/* defines interrupts from interrupt EP */
102#define MAX_INT_EP (32)
103#define INT_EP_INTEP (31)
104#define INT_EP_OTP_WR_DONE (28)
105#define INT_EP_EEE_TX_LPI_START (26)
106#define INT_EP_EEE_TX_LPI_STOP (25)
107#define INT_EP_EEE_RX_LPI (24)
108#define INT_EP_MAC_RESET_TIMEOUT (23)
109#define INT_EP_RDFO (22)
110#define INT_EP_TXE (21)
111#define INT_EP_USB_STATUS (20)
112#define INT_EP_TX_DIS (19)
113#define INT_EP_RX_DIS (18)
114#define INT_EP_PHY (17)
115#define INT_EP_DP (16)
116#define INT_EP_MAC_ERR (15)
117#define INT_EP_TDFU (14)
118#define INT_EP_TDFO (13)
119#define INT_EP_UTX (12)
120#define INT_EP_GPIO_11 (11)
121#define INT_EP_GPIO_10 (10)
122#define INT_EP_GPIO_9 (9)
123#define INT_EP_GPIO_8 (8)
124#define INT_EP_GPIO_7 (7)
125#define INT_EP_GPIO_6 (6)
126#define INT_EP_GPIO_5 (5)
127#define INT_EP_GPIO_4 (4)
128#define INT_EP_GPIO_3 (3)
129#define INT_EP_GPIO_2 (2)
130#define INT_EP_GPIO_1 (1)
131#define INT_EP_GPIO_0 (0)
132
55d7de9d
WH
133static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
134 "RX FCS Errors",
135 "RX Alignment Errors",
136 "Rx Fragment Errors",
137 "RX Jabber Errors",
138 "RX Undersize Frame Errors",
139 "RX Oversize Frame Errors",
140 "RX Dropped Frames",
141 "RX Unicast Byte Count",
142 "RX Broadcast Byte Count",
143 "RX Multicast Byte Count",
144 "RX Unicast Frames",
145 "RX Broadcast Frames",
146 "RX Multicast Frames",
147 "RX Pause Frames",
148 "RX 64 Byte Frames",
149 "RX 65 - 127 Byte Frames",
150 "RX 128 - 255 Byte Frames",
151 "RX 256 - 511 Bytes Frames",
152 "RX 512 - 1023 Byte Frames",
153 "RX 1024 - 1518 Byte Frames",
154 "RX Greater 1518 Byte Frames",
155 "EEE RX LPI Transitions",
156 "EEE RX LPI Time",
157 "TX FCS Errors",
158 "TX Excess Deferral Errors",
159 "TX Carrier Errors",
160 "TX Bad Byte Count",
161 "TX Single Collisions",
162 "TX Multiple Collisions",
163 "TX Excessive Collision",
164 "TX Late Collisions",
165 "TX Unicast Byte Count",
166 "TX Broadcast Byte Count",
167 "TX Multicast Byte Count",
168 "TX Unicast Frames",
169 "TX Broadcast Frames",
170 "TX Multicast Frames",
171 "TX Pause Frames",
172 "TX 64 Byte Frames",
173 "TX 65 - 127 Byte Frames",
174 "TX 128 - 255 Byte Frames",
175 "TX 256 - 511 Bytes Frames",
176 "TX 512 - 1023 Byte Frames",
177 "TX 1024 - 1518 Byte Frames",
178 "TX Greater 1518 Byte Frames",
179 "EEE TX LPI Transitions",
180 "EEE TX LPI Time",
181};
182
183struct lan78xx_statstage {
184 u32 rx_fcs_errors;
185 u32 rx_alignment_errors;
186 u32 rx_fragment_errors;
187 u32 rx_jabber_errors;
188 u32 rx_undersize_frame_errors;
189 u32 rx_oversize_frame_errors;
190 u32 rx_dropped_frames;
191 u32 rx_unicast_byte_count;
192 u32 rx_broadcast_byte_count;
193 u32 rx_multicast_byte_count;
194 u32 rx_unicast_frames;
195 u32 rx_broadcast_frames;
196 u32 rx_multicast_frames;
197 u32 rx_pause_frames;
198 u32 rx_64_byte_frames;
199 u32 rx_65_127_byte_frames;
200 u32 rx_128_255_byte_frames;
201 u32 rx_256_511_bytes_frames;
202 u32 rx_512_1023_byte_frames;
203 u32 rx_1024_1518_byte_frames;
204 u32 rx_greater_1518_byte_frames;
205 u32 eee_rx_lpi_transitions;
206 u32 eee_rx_lpi_time;
207 u32 tx_fcs_errors;
208 u32 tx_excess_deferral_errors;
209 u32 tx_carrier_errors;
210 u32 tx_bad_byte_count;
211 u32 tx_single_collisions;
212 u32 tx_multiple_collisions;
213 u32 tx_excessive_collision;
214 u32 tx_late_collisions;
215 u32 tx_unicast_byte_count;
216 u32 tx_broadcast_byte_count;
217 u32 tx_multicast_byte_count;
218 u32 tx_unicast_frames;
219 u32 tx_broadcast_frames;
220 u32 tx_multicast_frames;
221 u32 tx_pause_frames;
222 u32 tx_64_byte_frames;
223 u32 tx_65_127_byte_frames;
224 u32 tx_128_255_byte_frames;
225 u32 tx_256_511_bytes_frames;
226 u32 tx_512_1023_byte_frames;
227 u32 tx_1024_1518_byte_frames;
228 u32 tx_greater_1518_byte_frames;
229 u32 eee_tx_lpi_transitions;
230 u32 eee_tx_lpi_time;
231};
232
20ff5565
WH
233struct lan78xx_statstage64 {
234 u64 rx_fcs_errors;
235 u64 rx_alignment_errors;
236 u64 rx_fragment_errors;
237 u64 rx_jabber_errors;
238 u64 rx_undersize_frame_errors;
239 u64 rx_oversize_frame_errors;
240 u64 rx_dropped_frames;
241 u64 rx_unicast_byte_count;
242 u64 rx_broadcast_byte_count;
243 u64 rx_multicast_byte_count;
244 u64 rx_unicast_frames;
245 u64 rx_broadcast_frames;
246 u64 rx_multicast_frames;
247 u64 rx_pause_frames;
248 u64 rx_64_byte_frames;
249 u64 rx_65_127_byte_frames;
250 u64 rx_128_255_byte_frames;
251 u64 rx_256_511_bytes_frames;
252 u64 rx_512_1023_byte_frames;
253 u64 rx_1024_1518_byte_frames;
254 u64 rx_greater_1518_byte_frames;
255 u64 eee_rx_lpi_transitions;
256 u64 eee_rx_lpi_time;
257 u64 tx_fcs_errors;
258 u64 tx_excess_deferral_errors;
259 u64 tx_carrier_errors;
260 u64 tx_bad_byte_count;
261 u64 tx_single_collisions;
262 u64 tx_multiple_collisions;
263 u64 tx_excessive_collision;
264 u64 tx_late_collisions;
265 u64 tx_unicast_byte_count;
266 u64 tx_broadcast_byte_count;
267 u64 tx_multicast_byte_count;
268 u64 tx_unicast_frames;
269 u64 tx_broadcast_frames;
270 u64 tx_multicast_frames;
271 u64 tx_pause_frames;
272 u64 tx_64_byte_frames;
273 u64 tx_65_127_byte_frames;
274 u64 tx_128_255_byte_frames;
275 u64 tx_256_511_bytes_frames;
276 u64 tx_512_1023_byte_frames;
277 u64 tx_1024_1518_byte_frames;
278 u64 tx_greater_1518_byte_frames;
279 u64 eee_tx_lpi_transitions;
280 u64 eee_tx_lpi_time;
281};
282
55d7de9d
WH
283struct lan78xx_net;
284
285struct lan78xx_priv {
286 struct lan78xx_net *dev;
287 u32 rfe_ctl;
288 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
289 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
290 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
291 struct mutex dataport_mutex; /* for dataport access */
292 spinlock_t rfe_ctl_lock; /* for rfe register access */
293 struct work_struct set_multicast;
294 struct work_struct set_vlan;
295 u32 wol;
296};
297
298enum skb_state {
299 illegal = 0,
300 tx_start,
301 tx_done,
302 rx_start,
303 rx_done,
304 rx_cleanup,
305 unlink_start
306};
307
308struct skb_data { /* skb->cb is one of these */
309 struct urb *urb;
310 struct lan78xx_net *dev;
311 enum skb_state state;
312 size_t length;
74d79a2e 313 int num_of_packet;
55d7de9d
WH
314};
315
316struct usb_context {
317 struct usb_ctrlrequest req;
318 struct lan78xx_net *dev;
319};
320
321#define EVENT_TX_HALT 0
322#define EVENT_RX_HALT 1
323#define EVENT_RX_MEMORY 2
324#define EVENT_STS_SPLIT 3
325#define EVENT_LINK_RESET 4
326#define EVENT_RX_PAUSED 5
327#define EVENT_DEV_WAKING 6
328#define EVENT_DEV_ASLEEP 7
329#define EVENT_DEV_OPEN 8
20ff5565
WH
330#define EVENT_STAT_UPDATE 9
331
332struct statstage {
333 struct mutex access_lock; /* for stats access */
334 struct lan78xx_statstage saved;
335 struct lan78xx_statstage rollover_count;
336 struct lan78xx_statstage rollover_max;
337 struct lan78xx_statstage64 curr_stat;
338};
55d7de9d 339
cc89c323
WH
340struct irq_domain_data {
341 struct irq_domain *irqdomain;
342 unsigned int phyirq;
343 struct irq_chip *irqchip;
344 irq_flow_handler_t irq_handler;
345 u32 irqenable;
346 struct mutex irq_lock; /* for irq bus access */
347};
348
55d7de9d
WH
349struct lan78xx_net {
350 struct net_device *net;
351 struct usb_device *udev;
352 struct usb_interface *intf;
353 void *driver_priv;
354
355 int rx_qlen;
356 int tx_qlen;
357 struct sk_buff_head rxq;
358 struct sk_buff_head txq;
359 struct sk_buff_head done;
360 struct sk_buff_head rxq_pause;
361 struct sk_buff_head txq_pend;
362
363 struct tasklet_struct bh;
364 struct delayed_work wq;
365
55d7de9d
WH
366 int msg_enable;
367
368 struct urb *urb_intr;
369 struct usb_anchor deferred;
370
371 struct mutex phy_mutex; /* for phy access */
372 unsigned pipe_in, pipe_out, pipe_intr;
373
374 u32 hard_mtu; /* count any extra framing */
375 size_t rx_urb_size; /* size for rx urbs */
376
377 unsigned long flags;
378
379 wait_queue_head_t *wait;
380 unsigned char suspend_count;
381
382 unsigned maxpacket;
383 struct timer_list delay;
20ff5565 384 struct timer_list stat_monitor;
55d7de9d
WH
385
386 unsigned long data[5];
55d7de9d
WH
387
388 int link_on;
389 u8 mdix_ctrl;
ce85e13a 390
87177ba6
WH
391 u32 chipid;
392 u32 chiprev;
ce85e13a 393 struct mii_bus *mdiobus;
02dc1f3d 394 phy_interface_t interface;
349e0c5e
WH
395
396 int fc_autoneg;
397 u8 fc_request_control;
20ff5565
WH
398
399 int delta;
400 struct statstage stats;
cc89c323
WH
401
402 struct irq_domain_data domain_data;
55d7de9d
WH
403};
404
02dc1f3d
WH
405/* define external phy id */
406#define PHY_LAN8835 (0x0007C130)
407#define PHY_KSZ9031RNX (0x00221620)
408
55d7de9d
WH
409/* use ethtool to change the level for any given device */
410static int msg_level = -1;
411module_param(msg_level, int, 0);
412MODULE_PARM_DESC(msg_level, "Override default message level");
413
414static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
415{
416 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
417 int ret;
418
55d7de9d
WH
419 if (!buf)
420 return -ENOMEM;
421
422 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
423 USB_VENDOR_REQUEST_READ_REGISTER,
424 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
425 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
426 if (likely(ret >= 0)) {
427 le32_to_cpus(buf);
428 *data = *buf;
429 } else {
430 netdev_warn(dev->net,
431 "Failed to read register index 0x%08x. ret = %d",
432 index, ret);
433 }
434
435 kfree(buf);
436
437 return ret;
438}
439
440static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
441{
442 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
443 int ret;
444
55d7de9d
WH
445 if (!buf)
446 return -ENOMEM;
447
448 *buf = data;
449 cpu_to_le32s(buf);
450
451 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
452 USB_VENDOR_REQUEST_WRITE_REGISTER,
453 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
454 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
455 if (unlikely(ret < 0)) {
456 netdev_warn(dev->net,
457 "Failed to write register index 0x%08x. ret = %d",
458 index, ret);
459 }
460
461 kfree(buf);
462
463 return ret;
464}
465
466static int lan78xx_read_stats(struct lan78xx_net *dev,
467 struct lan78xx_statstage *data)
468{
469 int ret = 0;
470 int i;
471 struct lan78xx_statstage *stats;
472 u32 *src;
473 u32 *dst;
474
55d7de9d
WH
475 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
476 if (!stats)
477 return -ENOMEM;
478
479 ret = usb_control_msg(dev->udev,
480 usb_rcvctrlpipe(dev->udev, 0),
481 USB_VENDOR_REQUEST_GET_STATS,
482 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
483 0,
484 0,
485 (void *)stats,
486 sizeof(*stats),
487 USB_CTRL_SET_TIMEOUT);
488 if (likely(ret >= 0)) {
489 src = (u32 *)stats;
490 dst = (u32 *)data;
491 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
492 le32_to_cpus(&src[i]);
493 dst[i] = src[i];
494 }
495 } else {
496 netdev_warn(dev->net,
4f5cf943 497 "Failed to read stat ret = %d", ret);
55d7de9d
WH
498 }
499
500 kfree(stats);
501
502 return ret;
503}
504
20ff5565
WH
505#define check_counter_rollover(struct1, dev_stats, member) { \
506 if (struct1->member < dev_stats.saved.member) \
507 dev_stats.rollover_count.member++; \
508 }
509
510static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
511 struct lan78xx_statstage *stats)
512{
513 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
514 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
515 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
516 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
517 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
518 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
519 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
520 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
521 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
522 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
523 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
524 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
525 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
526 check_counter_rollover(stats, dev->stats, rx_pause_frames);
527 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
528 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
529 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
530 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
531 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
532 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
533 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
534 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
535 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
536 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
537 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
538 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
539 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
540 check_counter_rollover(stats, dev->stats, tx_single_collisions);
541 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
542 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
543 check_counter_rollover(stats, dev->stats, tx_late_collisions);
544 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
545 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
546 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
547 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
548 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
549 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
550 check_counter_rollover(stats, dev->stats, tx_pause_frames);
551 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
552 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
553 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
554 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
555 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
556 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
557 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
558 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
559 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
560
561 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
562}
563
564static void lan78xx_update_stats(struct lan78xx_net *dev)
565{
566 u32 *p, *count, *max;
567 u64 *data;
568 int i;
569 struct lan78xx_statstage lan78xx_stats;
570
571 if (usb_autopm_get_interface(dev->intf) < 0)
572 return;
573
574 p = (u32 *)&lan78xx_stats;
575 count = (u32 *)&dev->stats.rollover_count;
576 max = (u32 *)&dev->stats.rollover_max;
577 data = (u64 *)&dev->stats.curr_stat;
578
579 mutex_lock(&dev->stats.access_lock);
580
581 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
582 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
583
584 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
585 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
586
587 mutex_unlock(&dev->stats.access_lock);
588
589 usb_autopm_put_interface(dev->intf);
590}
591
55d7de9d
WH
592/* Loop until the read is completed with timeout called with phy_mutex held */
593static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
594{
595 unsigned long start_time = jiffies;
596 u32 val;
597 int ret;
598
599 do {
600 ret = lan78xx_read_reg(dev, MII_ACC, &val);
601 if (unlikely(ret < 0))
602 return -EIO;
603
604 if (!(val & MII_ACC_MII_BUSY_))
605 return 0;
606 } while (!time_after(jiffies, start_time + HZ));
607
608 return -EIO;
609}
610
611static inline u32 mii_access(int id, int index, int read)
612{
613 u32 ret;
614
615 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
616 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
617 if (read)
618 ret |= MII_ACC_MII_READ_;
619 else
620 ret |= MII_ACC_MII_WRITE_;
621 ret |= MII_ACC_MII_BUSY_;
622
623 return ret;
624}
625
55d7de9d
WH
626static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
627{
628 unsigned long start_time = jiffies;
629 u32 val;
630 int ret;
631
632 do {
633 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
634 if (unlikely(ret < 0))
635 return -EIO;
636
637 if (!(val & E2P_CMD_EPC_BUSY_) ||
638 (val & E2P_CMD_EPC_TIMEOUT_))
639 break;
640 usleep_range(40, 100);
641 } while (!time_after(jiffies, start_time + HZ));
642
643 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
644 netdev_warn(dev->net, "EEPROM read operation timeout");
645 return -EIO;
646 }
647
648 return 0;
649}
650
651static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
652{
653 unsigned long start_time = jiffies;
654 u32 val;
655 int ret;
656
657 do {
658 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
659 if (unlikely(ret < 0))
660 return -EIO;
661
662 if (!(val & E2P_CMD_EPC_BUSY_))
663 return 0;
664
665 usleep_range(40, 100);
666 } while (!time_after(jiffies, start_time + HZ));
667
668 netdev_warn(dev->net, "EEPROM is busy");
669 return -EIO;
670}
671
672static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
673 u32 length, u8 *data)
674{
675 u32 val;
a0db7d10 676 u32 saved;
55d7de9d 677 int i, ret;
a0db7d10
WH
678 int retval;
679
680 /* depends on chip, some EEPROM pins are muxed with LED function.
681 * disable & restore LED function to access EEPROM.
682 */
683 ret = lan78xx_read_reg(dev, HW_CFG, &val);
684 saved = val;
87177ba6 685 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
686 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
687 ret = lan78xx_write_reg(dev, HW_CFG, val);
688 }
55d7de9d 689
a0db7d10
WH
690 retval = lan78xx_eeprom_confirm_not_busy(dev);
691 if (retval)
692 return retval;
55d7de9d
WH
693
694 for (i = 0; i < length; i++) {
695 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
696 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
697 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
698 if (unlikely(ret < 0)) {
699 retval = -EIO;
700 goto exit;
701 }
55d7de9d 702
a0db7d10
WH
703 retval = lan78xx_wait_eeprom(dev);
704 if (retval < 0)
705 goto exit;
55d7de9d
WH
706
707 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
a0db7d10
WH
708 if (unlikely(ret < 0)) {
709 retval = -EIO;
710 goto exit;
711 }
55d7de9d
WH
712
713 data[i] = val & 0xFF;
714 offset++;
715 }
716
a0db7d10
WH
717 retval = 0;
718exit:
87177ba6 719 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
720 ret = lan78xx_write_reg(dev, HW_CFG, saved);
721
722 return retval;
55d7de9d
WH
723}
724
725static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
726 u32 length, u8 *data)
727{
728 u8 sig;
729 int ret;
730
731 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
732 if ((ret == 0) && (sig == EEPROM_INDICATOR))
733 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
734 else
735 ret = -EINVAL;
736
737 return ret;
738}
739
740static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
741 u32 length, u8 *data)
742{
743 u32 val;
a0db7d10 744 u32 saved;
55d7de9d 745 int i, ret;
a0db7d10
WH
746 int retval;
747
748 /* depends on chip, some EEPROM pins are muxed with LED function.
749 * disable & restore LED function to access EEPROM.
750 */
751 ret = lan78xx_read_reg(dev, HW_CFG, &val);
752 saved = val;
87177ba6 753 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
754 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
755 ret = lan78xx_write_reg(dev, HW_CFG, val);
756 }
55d7de9d 757
a0db7d10
WH
758 retval = lan78xx_eeprom_confirm_not_busy(dev);
759 if (retval)
760 goto exit;
55d7de9d
WH
761
762 /* Issue write/erase enable command */
763 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
764 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
765 if (unlikely(ret < 0)) {
766 retval = -EIO;
767 goto exit;
768 }
55d7de9d 769
a0db7d10
WH
770 retval = lan78xx_wait_eeprom(dev);
771 if (retval < 0)
772 goto exit;
55d7de9d
WH
773
774 for (i = 0; i < length; i++) {
775 /* Fill data register */
776 val = data[i];
777 ret = lan78xx_write_reg(dev, E2P_DATA, val);
a0db7d10
WH
778 if (ret < 0) {
779 retval = -EIO;
780 goto exit;
781 }
55d7de9d
WH
782
783 /* Send "write" command */
784 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
785 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
786 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
787 if (ret < 0) {
788 retval = -EIO;
789 goto exit;
790 }
55d7de9d 791
a0db7d10
WH
792 retval = lan78xx_wait_eeprom(dev);
793 if (retval < 0)
794 goto exit;
55d7de9d
WH
795
796 offset++;
797 }
798
a0db7d10
WH
799 retval = 0;
800exit:
87177ba6 801 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
802 ret = lan78xx_write_reg(dev, HW_CFG, saved);
803
804 return retval;
55d7de9d
WH
805}
806
807static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
808 u32 length, u8 *data)
809{
810 int i;
811 int ret;
812 u32 buf;
813 unsigned long timeout;
814
815 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
816
817 if (buf & OTP_PWR_DN_PWRDN_N_) {
818 /* clear it and wait to be cleared */
819 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
820
821 timeout = jiffies + HZ;
822 do {
823 usleep_range(1, 10);
824 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
825 if (time_after(jiffies, timeout)) {
826 netdev_warn(dev->net,
827 "timeout on OTP_PWR_DN");
828 return -EIO;
829 }
830 } while (buf & OTP_PWR_DN_PWRDN_N_);
831 }
832
833 for (i = 0; i < length; i++) {
834 ret = lan78xx_write_reg(dev, OTP_ADDR1,
835 ((offset + i) >> 8) & OTP_ADDR1_15_11);
836 ret = lan78xx_write_reg(dev, OTP_ADDR2,
837 ((offset + i) & OTP_ADDR2_10_3));
838
839 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
840 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
841
842 timeout = jiffies + HZ;
843 do {
844 udelay(1);
845 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
846 if (time_after(jiffies, timeout)) {
847 netdev_warn(dev->net,
848 "timeout on OTP_STATUS");
849 return -EIO;
850 }
851 } while (buf & OTP_STATUS_BUSY_);
852
853 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
854
855 data[i] = (u8)(buf & 0xFF);
856 }
857
858 return 0;
859}
860
9fb6066d
WH
861static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
862 u32 length, u8 *data)
863{
864 int i;
865 int ret;
866 u32 buf;
867 unsigned long timeout;
868
869 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
870
871 if (buf & OTP_PWR_DN_PWRDN_N_) {
872 /* clear it and wait to be cleared */
873 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
874
875 timeout = jiffies + HZ;
876 do {
877 udelay(1);
878 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
879 if (time_after(jiffies, timeout)) {
880 netdev_warn(dev->net,
881 "timeout on OTP_PWR_DN completion");
882 return -EIO;
883 }
884 } while (buf & OTP_PWR_DN_PWRDN_N_);
885 }
886
887 /* set to BYTE program mode */
888 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
889
890 for (i = 0; i < length; i++) {
891 ret = lan78xx_write_reg(dev, OTP_ADDR1,
892 ((offset + i) >> 8) & OTP_ADDR1_15_11);
893 ret = lan78xx_write_reg(dev, OTP_ADDR2,
894 ((offset + i) & OTP_ADDR2_10_3));
895 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
896 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
897 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
898
899 timeout = jiffies + HZ;
900 do {
901 udelay(1);
902 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
903 if (time_after(jiffies, timeout)) {
904 netdev_warn(dev->net,
905 "Timeout on OTP_STATUS completion");
906 return -EIO;
907 }
908 } while (buf & OTP_STATUS_BUSY_);
909 }
910
911 return 0;
912}
913
55d7de9d
WH
914static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
915 u32 length, u8 *data)
916{
917 u8 sig;
918 int ret;
919
920 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
921
922 if (ret == 0) {
923 if (sig == OTP_INDICATOR_1)
924 offset = offset;
925 else if (sig == OTP_INDICATOR_2)
926 offset += 0x100;
927 else
928 ret = -EINVAL;
c0e0cd65
PE
929 if (!ret)
930 ret = lan78xx_read_raw_otp(dev, offset, length, data);
55d7de9d
WH
931 }
932
933 return ret;
934}
935
936static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
937{
938 int i, ret;
939
940 for (i = 0; i < 100; i++) {
941 u32 dp_sel;
942
943 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
944 if (unlikely(ret < 0))
945 return -EIO;
946
947 if (dp_sel & DP_SEL_DPRDY_)
948 return 0;
949
950 usleep_range(40, 100);
951 }
952
953 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
954
955 return -EIO;
956}
957
958static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
959 u32 addr, u32 length, u32 *buf)
960{
961 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
962 u32 dp_sel;
963 int i, ret;
964
965 if (usb_autopm_get_interface(dev->intf) < 0)
966 return 0;
967
968 mutex_lock(&pdata->dataport_mutex);
969
970 ret = lan78xx_dataport_wait_not_busy(dev);
971 if (ret < 0)
972 goto done;
973
974 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
975
976 dp_sel &= ~DP_SEL_RSEL_MASK_;
977 dp_sel |= ram_select;
978 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
979
980 for (i = 0; i < length; i++) {
981 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
982
983 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
984
985 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
986
987 ret = lan78xx_dataport_wait_not_busy(dev);
988 if (ret < 0)
989 goto done;
990 }
991
992done:
993 mutex_unlock(&pdata->dataport_mutex);
994 usb_autopm_put_interface(dev->intf);
995
996 return ret;
997}
998
999static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1000 int index, u8 addr[ETH_ALEN])
1001{
1002 u32 temp;
1003
1004 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1005 temp = addr[3];
1006 temp = addr[2] | (temp << 8);
1007 temp = addr[1] | (temp << 8);
1008 temp = addr[0] | (temp << 8);
1009 pdata->pfilter_table[index][1] = temp;
1010 temp = addr[5];
1011 temp = addr[4] | (temp << 8);
1012 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1013 pdata->pfilter_table[index][0] = temp;
1014 }
1015}
1016
1017/* returns hash bit number for given MAC address */
1018static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1019{
1020 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1021}
1022
1023static void lan78xx_deferred_multicast_write(struct work_struct *param)
1024{
1025 struct lan78xx_priv *pdata =
1026 container_of(param, struct lan78xx_priv, set_multicast);
1027 struct lan78xx_net *dev = pdata->dev;
1028 int i;
1029 int ret;
1030
1031 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1032 pdata->rfe_ctl);
1033
1034 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1035 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1036
1037 for (i = 1; i < NUM_OF_MAF; i++) {
1038 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1039 ret = lan78xx_write_reg(dev, MAF_LO(i),
1040 pdata->pfilter_table[i][1]);
1041 ret = lan78xx_write_reg(dev, MAF_HI(i),
1042 pdata->pfilter_table[i][0]);
1043 }
1044
1045 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1046}
1047
/* ndo_set_rx_mode handler: rebuild the RX filter state (promisc/allmulti
 * flags, perfect-filter table and multicast hash) under the rfe_ctl
 * spinlock, then defer the actual USB register writes to a workqueue,
 * since this callback can run in atomic context.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean filter configuration */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast is always accepted */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;	/* NOTE: intentionally shadows the outer i */

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		/* slot 0 holds our own address, so start at 1 */
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow entries fall back to the hash */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
					(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1110
/* Program the MAC flow-control (FLOW) and FIFO threshold (FCT_FLOW)
 * registers from the resolved pause capabilities.
 * @duplex is currently unused; @lcladv/@rmtadv are the local and
 * link-partner autoneg advertisement words.
 * Always returns 0; register write errors are ignored.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	/* resolve from autoneg result, or honor the user-forced setting */
	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		/* 0xFFFF: presumably the pause-time field — confirm against
		 * the LAN78xx datasheet
		 */
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO thresholds depend on USB link speed; for other speeds
	 * fct_flow stays 0
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1145
/* Handle a PHY interrupt: detect link up/down transitions and
 * reconfigure the MAC accordingly (MAC reset on link loss; USB LPM
 * states, flow control and stats timer on link up).
 * Returns 0 or a negative errno.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	/* refresh phydev->link/speed/duplex from hardware */
	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no point collecting stats while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* (re)start periodic statistics collection */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* kick the bottom half to resume the queues */
		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1223
1224/* some work can't be done in tasklets, so we use keventd
1225 *
1226 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1227 * but tasklet_schedule() doesn't. hope the failure is rare.
1228 */
e0c79ff6 1229static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
55d7de9d
WH
1230{
1231 set_bit(work, &dev->flags);
1232 if (!schedule_delayed_work(&dev->wq, 0))
1233 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1234}
1235
1236static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1237{
1238 u32 intdata;
1239
1240 if (urb->actual_length != 4) {
1241 netdev_warn(dev->net,
1242 "unexpected urb length %d", urb->actual_length);
1243 return;
1244 }
1245
1246 memcpy(&intdata, urb->transfer_buffer, 4);
1247 le32_to_cpus(&intdata);
1248
1249 if (intdata & INT_ENP_PHY_INT) {
1250 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
cc89c323
WH
1251 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1252
1253 if (dev->domain_data.phyirq > 0)
1254 generic_handle_irq(dev->domain_data.phyirq);
55d7de9d
WH
1255 } else
1256 netdev_warn(dev->net,
1257 "unexpected interrupt: 0x%08x\n", intdata);
1258}
1259
/* ethtool get_eeprom_len: report the fixed EEPROM size. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1264
1265static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1266 struct ethtool_eeprom *ee, u8 *data)
1267{
1268 struct lan78xx_net *dev = netdev_priv(netdev);
8a7ffeb7
NS
1269 int ret;
1270
1271 ret = usb_autopm_get_interface(dev->intf);
1272 if (ret)
1273 return ret;
55d7de9d
WH
1274
1275 ee->magic = LAN78XX_EEPROM_MAGIC;
1276
8a7ffeb7
NS
1277 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1278
1279 usb_autopm_put_interface(dev->intf);
1280
1281 return ret;
55d7de9d
WH
1282}
1283
1284static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1285 struct ethtool_eeprom *ee, u8 *data)
1286{
1287 struct lan78xx_net *dev = netdev_priv(netdev);
8a7ffeb7
NS
1288 int ret;
1289
1290 ret = usb_autopm_get_interface(dev->intf);
1291 if (ret)
1292 return ret;
55d7de9d 1293
c0776822
NS
1294 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1295 * to load data from EEPROM
1296 */
1297 if (ee->magic == LAN78XX_EEPROM_MAGIC)
8a7ffeb7 1298 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
55d7de9d
WH
1299 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1300 (ee->offset == 0) &&
1301 (ee->len == 512) &&
1302 (data[0] == OTP_INDICATOR_1))
8a7ffeb7 1303 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d 1304
8a7ffeb7
NS
1305 usb_autopm_put_interface(dev->intf);
1306
1307 return ret;
55d7de9d
WH
1308}
1309
/* ethtool get_strings: copy out the statistics name table. */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1316
1317static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1318{
1319 if (sset == ETH_SS_STATS)
1320 return ARRAY_SIZE(lan78xx_gstrings);
1321 else
1322 return -EOPNOTSUPP;
1323}
1324
/* ethtool get_ethtool_stats: refresh hardware counters, then copy the
 * cached snapshot out under the stats mutex.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1336
/* ethtool get_wol: report Wake-on-LAN capability/configuration.
 * WoL is only advertised when remote wakeup is enabled in USB_CFG0.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		/* can't talk to the device: report no WoL support */
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1364
1365static int lan78xx_set_wol(struct net_device *netdev,
1366 struct ethtool_wolinfo *wol)
1367{
1368 struct lan78xx_net *dev = netdev_priv(netdev);
1369 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1370 int ret;
1371
1372 ret = usb_autopm_get_interface(dev->intf);
1373 if (ret < 0)
1374 return ret;
1375
9f0962c0
FF
1376 if (wol->wolopts & ~WAKE_ALL)
1377 return -EINVAL;
1378
1379 pdata->wol = wol->wolopts;
55d7de9d
WH
1380
1381 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1382
ce85e13a
WH
1383 phy_ethtool_set_wol(netdev->phydev, wol);
1384
55d7de9d
WH
1385 usb_autopm_put_interface(dev->intf);
1386
1387 return ret;
1388}
1389
/* ethtool get_eee: combine the PHY's EEE state with the MAC's
 * MAC_CR_EEE_EN_ enable bit and LPI timer.
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* fills in advertised/lp_advertised from the PHY */
	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both ends advertise a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1427
/* ethtool set_eee: enable/disable EEE in the MAC and, when enabling,
 * push the advertisement to the PHY and program the LPI request delay.
 * NOTE(review): intermediate register errors are ignored and the
 * function always returns 0 — confirm whether errors should propagate.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same unit */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1457
1458static u32 lan78xx_get_link(struct net_device *net)
1459{
ce85e13a 1460 phy_read_status(net->phydev);
55d7de9d 1461
ce85e13a 1462 return net->phydev->link;
55d7de9d
WH
1463}
1464
55d7de9d
WH
1465static void lan78xx_get_drvinfo(struct net_device *net,
1466 struct ethtool_drvinfo *info)
1467{
1468 struct lan78xx_net *dev = netdev_priv(net);
1469
1470 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1471 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1472 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1473}
1474
1475static u32 lan78xx_get_msglevel(struct net_device *net)
1476{
1477 struct lan78xx_net *dev = netdev_priv(net);
1478
1479 return dev->msg_enable;
1480}
1481
1482static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1483{
1484 struct lan78xx_net *dev = netdev_priv(net);
1485
1486 dev->msg_enable = level;
1487}
1488
6e76510e
PR
/* ethtool get_link_ksettings: delegate to phylib, holding a USB autopm
 * reference while the PHY is queried.
 */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	/* ret is 0 here (autopm get succeeded) */
	return ret;
}
1506
6e76510e
PR
/* ethtool set_link_ksettings: apply speed/duplex/autoneg via phylib.
 * When autoneg is off, briefly force the link down (via BMCR loopback)
 * so the partner renegotiates against the new forced mode.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		/* restore BMCR, dropping loopback again */
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1534
349e0c5e
WH
1535static void lan78xx_get_pause(struct net_device *net,
1536 struct ethtool_pauseparam *pause)
1537{
1538 struct lan78xx_net *dev = netdev_priv(net);
1539 struct phy_device *phydev = net->phydev;
6e76510e 1540 struct ethtool_link_ksettings ecmd;
349e0c5e 1541
6e76510e 1542 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e
WH
1543
1544 pause->autoneg = dev->fc_autoneg;
1545
1546 if (dev->fc_request_control & FLOW_CTRL_TX)
1547 pause->tx_pause = 1;
1548
1549 if (dev->fc_request_control & FLOW_CTRL_RX)
1550 pause->rx_pause = 1;
1551}
1552
/* ethtool set_pauseparam: record the requested rx/tx pause settings and,
 * if link autoneg is on, fold them into the PHY advertisement and
 * renegotiate. Returns 0, or -EINVAL if pause-autoneg is requested while
 * link autoneg is disabled.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg requires link autoneg to be enabled */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		/* rewrite only the Pause bits of the advertisement */
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1598
55d7de9d
WH
/* ethtool operations table for the lan78xx netdev. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1620
1621static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1622{
55d7de9d
WH
1623 if (!netif_running(netdev))
1624 return -EINVAL;
1625
ce85e13a 1626 return phy_mii_ioctl(netdev->phydev, rq, cmd);
55d7de9d
WH
1627}
1628
/* Determine and program the MAC address at probe time.
 * Precedence: address already in RX_ADDRL/H (e.g. set by firmware),
 * then devicetree, then EEPROM/OTP, and finally a random address.
 * Whatever wins is also written to perfect-filter slot 0 (MAF 0).
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];
	const u8 *mac_addr;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the little-endian register pair into byte order */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* maybe the boot loader passed the MAC address in devicetree */
		mac_addr = of_get_mac_address(dev->udev->dev.of_node);
		if (mac_addr)
			memcpy(addr, mac_addr, ETH_ALEN);
		if (is_valid_ether_addr(addr)) {
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from devicetree");
		}

		if (!is_valid_ether_addr(addr)) {
			/* reading mac address from EEPROM or OTP */
			if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
						 addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
					      addr) == 0)) {
				if (is_valid_ether_addr(addr)) {
					/* eeprom values are valid so use them */
					netif_dbg(dev, ifup, dev->net,
						  "MAC address read from EEPROM");
				}
			}
		}

		if (!is_valid_ether_addr(addr)) {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* repack and program the freshly chosen address */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* mirror the address into perfect-filter slot 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1689
ce85e13a
WH
/* MDIO read and write wrappers for phylib */

/* Read a PHY register over the MAC's MII interface.
 * Returns the 16-bit register value, or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* serialize against other MII accesses */
	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* success: return the 16-bit register value */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1726
1727static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1728 u16 regval)
1729{
1730 struct lan78xx_net *dev = bus->priv;
1731 u32 val, addr;
1732 int ret;
1733
1734 ret = usb_autopm_get_interface(dev->intf);
1735 if (ret < 0)
1736 return ret;
1737
1738 mutex_lock(&dev->phy_mutex);
1739
1740 /* confirm MII not busy */
1741 ret = lan78xx_phy_wait_not_busy(dev);
1742 if (ret < 0)
1743 goto done;
1744
1745 val = (u32)regval;
1746 ret = lan78xx_write_reg(dev, MII_DATA, val);
1747
1748 /* set the address, index & direction (write to PHY) */
1749 addr = mii_access(phy_id, idx, MII_WRITE);
1750 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1751
1752 ret = lan78xx_phy_wait_not_busy(dev);
1753 if (ret < 0)
1754 goto done;
1755
1756done:
1757 mutex_unlock(&dev->phy_mutex);
1758 usb_autopm_put_interface(dev->intf);
1759 return 0;
1760}
1761
/* Allocate and register the MDIO bus used to reach the (internal or
 * external) PHY. Returns 0 on success or a negative errno.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* unique bus id derived from the USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1805
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1811
/* phylib link-change callback.
 * Contains a hardware workaround only; the return values of the
 * phy_read/phy_write calls are deliberately not acted upon.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1843
cc89c323
WH
1844static int irq_map(struct irq_domain *d, unsigned int irq,
1845 irq_hw_number_t hwirq)
1846{
1847 struct irq_domain_data *data = d->host_data;
1848
1849 irq_set_chip_data(irq, data);
1850 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1851 irq_set_noprobe(irq);
1852
1853 return 0;
1854}
1855
/* irq_domain unmap callback: detach chip and handler from the virq. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1861
/* irq_domain operations for the device's interrupt-endpoint domain. */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1866
/* irqchip mask: clear the enable bit; flushed to hardware in
 * lan78xx_irq_bus_sync_unlock().
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
1873
/* irqchip unmask: set the enable bit; flushed to hardware in
 * lan78xx_irq_bus_sync_unlock().
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
1880
/* irqchip bus_lock: taken around mask/unmask so the (sleeping) USB
 * register update in bus_sync_unlock is serialized.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1887
/* irqchip bus_sync_unlock: push the accumulated enable mask to the
 * INT_EP_CTL register, then release the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1905
/* irqchip backing the device's interrupt-endpoint irq domain. */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1913
/* Create an irq domain over the device's interrupt endpoint and map the
 * PHY interrupt line. On success dev->domain_data.phyirq holds the virq
 * used by phylib. Returns 0 or -EINVAL.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable mask from the current register value */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
1952
/* Undo lan78xx_setup_irq_domain(): dispose the PHY irq mapping and
 * remove the domain. Safe to call when setup failed (phyirq == 0).
 */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
1964
02dc1f3d
WH
/* PHY fixup for an external LAN8835: route the IRQ pin, enable RGMII
 * TXC delay in the MAC and tune the TX DLL.
 * Returns 1 (fixup applied), per phy_register_fixup_for_uid convention.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
1988
/* PHY fixup for an external Micrel KSZ9031RNX: program RGMII pad skews.
 * Returns 1 (fixup applied), per phy_register_fixup_for_uid convention.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2005
55d7de9d
WH
2006static int lan78xx_phy_init(struct lan78xx_net *dev)
2007{
ce85e13a 2008 int ret;
349e0c5e 2009 u32 mii_adv;
ce85e13a 2010 struct phy_device *phydev = dev->net->phydev;
55d7de9d 2011
ce85e13a
WH
2012 phydev = phy_find_first(dev->mdiobus);
2013 if (!phydev) {
2014 netdev_err(dev->net, "no PHY found\n");
2015 return -EIO;
2016 }
55d7de9d 2017
02dc1f3d
WH
2018 if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2019 (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2020 phydev->is_internal = true;
2021 dev->interface = PHY_INTERFACE_MODE_GMII;
2022
2023 } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2024 if (!phydev->drv) {
2025 netdev_err(dev->net, "no PHY driver found\n");
2026 return -EIO;
2027 }
2028
2029 dev->interface = PHY_INTERFACE_MODE_RGMII;
2030
2031 /* external PHY fixup for KSZ9031RNX */
2032 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2033 ksz9031rnx_fixup);
2034 if (ret < 0) {
2035 netdev_err(dev->net, "fail to register fixup\n");
2036 return ret;
2037 }
2038 /* external PHY fixup for LAN8835 */
2039 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2040 lan8835_fixup);
2041 if (ret < 0) {
2042 netdev_err(dev->net, "fail to register fixup\n");
2043 return ret;
2044 }
2045 /* add more external PHY fixup here if needed */
2046
2047 phydev->is_internal = false;
2048 } else {
2049 netdev_err(dev->net, "unknown ID found\n");
2050 ret = -EIO;
2051 goto error;
2052 }
2053
cc89c323
WH
2054 /* if phyirq is not set, use polling mode in phylib */
2055 if (dev->domain_data.phyirq > 0)
2056 phydev->irq = dev->domain_data.phyirq;
2057 else
2058 phydev->irq = 0;
2059 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
e4953910 2060
f6e3ef3e
WH
2061 /* set to AUTOMDIX */
2062 phydev->mdix = ETH_TP_MDI_AUTO;
2063
ce85e13a
WH
2064 ret = phy_connect_direct(dev->net, phydev,
2065 lan78xx_link_status_change,
02dc1f3d 2066 dev->interface);
ce85e13a
WH
2067 if (ret) {
2068 netdev_err(dev->net, "can't attach PHY to %s\n",
2069 dev->mdiobus->id);
2070 return -EIO;
2071 }
55d7de9d 2072
ce85e13a
WH
2073 /* MAC doesn't support 1000T Half */
2074 phydev->supported &= ~SUPPORTED_1000baseT_Half;
e270b2db 2075
349e0c5e
WH
2076 /* support both flow controls */
2077 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2078 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2079 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2080 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2081
ce85e13a
WH
2082 genphy_config_aneg(phydev);
2083
349e0c5e
WH
2084 dev->fc_autoneg = phydev->autoneg;
2085
55d7de9d 2086 return 0;
02dc1f3d
WH
2087
2088error:
2089 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2090 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2091
2092 return ret;
55d7de9d
WH
2093}
2094
/* Program the maximum accepted RX frame length (@size excludes FCS),
 * temporarily disabling the receiver if it is running.
 * Always returns 0; register write errors are ignored.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	/* the max-size field must not be changed while RX is enabled */
	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* restore the receiver if we stopped it above */
	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
2123
/* Asynchronously unlink every URB on queue @q that is not already being
 * unlinked. Returns the number of URBs successfully unlinked.
 * Must be called without q->lock held; the lock is dropped around each
 * usb_unlink_urb() call, so the walk restarts from the head each pass.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2168
2169static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2170{
2171 struct lan78xx_net *dev = netdev_priv(netdev);
2172 int ll_mtu = new_mtu + netdev->hard_header_len;
2173 int old_hard_mtu = dev->hard_mtu;
2174 int old_rx_urb_size = dev->rx_urb_size;
2175 int ret;
2176
55d7de9d
WH
2177 /* no second zero-length packet read wanted after mtu-sized packets */
2178 if ((ll_mtu % dev->maxpacket) == 0)
2179 return -EDOM;
2180
2181 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2182
2183 netdev->mtu = new_mtu;
2184
2185 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2186 if (dev->rx_urb_size == old_hard_mtu) {
2187 dev->rx_urb_size = dev->hard_mtu;
2188 if (dev->rx_urb_size > old_rx_urb_size) {
2189 if (netif_running(dev->net)) {
2190 unlink_urbs(dev, &dev->rxq);
2191 tasklet_schedule(&dev->bh);
2192 }
2193 }
2194 }
2195
2196 return 0;
2197}
2198
e0c79ff6 2199static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
55d7de9d
WH
2200{
2201 struct lan78xx_net *dev = netdev_priv(netdev);
2202 struct sockaddr *addr = p;
2203 u32 addr_lo, addr_hi;
2204 int ret;
2205
2206 if (netif_running(netdev))
2207 return -EBUSY;
2208
2209 if (!is_valid_ether_addr(addr->sa_data))
2210 return -EADDRNOTAVAIL;
2211
2212 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2213
2214 addr_lo = netdev->dev_addr[0] |
2215 netdev->dev_addr[1] << 8 |
2216 netdev->dev_addr[2] << 16 |
2217 netdev->dev_addr[3] << 24;
2218 addr_hi = netdev->dev_addr[4] |
2219 netdev->dev_addr[5] << 8;
2220
2221 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2222 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2223
9c5239ee
JM
2224 /* Added to support MAC address changes */
2225 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2226 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2227
55d7de9d
WH
2228 return 0;
2229}
2230
2231/* Enable or disable Rx checksum offload engine */
2232static int lan78xx_set_features(struct net_device *netdev,
2233 netdev_features_t features)
2234{
2235 struct lan78xx_net *dev = netdev_priv(netdev);
2236 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2237 unsigned long flags;
2238 int ret;
2239
2240 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2241
2242 if (features & NETIF_F_RXCSUM) {
2243 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2244 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2245 } else {
2246 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2247 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2248 }
2249
2250 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2251 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2252 else
2253 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2254
2255 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2256
2257 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2258
2259 return 0;
2260}
2261
/* Work handler: flush the cached VLAN filter table to the device's
 * VLAN dataport RAM.  Runs in process context so the USB register
 * access may sleep (callers schedule it for exactly that reason).
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2271
2272static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2273 __be16 proto, u16 vid)
2274{
2275 struct lan78xx_net *dev = netdev_priv(netdev);
2276 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2277 u16 vid_bit_index;
2278 u16 vid_dword_index;
2279
2280 vid_dword_index = (vid >> 5) & 0x7F;
2281 vid_bit_index = vid & 0x1F;
2282
2283 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2284
2285 /* defer register writes to a sleepable context */
2286 schedule_work(&pdata->set_vlan);
2287
2288 return 0;
2289}
2290
2291static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2292 __be16 proto, u16 vid)
2293{
2294 struct lan78xx_net *dev = netdev_priv(netdev);
2295 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2296 u16 vid_bit_index;
2297 u16 vid_dword_index;
2298
2299 vid_dword_index = (vid >> 5) & 0x7F;
2300 vid_bit_index = vid & 0x1F;
2301
2302 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2303
2304 /* defer register writes to a sleepable context */
2305 schedule_work(&pdata->set_vlan);
2306
2307 return 0;
2308}
2309
/* Initialise the USB Latency Tolerance Messaging (LTM) registers.
 *
 * Defaults to all zeroes.  When LTM is enabled in USB_CFG1 and the two
 * bytes at EEPROM/OTP offset 0x3F describe a 24-byte configuration
 * block (temp[0] == 24; temp[1] appears to be a word offset — the raw
 * read uses temp[1] * 2), those values are loaded instead.  If the raw
 * read fails, the function bails out without writing any LTM register.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			/* fall back to OTP when no EEPROM is present */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2348
/* Full hardware (re)initialisation: lite reset, MAC address, USB and
 * burst configuration, FIFO sizes, flow control, RX filter engine,
 * PHY reset, and finally TX/RX enable.
 *
 * Returns 0 on success, or -EIO when a self-clearing reset bit fails
 * to clear within ~1 second.  Most register I/O results are assigned
 * to @ret but deliberately not checked (existing driver convention).
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* issue a "lite" reset and poll until the bit self-clears */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size the burst cap and queue depths by USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* set the HW_CFG_MEF_ and USB_CFG_BCE_ options */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear interrupts and disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* wait for the reset bit to clear AND the ready bit to assert */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable the MAC and FCT transmit paths ... */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* ... then the receive paths */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2486
20ff5565
WH
2487static void lan78xx_init_stats(struct lan78xx_net *dev)
2488{
2489 u32 *p;
2490 int i;
2491
2492 /* initialize for stats update
2493 * some counters are 20bits and some are 32bits
2494 */
2495 p = (u32 *)&dev->stats.rollover_max;
2496 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2497 p[i] = 0xFFFFF;
2498
2499 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2500 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2501 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2502 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2503 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2504 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2505 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2506 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2507 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2508 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2509
0f02f2a5 2510 set_bit(EVENT_STAT_UPDATE, &dev->flags);
20ff5565
WH
2511}
2512
55d7de9d
WH
/* ndo_open handler: take a PM reference, start the PHY, submit the
 * interrupt URB used for link-change notification, initialise stats
 * and start the queue.  Link state evaluation itself is deferred to
 * the kevent worker (EVENT_LINK_RESET).
 *
 * On interrupt-URB submission failure the error is returned after
 * dropping the PM reference; NOTE(review): the PHY is left started in
 * that error path — confirm this is intentional.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* defer link evaluation to the kevent worker */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2551
2552static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2553{
2554 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2555 DECLARE_WAITQUEUE(wait, current);
2556 int temp;
2557
2558 /* ensure there are no more active urbs */
2559 add_wait_queue(&unlink_wakeup, &wait);
2560 set_current_state(TASK_UNINTERRUPTIBLE);
2561 dev->wait = &unlink_wakeup;
2562 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2563
2564 /* maybe wait for deletions to finish. */
2565 while (!skb_queue_empty(&dev->rxq) &&
2566 !skb_queue_empty(&dev->txq) &&
2567 !skb_queue_empty(&dev->done)) {
2568 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2569 set_current_state(TASK_UNINTERRUPTIBLE);
2570 netif_dbg(dev, ifdown, dev->net,
2571 "waited for %d urb completions\n", temp);
2572 }
2573 set_current_state(TASK_RUNNING);
2574 dev->wait = NULL;
2575 remove_wait_queue(&unlink_wakeup, &wait);
2576}
2577
/* ndo_stop handler: stop the stats timer and PHY, quiesce all
 * in-flight urbs, kill the interrupt urb, purge paused RX packets,
 * then neuter deferred work by zeroing dev->flags before cancelling
 * the delayed work and tasklet.  Drops the PM reference taken at
 * open time.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2614
55d7de9d
WH
/* Prepend the two 4-byte TX command words (TX_CMD_A/TX_CMD_B) to @skb.
 *
 * Consumes @skb on failure and returns NULL.  The skb is given
 * TX_OVERHEAD bytes of writable headroom and linearized.  Checksum
 * offload, LSO (with the MSS clamped to TX_CMD_B_MSS_MIN_) and VLAN
 * tag insertion are encoded into the command words, which are stored
 * little-endian, B first then A, in front of the frame data.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	/* make writable headroom for the 8-byte command header */
	if (skb_cow_head(skb, TX_OVERHEAD)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	/* hardware needs one contiguous buffer */
	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* push B then A so A ends up first in the buffer */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2659
/* Move @skb from @list to dev->done, marking it with @state, and
 * schedule the bottom-half tasklet when the done queue transitions
 * from empty.  Returns the skb's previous state so callers can detect
 * a concurrent unlink (unlink_start).
 *
 * Locking: the irq flags saved when taking list->lock are restored
 * when releasing dev->done.lock — the two critical sections are
 * deliberately chained so interrupts stay disabled across the
 * hand-off between the queues.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2682
/* URB completion handler for bulk-out transfers.
 *
 * Updates TX statistics (per-urb aggregate counts recorded by
 * lan78xx_tx_bh), reacts to error statuses — a stalled endpoint is
 * handed to the kevent worker, fatal link errors stop the queue —
 * drops the async PM reference taken at submit time, and defers
 * skb/urb cleanup to the bottom half via the done queue.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: let the worker clear the halt */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2721
2722static void lan78xx_queue_skb(struct sk_buff_head *list,
2723 struct sk_buff *newsk, enum skb_state state)
2724{
2725 struct skb_data *entry = (struct skb_data *)newsk->cb;
2726
2727 __skb_queue_tail(list, newsk);
2728 entry->state = state;
2729}
2730
e0c79ff6
BX
2731static netdev_tx_t
2732lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
55d7de9d
WH
2733{
2734 struct lan78xx_net *dev = netdev_priv(net);
81c38e81 2735 struct sk_buff *skb2 = NULL;
55d7de9d 2736
81c38e81 2737 if (skb) {
55d7de9d 2738 skb_tx_timestamp(skb);
81c38e81
WH
2739 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2740 }
55d7de9d 2741
81c38e81
WH
2742 if (skb2) {
2743 skb_queue_tail(&dev->txq_pend, skb2);
55d7de9d 2744
4b2a4a96
WH
2745 /* throttle TX patch at slower than SUPER SPEED USB */
2746 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2747 (skb_queue_len(&dev->txq_pend) > 10))
55d7de9d
WH
2748 netif_stop_queue(net);
2749 } else {
2750 netif_dbg(dev, tx_err, dev->net,
2751 "lan78xx_tx_prep return NULL\n");
2752 dev->net->stats.tx_errors++;
2753 dev->net->stats.tx_dropped++;
2754 }
2755
2756 tasklet_schedule(&dev->bh);
2757
2758 return NETDEV_TX_OK;
2759}
2760
55d7de9d
WH
/* Driver bind: allocate and initialise the per-device private data,
 * advertise netdev features, set up the interrupt IRQ domain, reset
 * the hardware and bring up the MDIO bus.  On failure, partially
 * acquired resources are released in reverse order via out1/out2.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	/* private data hangs off dev->data[0] as an opaque pointer */
	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	/* start with an empty VLAN filter table */
	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* account for the 8-byte TX command header in the hard MTU */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	kfree(pdata);
	return ret;
}
2839
2840static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2841{
2842 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2843
cc89c323
WH
2844 lan78xx_remove_irq_domain(dev);
2845
ce85e13a
WH
2846 lan78xx_remove_mdio(dev);
2847
55d7de9d 2848 if (pdata) {
629eeaac
RC
2849 cancel_work_sync(&pdata->set_multicast);
2850 cancel_work_sync(&pdata->set_vlan);
55d7de9d
WH
2851 netif_dbg(dev, ifdown, dev->net, "free pdata");
2852 kfree(pdata);
2853 pdata = NULL;
2854 dev->data[0] = 0;
2855 }
2856}
2857
2858static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2859 struct sk_buff *skb,
2860 u32 rx_cmd_a, u32 rx_cmd_b)
2861{
2862 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2863 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2864 skb->ip_summed = CHECKSUM_NONE;
2865 } else {
2866 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2867 skb->ip_summed = CHECKSUM_COMPLETE;
2868 }
2869}
2870
/* Deliver a received frame to the network stack, or park it on
 * rxq_pause while reception is paused.  Updates RX counters, resolves
 * the ethernet protocol, clears the cb scratch area, and honours
 * deferred RX timestamping (which may consume the skb) before
 * netif_rx().
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* the timestamping core may take ownership of the skb here */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2897
/* Parse one bulk-in buffer, which may hold several frames each
 * preceded by three little-endian RX command words (A/B/C) and padded
 * to 4-byte alignment (relative to RXW_PADDING).  Intermediate frames
 * are cloned out and delivered individually; the final frame reuses
 * @skb itself and is left for the caller to deliver.  Returns 1 on
 * success, 0 on a runt buffer or clone failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive error: skip this frame, keep parsing */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this frame's bytes */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2969
2970static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2971{
2972 if (!lan78xx_rx(dev, skb)) {
2973 dev->net->stats.rx_errors++;
2974 goto done;
2975 }
2976
2977 if (skb->len) {
2978 lan78xx_skb_return(dev, skb);
2979 return;
2980 }
2981
2982 netif_dbg(dev, rx_err, dev->net, "drop\n");
2983 dev->net->stats.rx_errors++;
2984done:
2985 skb_queue_tail(&dev->done, skb);
2986}
2987
2988static void rx_complete(struct urb *urb);
2989
/* Allocate a receive skb for @urb and submit it on the bulk-in pipe.
 *
 * Consumes @urb (and the skb) on every failure path.  On success the
 * skb is queued on dev->rxq in rx_start state.  Returns 0, -ENOMEM,
 * -ENOLINK (interface stopped or host unreachable) or the raw
 * usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the interface is up, awake and not halted */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: let the worker clear the halt */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3049
/* URB completion handler for bulk-in transfers.
 *
 * Classifies the completion status into an skb_state and hands the
 * skb to the bottom half via defer_bh().  Paths that must not resubmit
 * stash the urb back into entry->urb (the bottom half frees it) and
 * NULL the local pointer; otherwise the urb is resubmitted immediately
 * while the interface is up, not halted, and not being unlinked.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: count it and recycle the skb */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* defer_bh() reports unlink_start so we don't resubmit then */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3119
/* TX bottom half: coalesce pending skbs from txq_pend into a single
 * bulk-out transfer and submit it.
 *
 * A GSO skb is always sent on its own (jumping straight to gso_skb);
 * otherwise up to MAX_SINGLE_PACKET_SIZE bytes of queued frames are
 * copied into one skb, each frame aligned to a 4-byte boundary.
 * entry->length / entry->num_of_packet carry aggregate byte and
 * packet counts for tx_complete()'s statistics.  While
 * EVENT_DEV_ASLEEP is set the urb is anchored on dev->deferred and
 * transmitted at resume time instead.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	/* measure how many queued frames fit into one transfer */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* async PM reference is released by tx_complete() */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3247
3248static void lan78xx_rx_bh(struct lan78xx_net *dev)
3249{
3250 struct urb *urb;
3251 int i;
3252
3253 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3254 for (i = 0; i < 10; i++) {
3255 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3256 break;
3257 urb = usb_alloc_urb(0, GFP_ATOMIC);
3258 if (urb)
3259 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3260 return;
3261 }
3262
3263 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3264 tasklet_schedule(&dev->bh);
3265 }
3266 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3267 netif_wake_queue(dev->net);
3268}
3269
3270static void lan78xx_bh(unsigned long param)
3271{
3272 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3273 struct sk_buff *skb;
3274 struct skb_data *entry;
3275
55d7de9d
WH
3276 while ((skb = skb_dequeue(&dev->done))) {
3277 entry = (struct skb_data *)(skb->cb);
3278 switch (entry->state) {
3279 case rx_done:
3280 entry->state = rx_cleanup;
3281 rx_process(dev, skb);
3282 continue;
3283 case tx_done:
3284 usb_free_urb(entry->urb);
3285 dev_kfree_skb(skb);
3286 continue;
3287 case rx_cleanup:
3288 usb_free_urb(entry->urb);
3289 dev_kfree_skb(skb);
3290 continue;
3291 default:
3292 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3293 return;
3294 }
55d7de9d
WH
3295 }
3296
3297 if (netif_device_present(dev->net) && netif_running(dev->net)) {
20ff5565
WH
3298 /* reset update timer delta */
3299 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3300 dev->delta = 1;
3301 mod_timer(&dev->stat_monitor,
3302 jiffies + STAT_UPDATE_TIMER);
3303 }
3304
55d7de9d
WH
3305 if (!skb_queue_empty(&dev->txq_pend))
3306 lan78xx_tx_bh(dev);
3307
3308 if (!timer_pending(&dev->delay) &&
3309 !test_bit(EVENT_RX_HALT, &dev->flags))
3310 lan78xx_rx_bh(dev);
3311 }
3312}
3313
3314static void lan78xx_delayedwork(struct work_struct *work)
3315{
3316 int status;
3317 struct lan78xx_net *dev;
3318
3319 dev = container_of(work, struct lan78xx_net, wq.work);
3320
3321 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3322 unlink_urbs(dev, &dev->txq);
3323 status = usb_autopm_get_interface(dev->intf);
3324 if (status < 0)
3325 goto fail_pipe;
3326 status = usb_clear_halt(dev->udev, dev->pipe_out);
3327 usb_autopm_put_interface(dev->intf);
3328 if (status < 0 &&
3329 status != -EPIPE &&
3330 status != -ESHUTDOWN) {
3331 if (netif_msg_tx_err(dev))
3332fail_pipe:
3333 netdev_err(dev->net,
3334 "can't clear tx halt, status %d\n",
3335 status);
3336 } else {
3337 clear_bit(EVENT_TX_HALT, &dev->flags);
3338 if (status != -ESHUTDOWN)
3339 netif_wake_queue(dev->net);
3340 }
3341 }
3342 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3343 unlink_urbs(dev, &dev->rxq);
3344 status = usb_autopm_get_interface(dev->intf);
3345 if (status < 0)
3346 goto fail_halt;
3347 status = usb_clear_halt(dev->udev, dev->pipe_in);
3348 usb_autopm_put_interface(dev->intf);
3349 if (status < 0 &&
3350 status != -EPIPE &&
3351 status != -ESHUTDOWN) {
3352 if (netif_msg_rx_err(dev))
3353fail_halt:
3354 netdev_err(dev->net,
3355 "can't clear rx halt, status %d\n",
3356 status);
3357 } else {
3358 clear_bit(EVENT_RX_HALT, &dev->flags);
3359 tasklet_schedule(&dev->bh);
3360 }
3361 }
3362
3363 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3364 int ret = 0;
3365
3366 clear_bit(EVENT_LINK_RESET, &dev->flags);
3367 status = usb_autopm_get_interface(dev->intf);
3368 if (status < 0)
3369 goto skip_reset;
3370 if (lan78xx_link_reset(dev) < 0) {
3371 usb_autopm_put_interface(dev->intf);
3372skip_reset:
3373 netdev_info(dev->net, "link reset failed (%d)\n",
3374 ret);
3375 } else {
3376 usb_autopm_put_interface(dev->intf);
3377 }
3378 }
20ff5565
WH
3379
3380 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3381 lan78xx_update_stats(dev);
3382
3383 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3384
3385 mod_timer(&dev->stat_monitor,
3386 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3387
3388 dev->delta = min((dev->delta * 2), 50);
3389 }
55d7de9d
WH
3390}
3391
3392static void intr_complete(struct urb *urb)
3393{
3394 struct lan78xx_net *dev = urb->context;
3395 int status = urb->status;
3396
3397 switch (status) {
3398 /* success */
3399 case 0:
3400 lan78xx_status(dev, urb);
3401 break;
3402
3403 /* software-driven interface shutdown */
3404 case -ENOENT: /* urb killed */
3405 case -ESHUTDOWN: /* hardware gone */
3406 netif_dbg(dev, ifdown, dev->net,
3407 "intr shutdown, code %d\n", status);
3408 return;
3409
3410 /* NOTE: not throttling like RX/TX, since this endpoint
3411 * already polls infrequently
3412 */
3413 default:
3414 netdev_dbg(dev->net, "intr status %d\n", status);
3415 break;
3416 }
3417
3418 if (!netif_running(dev->net))
3419 return;
3420
3421 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3422 status = usb_submit_urb(urb, GFP_ATOMIC);
3423 if (status != 0)
3424 netif_err(dev, timer, dev->net,
3425 "intr resubmit --> %d\n", status);
3426}
3427
/* USB disconnect handler: tear down in roughly the reverse order of
 * probe.  Ordering matters: PHY fixups and the PHY are detached before
 * unregister_netdev(), pending work is cancelled before unbinding, and
 * the interrupt URB is killed before the netdev is freed.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;

	/* remove the PHY fixups registered at probe time */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any tx URBs deferred while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);	/* drop the reference taken in probe */
}
3461
e0c79ff6 3462static void lan78xx_tx_timeout(struct net_device *net)
55d7de9d
WH
3463{
3464 struct lan78xx_net *dev = netdev_priv(net);
3465
3466 unlink_urbs(dev, &dev->txq);
3467 tasklet_schedule(&dev->bh);
3468}
3469
d6502fc2
JH
3470static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3471 struct net_device *netdev,
3472 netdev_features_t features)
3473{
3474 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3475 features &= ~NETIF_F_GSO_MASK;
3476
3477 features = vlan_features_check(skb, features);
3478 features = vxlan_features_check(skb, features);
3479
3480 return features;
3481}
3482
55d7de9d
WH
/* net_device operations implemented by this driver */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3498
20ff5565
WH
3499static void lan78xx_stat_monitor(unsigned long param)
3500{
3501 struct lan78xx_net *dev;
3502
3503 dev = (struct lan78xx_net *)param;
3504
3505 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3506}
3507
55d7de9d
WH
/* Probe: allocate the netdev, validate and wire up the three USB
 * endpoints (bulk-in, bulk-out, interrupt-in), bind the hardware,
 * set up the interrupt URB and the PHY, then register the net device.
 *
 * Error labels unwind in reverse order of acquisition (out5..out1).
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned maxp;
	unsigned period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);	/* reference dropped at out1/disconnect */

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					 | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);

	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->stat_monitor.function = lan78xx_stat_monitor;
	dev->stat_monitor.data = (unsigned long)dev;
	dev->delta = 1;		/* initial stats polling multiplier */
	init_timer(&dev->stat_monitor);

	mutex_init(&dev->stats.access_lock);

	/* reject interfaces that don't expose the expected three endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;
	strcpy(netdev->name, "eth%d");

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	period = ep_intr->desc.bInterval;

	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			/* buf is released together with the URB */
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): return value of device_set_wakeup_enable() is
	 * assigned to ret but never checked; probe returns 0 regardless
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	/* Default delay of 2sec has more overhead than advantage.
	 * Set to 10sec as default.
	 */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
3659
3660static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3661{
3662 const u16 crc16poly = 0x8005;
3663 int i;
3664 u16 bit, crc, msb;
3665 u8 data;
3666
3667 crc = 0xFFFF;
3668 for (i = 0; i < len; i++) {
3669 data = *buf++;
3670 for (bit = 0; bit < 8; bit++) {
3671 msb = crc >> 15;
3672 crc <<= 1;
3673
3674 if (msb ^ (u16)(data & 1)) {
3675 crc ^= crc16poly;
3676 crc |= (u16)0x0001U;
3677 }
3678 data >>= 1;
3679 }
3680 }
3681
3682 return crc;
3683}
3684
/* Program the wake-on-LAN configuration for a full (system) suspend:
 * disable tx/rx, clear stale wake status, build WUCSR/PMT_CTL and the
 * wakeup-frame filters (WUF_CFG/WUF_MASK) from the requested @wol mask,
 * then re-enable rx so wake frames can be received.
 *
 * NOTE(review): the results of lan78xx_read_reg/lan78xx_write_reg are
 * assigned to ret but never checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* prefix bytes used to recognize multicast dest addresses */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	/* EtherType for ARP (0x0806) */
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop the MAC while reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake control and any latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* wipe all wakeup-frame filter slots before repopulating */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first 3 bytes of the dest address */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first 2 bytes of the dest address */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: match bytes 12-13 (the EtherType field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable rx so wake frames can be seen while suspended */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3827
/* USB suspend callback (both system and runtime/"auto" suspend).
 * On the first suspend: refuse autosuspend while tx is busy, stop the
 * MAC, detach the netdev and kill all URBs.  Then program wake logic:
 * good-frame wake for autosuspend, or the user's WoL settings for a
 * full suspend via lan78xx_set_suspend().
 *
 * NOTE(review): `event` is assigned from message.event but never used
 * in this function; register I/O results assigned to ret are unchecked.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* stats polling is pointless while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any latched wake-up status */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* rx stays enabled so wake frames are detected */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* full suspend: apply user WoL configuration */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3923
/* USB resume callback: restart the stats timer, resubmit the interrupt
 * URB and any tx URBs deferred while asleep, then clear the wake logic
 * and re-enable the transmitter.
 *
 * NOTE(review): submit/write return values assigned to ret are not
 * checked; the function always returns 0.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit tx URBs anchored on dev->deferred while asleep */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the packet and release the pm
				 * reference taken at submit time
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disable wake logic and acknowledge all wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3990
e0c79ff6 3991static int lan78xx_reset_resume(struct usb_interface *intf)
55d7de9d
WH
3992{
3993 struct lan78xx_net *dev = usb_get_intfdata(intf);
3994
3995 lan78xx_reset(dev);
ce85e13a 3996
6d03ff16 3997 phy_start(dev->net->phydev);
ce85e13a 3998
55d7de9d
WH
3999 return lan78xx_resume(intf);
4000}
4001
/* USB vendor/product IDs handled by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4018
/* USB driver glue; autosuspend is supported (see needs_remote_wakeup
 * set in probe) and hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4030
/* standard module registration boilerplate */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");