--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Ingo Molnar <mingo@elte.hu>
+Date: Wed, 2 May 2018 13:30:57 +0200
+Subject: 8139too: Use disable_irq_nosync() in rtl8139_poll_controller()
+
+From: Ingo Molnar <mingo@elte.hu>
+
+[ Upstream commit af3e0fcf78879f718c5f73df0814951bd7057d34 ]
+
+Use disable_irq_nosync() instead of disable_irq() as this might be
+called in atomic context with netpoll.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/8139too.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -2215,7 +2215,7 @@ static void rtl8139_poll_controller(stru
+ struct rtl8139_private *tp = netdev_priv(dev);
+ const int irq = tp->pci_dev->irq;
+
+- disable_irq(irq);
++ disable_irq_nosync(irq);
+ rtl8139_interrupt(irq, dev);
+ enable_irq(irq);
+ }
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Debabrata Banerjee <dbanerje@akamai.com>
+Date: Wed, 9 May 2018 19:32:10 -0400
+Subject: bonding: do not allow rlb updates to invalid mac
+
+From: Debabrata Banerjee <dbanerje@akamai.com>
+
+[ Upstream commit 4fa8667ca3989ce14cf66301fa251544fbddbdd0 ]
+
+Make sure multicast, broadcast, and zero MACs cannot be the output of rlb
+updates, which should all be directed ARPs. Receive load balancing will
+collapse if any of these happen, as the switch will broadcast.
+
+Signed-off-by: Debabrata Banerjee <dbanerje@akamai.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_alb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -453,7 +453,7 @@ static void rlb_update_client(struct rlb
+ {
+ int i;
+
+- if (!client_info->slave)
++ if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
+ return;
+
+ for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
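
As an aside to the change above, here is a minimal userspace sketch of the address test the patch relies on. It restates the semantics of the kernel's is_valid_ether_addr() (an address is acceptable only if it is neither multicast/broadcast nor all-zero); it is an illustration, not the kernel helper itself, and the sample addresses are arbitrary.

#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN 6

/* multicast (and broadcast): I/G bit set in the first octet */
static bool mac_is_multicast(const unsigned char *a)
{
        return a[0] & 0x01;
}

static bool mac_is_zero(const unsigned char *a)
{
        return (a[0] | a[1] | a[2] | a[3] | a[4] | a[5]) == 0;
}

/* mirrors the acceptance rule of the kernel's is_valid_ether_addr() */
static bool mac_is_valid(const unsigned char *a)
{
        return !mac_is_multicast(a) && !mac_is_zero(a);
}

int main(void)
{
        const unsigned char bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const unsigned char zero[ETH_ALEN]  = { 0 };
        const unsigned char ucast[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        /* prints "broadcast: 0, zero: 0, unicast: 1" */
        printf("broadcast: %d, zero: %d, unicast: %d\n",
               mac_is_valid(bcast), mac_is_valid(zero), mac_is_valid(ucast));
        return 0;
}
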
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Hangbin Liu <liuhangbin@gmail.com>
+Date: Fri, 27 Apr 2018 20:59:24 +0800
+Subject: bridge: check iface upper dev when setting master via ioctl
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit e8238fc2bd7b4c3c7554fa2df067e796610212fc ]
+
+When we set a bond slave's master to a bridge via ioctl, we only check
+the IFF_BRIDGE_PORT flag. Although we will find the slave's real master
+at netdev_master_upper_dev_link() later, by that point some settings have
+already been applied and some resources allocated. It would be better to
+return as early as possible.
+
+v1 -> v2:
+use netdev_master_upper_dev_get() instead of netdev_has_any_upper_dev()
+to check if we have a master, because not all upper devs are masters,
+e.g. vlan device.
+
+Reported-by: syzbot+de73361ee4971b6e6f75@syzkaller.appspotmail.com
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_if.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -445,8 +445,8 @@ int br_add_if(struct net_bridge *br, str
+ if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
+ return -ELOOP;
+
+- /* Device is already being bridged */
+- if (br_port_exists(dev))
++ /* Device has master upper dev */
++ if (netdev_master_upper_dev_get(dev))
+ return -EBUSY;
+
+ /* No bridging devices that dislike that (e.g. wireless) */
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 3 May 2018 09:39:20 -0700
+Subject: dccp: fix tasklet usage
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a8d7aa17bbc970971ccdf71988ea19230ab368b1 ]
+
+syzbot reported a crash in tasklet_action_common() caused by dccp.
+
+dccp needs to make sure the socket won't disappear before the tasklet
+handler has completed.
+
+This patch takes a reference on the socket when arming the tasklet,
+and moves the sock_put() from dccp_write_xmit_timer() to
+dccp_write_xmitlet().
+
+kernel BUG at kernel/softirq.c:514!
+invalid opcode: 0000 [#1] SMP KASAN
+Dumping ftrace buffer:
+ (ftrace buffer empty)
+Modules linked in:
+CPU: 1 PID: 17 Comm: ksoftirqd/1 Not tainted 4.17.0-rc3+ #30
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:tasklet_action_common.isra.19+0x6db/0x700 kernel/softirq.c:515
+RSP: 0018:ffff8801d9b3faf8 EFLAGS: 00010246
+dccp_close: ABORT with 65423 bytes unread
+RAX: 1ffff1003b367f6b RBX: ffff8801daf1f3f0 RCX: 0000000000000000
+RDX: ffff8801cf895498 RSI: 0000000000000004 RDI: 0000000000000000
+RBP: ffff8801d9b3fc40 R08: ffffed0039f12a95 R09: ffffed0039f12a94
+dccp_close: ABORT with 65423 bytes unread
+R10: ffffed0039f12a94 R11: ffff8801cf8954a3 R12: 0000000000000000
+R13: ffff8801d9b3fc18 R14: dffffc0000000000 R15: ffff8801cf895490
+FS: 0000000000000000(0000) GS:ffff8801daf00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000001b2bc28000 CR3: 00000001a08a9000 CR4: 00000000001406e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ tasklet_action+0x1d/0x20 kernel/softirq.c:533
+ __do_softirq+0x2e0/0xaf5 kernel/softirq.c:285
+dccp_close: ABORT with 65423 bytes unread
+ run_ksoftirqd+0x86/0x100 kernel/softirq.c:646
+ smpboot_thread_fn+0x417/0x870 kernel/smpboot.c:164
+ kthread+0x345/0x410 kernel/kthread.c:238
+ ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:412
+Code: 48 8b 85 e8 fe ff ff 48 8b 95 f0 fe ff ff e9 94 fb ff ff 48 89 95 f0 fe ff ff e8 81 53 6e 00 48 8b 95 f0 fe ff ff e9 62 fb ff ff <0f> 0b 48 89 cf 48 89 8d e8 fe ff ff e8 64 53 6e 00 48 8b 8d e8
+RIP: tasklet_action_common.isra.19+0x6db/0x700 kernel/softirq.c:515 RSP: ffff8801d9b3faf8
+
+Fixes: dc841e30eaea ("dccp: Extend CCID packet dequeueing interface")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Gerrit Renker <gerrit@erg.abdn.ac.uk>
+Cc: dccp@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dccp/ccids/ccid2.c | 14 ++++++++++++--
+ net/dccp/timer.c | 2 +-
+ 2 files changed, 13 insertions(+), 3 deletions(-)
+
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(st
+ DCCPF_SEQ_WMAX));
+ }
+
++static void dccp_tasklet_schedule(struct sock *sk)
++{
++ struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
++
++ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
++ sock_hold(sk);
++ __tasklet_schedule(t);
++ }
++}
++
+ static void ccid2_hc_tx_rto_expire(unsigned long data)
+ {
+ struct sock *sk = (struct sock *)data;
+@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(unsig
+
+ /* if we were blocked before, we may now send cwnd=1 packet */
+ if (sender_was_blocked)
+- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
++ dccp_tasklet_schedule(sk);
+ /* restart backed-off timer */
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
+ out:
+@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(stru
+ done:
+ /* check if incoming Acks allow pending packets to be sent */
+ if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
+- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
++ dccp_tasklet_schedule(sk);
+ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
+ }
+
+--- a/net/dccp/timer.c
++++ b/net/dccp/timer.c
+@@ -252,12 +252,12 @@ static void dccp_write_xmitlet(unsigned
+ else
+ dccp_write_xmit(sk);
+ bh_unlock_sock(sk);
++ sock_put(sk);
+ }
+
+ static void dccp_write_xmit_timer(unsigned long data)
+ {
+ dccp_write_xmitlet(data);
+- sock_put((struct sock *)data);
+ }
+
+ void dccp_init_xmit_timers(struct sock *sk)
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Andrey Ignatov <rdna@fb.com>
+Date: Thu, 10 May 2018 10:59:34 -0700
+Subject: ipv4: fix memory leaks in udp_sendmsg, ping_v4_sendmsg
+
+From: Andrey Ignatov <rdna@fb.com>
+
+[ Upstream commit 1b97013bfb11d66f041de691de6f0fec748ce016 ]
+
+Fix more memory leaks in ip_cmsg_send() callers. Some of them were fixed
+earlier in 919483096bfe.
+
+* the udp_sendmsg one has been there since the beginning, when the Linux
+  sources were first added to git;
+* the ping_v4_sendmsg one was copy/pasted in c319b4d76b9e.
+
+Whenever udp_sendmsg() or ping_v4_sendmsg() returns, the IP options have
+to be freed if they were allocated previously.
+
+Add a label so that future callers (if any) can use it instead of a
+kfree() before return, which is easy to forget.
+
+Fixes: c319b4d76b9e (net: ipv4: add IPPROTO_ICMP socket kind)
+Signed-off-by: Andrey Ignatov <rdna@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ping.c | 7 +++++--
+ net/ipv4/udp.c | 7 +++++--
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -771,8 +771,10 @@ static int ping_v4_sendmsg(struct kiocb
+ ipc.addr = faddr = daddr;
+
+ if (ipc.opt && ipc.opt->opt.srr) {
+- if (!daddr)
+- return -EINVAL;
++ if (!daddr) {
++ err = -EINVAL;
++ goto out_free;
++ }
+ faddr = ipc.opt->opt.faddr;
+ }
+ tos = get_rttos(&ipc, inet);
+@@ -837,6 +839,7 @@ back_from_confirm:
+
+ out:
+ ip_rt_put(rt);
++out_free:
+ if (free)
+ kfree(ipc.opt);
+ if (!err) {
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -975,8 +975,10 @@ int udp_sendmsg(struct kiocb *iocb, stru
+ ipc.addr = faddr = daddr;
+
+ if (ipc.opt && ipc.opt->opt.srr) {
+- if (!daddr)
+- return -EINVAL;
++ if (!daddr) {
++ err = -EINVAL;
++ goto out_free;
++ }
+ faddr = ipc.opt->opt.faddr;
+ connected = 0;
+ }
+@@ -1081,6 +1083,7 @@ do_append_data:
+
+ out:
+ ip_rt_put(rt);
++out_free:
+ if (free)
+ kfree(ipc.opt);
+ if (!err)
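
The fix follows the common goto-cleanup idiom: every exit path funnels through one label that releases what was allocated. Below is a minimal, self-contained C sketch of that idiom with made-up names (plain userspace code, not the kernel functions above).

#include <stdlib.h>

struct opts { int srr; };

/* hypothetical example: error paths jump to the cleanup label instead of
 * returning directly, so the allocation cannot be leaked
 */
static int do_send(int srr, int have_daddr)
{
        struct opts *opt = calloc(1, sizeof(*opt));
        int err = 0;

        if (!opt)
                return -12;             /* -ENOMEM */
        opt->srr = srr;

        if (opt->srr && !have_daddr) {
                err = -22;              /* -EINVAL: the case the patch touches */
                goto out_free;          /* a bare "return -22;" here would leak opt */
        }

        /* ... build and transmit ... */

out_free:
        free(opt);
        return err;
}

int main(void)
{
        return do_send(1, 0) == -22 ? 0 : 1;    /* error path taken, nothing leaked */
}
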
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 7 May 2018 09:02:25 -0700
+Subject: llc: better deal with too small mtu
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 2c5d5b13c6eb79f5677e206b8aad59b3a2097f60 ]
+
+syzbot loves to set very small mtu on devices, since it brings joy.
+We must make llc_ui_sendmsg() foolproof.
+
+usercopy: Kernel memory overwrite attempt detected to wrapped address (offset 0, size 18446612139802320068)!
+
+kernel BUG at mm/usercopy.c:100!
+invalid opcode: 0000 [#1] SMP KASAN
+Dumping ftrace buffer:
+ (ftrace buffer empty)
+Modules linked in:
+CPU: 0 PID: 17464 Comm: syz-executor1 Not tainted 4.17.0-rc3+ #36
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:usercopy_abort+0xbb/0xbd mm/usercopy.c:88
+RSP: 0018:ffff8801868bf800 EFLAGS: 00010282
+RAX: 000000000000006c RBX: ffffffff87d2fb00 RCX: 0000000000000000
+RDX: 000000000000006c RSI: ffffffff81610731 RDI: ffffed0030d17ef6
+RBP: ffff8801868bf858 R08: ffff88018daa4200 R09: ffffed003b5c4fb0
+R10: ffffed003b5c4fb0 R11: ffff8801dae27d87 R12: ffffffff87d2f8e0
+R13: ffffffff87d2f7a0 R14: ffffffff87d2f7a0 R15: ffffffff87d2f7a0
+FS: 00007f56a14ac700(0000) GS:ffff8801dae00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000001b2bc21000 CR3: 00000001abeb1000 CR4: 00000000001426f0
+DR0: 0000000020000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000030602
+Call Trace:
+ check_bogus_address mm/usercopy.c:153 [inline]
+ __check_object_size+0x5d9/0x5d9 mm/usercopy.c:256
+ check_object_size include/linux/thread_info.h:108 [inline]
+ check_copy_size include/linux/thread_info.h:139 [inline]
+ copy_from_iter_full include/linux/uio.h:121 [inline]
+ memcpy_from_msg include/linux/skbuff.h:3305 [inline]
+ llc_ui_sendmsg+0x4b1/0x1530 net/llc/af_llc.c:941
+ sock_sendmsg_nosec net/socket.c:629 [inline]
+ sock_sendmsg+0xd5/0x120 net/socket.c:639
+ __sys_sendto+0x3d7/0x670 net/socket.c:1789
+ __do_sys_sendto net/socket.c:1801 [inline]
+ __se_sys_sendto net/socket.c:1797 [inline]
+ __x64_sys_sendto+0xe1/0x1a0 net/socket.c:1797
+ do_syscall_64+0x1b1/0x800 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x455979
+RSP: 002b:00007f56a14abc68 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+RAX: ffffffffffffffda RBX: 00007f56a14ac6d4 RCX: 0000000000455979
+RDX: 0000000000000000 RSI: 0000000020000000 RDI: 0000000000000018
+RBP: 000000000072bea0 R08: 00000000200012c0 R09: 0000000000000010
+R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
+R13: 0000000000000548 R14: 00000000006fbf60 R15: 0000000000000000
+Code: 55 c0 e8 c0 55 bb ff ff 75 c8 48 8b 55 c0 4d 89 f9 ff 75 d0 4d 89 e8 48 89 d9 4c 89 e6 41 56 48 c7 c7 80 fa d2 87 e8 a0 0b a3 ff <0f> 0b e8 95 55 bb ff e8 c0 a8 f7 ff 8b 95 14 ff ff ff 4d 89 e8
+RIP: usercopy_abort+0xbb/0xbd mm/usercopy.c:88 RSP: ffff8801868bf800
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/llc/af_llc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -928,6 +928,9 @@ static int llc_ui_sendmsg(struct kiocb *
+ if (size > llc->dev->mtu)
+ size = llc->dev->mtu;
+ copied = size - hdrlen;
++ rc = -EINVAL;
++ if (copied < 0)
++ goto release;
+ release_sock(sk);
+ skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+ lock_sock(sk);
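
A worked illustration of the underflow being guarded against (the numbers are made up; only the arithmetic matters): when the device MTU is smaller than the header length, size is clamped below hdrlen and copied goes negative, which becomes an enormous length once treated as unsigned.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t mtu = 4;                 /* syzbot-style tiny MTU (illustrative) */
        size_t hdrlen = 31;             /* illustrative LLC + MAC header length */
        size_t size = 64 * 1024;        /* requested send size */
        long copied;

        if (size > mtu)                 /* the clamp done by llc_ui_sendmsg() */
                size = mtu;
        copied = (long)size - (long)hdrlen;

        printf("copied = %ld\n", copied);               /* negative */
        printf("as unsigned = %zu\n", (size_t)copied);  /* huge, as in the usercopy splat */
        return 0;       /* the patch bails out with -EINVAL when copied < 0 */
}
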
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Rob Taglang <rob@taglang.io>
+Date: Thu, 3 May 2018 17:13:06 -0400
+Subject: net: ethernet: sun: niu set correct packet size in skb
+
+From: Rob Taglang <rob@taglang.io>
+
+[ Upstream commit 14224923c3600bae2ac4dcae3bf0c3d4dc2812be ]
+
+Currently, skb->len and skb->data_len are set to the page size, not
+the packet size. This causes the frame check sequence to not be
+located at the "end" of the packet, resulting in Ethernet frame check
+errors. The driver does work currently, but stricter kernel-facing
+networking solutions like Open vSwitch will drop these packets as
+invalid.
+
+These changes set the packet size correctly so that these errors no
+longer occur. The length does not include the frame check sequence, so
+that subtraction was removed.
+
+Tested on Oracle/SUN Multithreaded 10-Gigabit Ethernet Network
+Controller [108e:abcd] and validated in wireshark.
+
+Signed-off-by: Rob Taglang <rob@taglang.io>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sun/niu.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -3443,7 +3443,7 @@ static int niu_process_rx_pkt(struct nap
+
+ len = (val & RCR_ENTRY_L2_LEN) >>
+ RCR_ENTRY_L2_LEN_SHIFT;
+- len -= ETH_FCS_LEN;
++ append_size = len + ETH_HLEN + ETH_FCS_LEN;
+
+ addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+ RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+@@ -3453,7 +3453,6 @@ static int niu_process_rx_pkt(struct nap
+ RCR_ENTRY_PKTBUFSZ_SHIFT];
+
+ off = addr & ~PAGE_MASK;
+- append_size = rcr_size;
+ if (num_rcr == 1) {
+ int ptype;
+
+@@ -3466,7 +3465,7 @@ static int niu_process_rx_pkt(struct nap
+ else
+ skb_checksum_none_assert(skb);
+ } else if (!(val & RCR_ENTRY_MULTI))
+- append_size = len - skb->len;
++ append_size = append_size - skb->len;
+
+ niu_rx_skb_append(skb, page, off, append_size, rcr_size);
+ if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Moshe Shemesh <moshe@mellanox.com>
+Date: Wed, 9 May 2018 18:35:13 +0300
+Subject: net/mlx4_en: Verify coalescing parameters are in range
+
+From: Moshe Shemesh <moshe@mellanox.com>
+
+[ Upstream commit 6ad4e91c6d796b38a7f0e724db1de28eeb122bad ]
+
+Add a check that the coalescing parameters received through ethtool are
+within the range of values supported by the HW.
+The driver gets the coalescing rx/tx-usecs and rx/tx-frames as set by
+the user through ethtool. ethtool supports up to 32-bit values for each.
+However, the mlx4 modify CQ command limits the coalescing time and
+coalescing frames parameters to 16 bits.
+Return an out-of-range error if the user tries to set these parameters
+to higher values.
+Change the type of the sample-interval and adaptive_rx_coal parameters
+in the mlx4 driver to u32, as ethtool holds them as u32 and these
+parameters are not limited by the mlx4 HW.
+
+Fixes: c27a02cd94d6 ('mlx4_en: Add driver for Mellanox ConnectX 10GbE NIC')
+Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 16 ++++++++++++++++
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 7 +++++--
+ 2 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -453,6 +453,22 @@ static int mlx4_en_set_coalesce(struct n
+ if (!coal->tx_max_coalesced_frames_irq)
+ return -EINVAL;
+
++ if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
++ coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
++ coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
++ coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
++ netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
++ __func__, MLX4_EN_MAX_COAL_TIME);
++ return -ERANGE;
++ }
++
++ if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
++ coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
++ netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
++ __func__, MLX4_EN_MAX_COAL_PKTS);
++ return -ERANGE;
++ }
++
+ priv->rx_frames = (coal->rx_max_coalesced_frames ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TARGET :
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -138,6 +138,9 @@ enum {
+ #define MLX4_EN_TX_COAL_PKTS 16
+ #define MLX4_EN_TX_COAL_TIME 0x10
+
++#define MLX4_EN_MAX_COAL_PKTS U16_MAX
++#define MLX4_EN_MAX_COAL_TIME U16_MAX
++
+ #define MLX4_EN_RX_RATE_LOW 400000
+ #define MLX4_EN_RX_COAL_TIME_LOW 0
+ #define MLX4_EN_RX_RATE_HIGH 450000
+@@ -535,8 +538,8 @@ struct mlx4_en_priv {
+ u16 rx_usecs_low;
+ u32 pkt_rate_high;
+ u16 rx_usecs_high;
+- u16 sample_interval;
+- u16 adaptive_rx_coal;
++ u32 sample_interval;
++ u32 adaptive_rx_coal;
+ u32 msg_enable;
+ u32 loopback_ok;
+ u32 validate_loopback;
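
For context, a hedged sketch of the user-visible effect: asking for a coalescing time above 16 bits through the ETHTOOL_SCOALESCE ioctl (the path `ethtool -C` uses) is now rejected with ERANGE on mlx4_en. "eth0" is a placeholder interface name and error handling is kept minimal.

#include <errno.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder ifname */
        ifr.ifr_data = (void *)&ec;

        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {       /* read current values */
                perror("ETHTOOL_GCOALESCE");
                return 1;
        }

        ec.cmd = ETHTOOL_SCOALESCE;
        ec.rx_coalesce_usecs = 0x10000;         /* 65536 > U16_MAX */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)   /* expect ERANGE on mlx4_en */
                printf("rejected: %s\n", strerror(errno));

        close(fd);
        return 0;
}
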
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Lance Richardson <lance.richardson.net@gmail.com>
+Date: Wed, 25 Apr 2018 10:21:54 -0400
+Subject: net: support compat 64-bit time in {s,g}etsockopt
+
+From: Lance Richardson <lance.richardson.net@gmail.com>
+
+[ Upstream commit 988bf7243e03ef69238381594e0334a79cef74a6 ]
+
+For the x32 ABI, struct timeval has two 64-bit fields. However
+the kernel currently interprets the user-space values used for
+the SO_RCVTIMEO and SO_SNDTIMEO socket options as having a pair
+of 32-bit fields.
+
+When the seconds portion of the requested timeout is less than 2**32,
+the seconds portion of the effective timeout is correct but the
+microseconds portion is zero. When the seconds portion of the
+requested timeout is zero and the microseconds portion is non-zero,
+the kernel interprets the timeout as zero (never timeout).
+
+Fix by using 64-bit time for SO_RCVTIMEO/SO_SNDTIMEO as required
+for the ABI.
+
+The code included below demonstrates the problem.
+
+Results before patch:
+ $ gcc -m64 -Wall -O2 -o socktmo socktmo.c && ./socktmo
+ recv time: 2.008181 seconds
+ send time: 2.015985 seconds
+
+ $ gcc -m32 -Wall -O2 -o socktmo socktmo.c && ./socktmo
+ recv time: 2.016763 seconds
+ send time: 2.016062 seconds
+
+ $ gcc -mx32 -Wall -O2 -o socktmo socktmo.c && ./socktmo
+ recv time: 1.007239 seconds
+ send time: 1.023890 seconds
+
+Results after patch:
+ $ gcc -m64 -O2 -Wall -o socktmo socktmo.c && ./socktmo
+ recv time: 2.010062 seconds
+ send time: 2.015836 seconds
+
+ $ gcc -m32 -O2 -Wall -o socktmo socktmo.c && ./socktmo
+ recv time: 2.013974 seconds
+ send time: 2.015981 seconds
+
+ $ gcc -mx32 -O2 -Wall -o socktmo socktmo.c && ./socktmo
+ recv time: 2.030257 seconds
+ send time: 2.013383 seconds
+
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <sys/socket.h>
+ #include <sys/types.h>
+ #include <sys/time.h>
+
+ void checkrc(char *str, int rc)
+ {
+ if (rc >= 0)
+ return;
+
+ perror(str);
+ exit(1);
+ }
+
+ static char buf[1024];
+ int main(int argc, char **argv)
+ {
+ int rc;
+ int socks[2];
+ struct timeval tv;
+ struct timeval start, end, delta;
+
+ rc = socketpair(AF_UNIX, SOCK_STREAM, 0, socks);
+ checkrc("socketpair", rc);
+
+ /* set timeout to 1.999999 seconds */
+ tv.tv_sec = 1;
+ tv.tv_usec = 999999;
+ rc = setsockopt(socks[0], SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof tv);
+ rc = setsockopt(socks[0], SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof tv);
+ checkrc("setsockopt", rc);
+
+ /* measure actual receive timeout */
+ gettimeofday(&start, NULL);
+ rc = recv(socks[0], buf, sizeof buf, 0);
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &delta);
+
+ printf("recv time: %ld.%06ld seconds\n",
+ (long)delta.tv_sec, (long)delta.tv_usec);
+
+ /* fill send buffer */
+ do {
+ rc = send(socks[0], buf, sizeof buf, 0);
+ } while (rc > 0);
+
+ /* measure actual send timeout */
+ gettimeofday(&start, NULL);
+ rc = send(socks[0], buf, sizeof buf, 0);
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &delta);
+
+ printf("send time: %ld.%06ld seconds\n",
+ (long)delta.tv_sec, (long)delta.tv_usec);
+ exit(0);
+ }
+
+Fixes: 515c7af85ed9 ("x32: Use compat shims for {g,s}etsockopt")
+Reported-by: Gopal RajagopalSai <gopalsr83@gmail.com>
+Signed-off-by: Lance Richardson <lance.richardson.net@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/compat.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -387,7 +387,8 @@ static int compat_sock_setsockopt(struct
+ if (optname == SO_ATTACH_FILTER)
+ return do_set_attach_filter(sock, level, optname,
+ optval, optlen);
+- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
++ if (!COMPAT_USE_64BIT_TIME &&
++ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
+ return do_set_sock_timeout(sock, level, optname, optval, optlen);
+
+ return sock_setsockopt(sock, level, optname, optval, optlen);
+@@ -452,7 +453,8 @@ static int do_get_sock_timeout(struct so
+ static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+ {
+- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
++ if (!COMPAT_USE_64BIT_TIME &&
++ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
+ return do_get_sock_timeout(sock, level, optname, optval, optlen);
+ return sock_getsockopt(sock, level, optname, optval, optlen);
+ }
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 2 May 2018 10:03:30 -0700
+Subject: net_sched: fq: take care of throttled flows before reuse
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 7df40c2673a1307c3260aab6f9d4b9bf97ca8fd7 ]
+
+Normally, a socket cannot be freed/reused unless all its TX packets
+have left the qdisc and were TX-completed. However, connect(AF_UNSPEC)
+allows this to happen.
+
+With commit fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for
+reused flows") we cleared f->time_next_packet but took no special
+action if the flow was still in the throttled rb-tree.
+
+Since f->time_next_packet is the key used in the rb-tree searches,
+blindly clearing it might break rb-tree integrity. We need to make
+sure the flow is no longer in the rb-tree to avoid this problem.
+
+Fixes: fc59d5bdf1e3 ("pkt_sched: fq: clear time_next_packet for reused flows")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_fq.c | 37 +++++++++++++++++++++++++------------
+ 1 file changed, 25 insertions(+), 12 deletions(-)
+
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -124,6 +124,28 @@ static bool fq_flow_is_detached(const st
+ return f->next == &detached;
+ }
+
++static bool fq_flow_is_throttled(const struct fq_flow *f)
++{
++ return f->next == &throttled;
++}
++
++static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
++{
++ if (head->first)
++ head->last->next = flow;
++ else
++ head->first = flow;
++ head->last = flow;
++ flow->next = NULL;
++}
++
++static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
++{
++ rb_erase(&f->rate_node, &q->delayed);
++ q->throttled_flows--;
++ fq_flow_add_tail(&q->old_flows, f);
++}
++
+ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
+ {
+ struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
+@@ -151,15 +173,6 @@ static void fq_flow_set_throttled(struct
+
+ static struct kmem_cache *fq_flow_cachep __read_mostly;
+
+-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+-{
+- if (head->first)
+- head->last->next = flow;
+- else
+- head->first = flow;
+- head->last = flow;
+- flow->next = NULL;
+-}
+
+ /* limit number of collected flows per round */
+ #define FQ_GC_MAX 8
+@@ -251,6 +264,8 @@ static struct fq_flow *fq_classify(struc
+ f->socket_hash != sk->sk_hash)) {
+ f->credit = q->initial_quantum;
+ f->socket_hash = sk->sk_hash;
++ if (fq_flow_is_throttled(f))
++ fq_flow_unset_throttled(q, f);
+ f->time_next_packet = 0ULL;
+ }
+ return f;
+@@ -405,9 +420,7 @@ static void fq_check_throttled(struct fq
+ q->time_next_delayed_flow = f->time_next_packet;
+ break;
+ }
+- rb_erase(p, &q->delayed);
+- q->throttled_flows--;
+- fq_flow_add_tail(&q->old_flows, f);
++ fq_flow_unset_throttled(q, f);
+ }
+ }
+
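
The underlying hazard is generic: mutating a node's key while it still sits in a structure ordered by that key silently breaks later lookups. A tiny plain-C illustration with a sorted array and bsearch() follows; the kernel code uses an rbtree, but the invariant being protected is the same, and the values are arbitrary.

#include <stdio.h>
#include <stdlib.h>

static int cmp_long(const void *a, const void *b)
{
        long x = *(const long *)a, y = *(const long *)b;

        return (x > y) - (x < y);
}

int main(void)
{
        long next_packet_time[] = { 10, 20, 30, 40, 50 };       /* sorted keys */
        long key = 30;

        /* clear a key in place without removing the entry first --
         * analogous to resetting time_next_packet while the flow is
         * still in the throttled tree
         */
        next_packet_time[2] = 0;

        if (!bsearch(&key, next_packet_time, 5, sizeof(long), cmp_long))
                printf("lookup for 30 now fails: ordering invariant broken\n");
        return 0;
}
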
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: "Bjørn Mork" <bjorn@mork.no>
+Date: Wed, 2 May 2018 22:22:54 +0200
+Subject: qmi_wwan: do not steal interfaces from class drivers
+
+From: "Bjørn Mork" <bjorn@mork.no>
+
+[ Upstream commit 5697db4a696c41601a1d15c1922150b4dbf5726c ]
+
+The USB_DEVICE_INTERFACE_NUMBER matching macro assumes that
+the { vendorid, productid, interfacenumber } set uniquely
+identifies one specific function. This has proven to fail
+for some configurable devices. One example is the Quectel
+EM06/EP06 where the same interface number can be either
+QMI or MBIM, without the device ID changing either.
+
+Fix by requiring the vendor-specific class for interface number
+based matching. Functions of other classes can and should use
+class based matching instead.
+
+Fixes: 03304bcb5ec4 ("net: qmi_wwan: use fixed interface number matching")
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -866,6 +866,7 @@ static int qmi_wwan_probe(struct usb_int
+ const struct usb_device_id *prod)
+ {
+ struct usb_device_id *id = (struct usb_device_id *)prod;
++ struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
+
+ /* Workaround to enable dynamic IDs. This disables usbnet
+ * blacklisting functionality. Which, if required, can be
+@@ -877,6 +878,18 @@ static int qmi_wwan_probe(struct usb_int
+ id->driver_info = (unsigned long)&qmi_wwan_info;
+ }
+
++ /* There are devices where the same interface number can be
++ * configured as different functions. We should only bind to
++ * vendor specific functions when matching on interface number
++ */
++ if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
++ desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
++ dev_dbg(&intf->dev,
++ "Rejecting interface number match for class %02x\n",
++ desc->bInterfaceClass);
++ return -ENODEV;
++ }
++
+ return usbnet_probe(intf, id);
+ }
+
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Mon, 7 May 2018 21:11:21 +0200
+Subject: r8169: fix powering up RTL8168h
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit 3148dedfe79e422f448a10250d3e2cdf8b7ee617 ]
+
+Since commit a92a08499b1f "r8169: improve runtime pm in general and
+suspend unused ports" interfaces w/o link are runtime-suspended after
+10s. On systems where drivers take longer to load this can lead to the
+situation that the interface is runtime-suspended already when it's
+initially brought up.
+This shouldn't be a problem because rtl_open() resumes MAC/PHY.
+However with at least one chip version the interface doesn't properly
+come up, as reported here:
+https://bugzilla.kernel.org/show_bug.cgi?id=199549
+
+The vendor driver uses a delay to give certain chip versions some
+time to resume before starting the PHY configuration. So let's do
+the same. I don't know which chip versions may be affected,
+therefore apply this delay always.
+
+This patch was reported to fix the issue for RTL8168h.
+I was able to reproduce the issue on an Asus H310I-Plus which also
+uses a RTL8168h. Also in my case the patch fixed the issue.
+
+Reported-by: Slava Kardakov <ojab@ojab.ru>
+Tested-by: Slava Kardakov <ojab@ojab.ru>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -4764,6 +4764,9 @@ static void rtl_pll_power_down(struct rt
+ static void rtl_pll_power_up(struct rtl8169_private *tp)
+ {
+ rtl_generic_op(tp, tp->pll_power_ops.up);
++
++ /* give MAC/PHY some time to resume */
++ msleep(20);
+ }
+
+ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
--- /dev/null
+From foo@baz Wed May 16 12:08:47 CEST 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sat, 5 May 2018 14:59:47 +0800
+Subject: sctp: delay the authentication for the duplicated cookie-echo chunk
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 59d8d4434f429b4fa8a346fd889058bda427a837 ]
+
+Now sctp only delays the authentication for the normal cookie-echo
+chunk by setting chunk->auth_chunk in sctp_endpoint_bh_rcv(). But
+for the duplicated one with auth, in sctp_assoc_bh_rcv(), it does
+authentication first based on the old asoc, which will definitely
+fail due to the different auth info in the old asoc.
+
+The duplicated cookie-echo chunk will create a new asoc with the
+auth info from this chunk, and the authentication should also be
+done with the new asoc's auth info for all of the collision 'A',
+'B' and 'D'. Otherwise, the duplicated cookie-echo chunk with auth
+will never pass the authentication and create the new connection.
+
+This issue has existed since the very beginning, and this fix makes
+sctp_assoc_bh_rcv() follow the way sctp_endpoint_bh_rcv() does
+for the normal cookie-echo chunk to delay the authentication.
+
+While at it, remove the unused params from sctp_sf_authenticate()
+and define sctp_auth_chunk_verify() used for all the places that
+do the delayed authentication.
+
+v1->v2:
+ fix the typo in changelog as Marcelo noticed.
+
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/associola.c | 30 ++++++++++++++++
+ net/sctp/sm_statefuns.c | 87 ++++++++++++++++++++++++++----------------------
+ 2 files changed, 77 insertions(+), 40 deletions(-)
+
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1001,9 +1001,10 @@ static void sctp_assoc_bh_rcv(struct wor
+ struct sctp_endpoint *ep;
+ struct sctp_chunk *chunk;
+ struct sctp_inq *inqueue;
+- int state;
+ sctp_subtype_t subtype;
++ int first_time = 1; /* is this the first time through the loop */
+ int error = 0;
++ int state;
+
+ /* The association should be held so we should be safe. */
+ ep = asoc->ep;
+@@ -1014,6 +1015,30 @@ static void sctp_assoc_bh_rcv(struct wor
+ state = asoc->state;
+ subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+
++ /* If the first chunk in the packet is AUTH, do special
++ * processing specified in Section 6.3 of SCTP-AUTH spec
++ */
++ if (first_time && subtype.chunk == SCTP_CID_AUTH) {
++ struct sctp_chunkhdr *next_hdr;
++
++ next_hdr = sctp_inq_peek(inqueue);
++ if (!next_hdr)
++ goto normal;
++
++ /* If the next chunk is COOKIE-ECHO, skip the AUTH
++ * chunk while saving a pointer to it so we can do
++ * Authentication later (during cookie-echo
++ * processing).
++ */
++ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
++ chunk->auth_chunk = skb_clone(chunk->skb,
++ GFP_ATOMIC);
++ chunk->auth = 1;
++ continue;
++ }
++ }
++
++normal:
+ /* SCTP-AUTH, Section 6.3:
+ * The receiver has a list of chunk types which it expects
+ * to be received only after an AUTH-chunk. This list has
+@@ -1052,6 +1077,9 @@ static void sctp_assoc_bh_rcv(struct wor
+ /* If there is an error on chunk, discard this packet. */
+ if (error && chunk)
+ chunk->pdiscard = 1;
++
++ if (first_time)
++ first_time = 0;
+ }
+ sctp_association_put(asoc);
+ }
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -144,10 +144,8 @@ static sctp_disposition_t sctp_sf_violat
+ void *arg,
+ sctp_cmd_seq_t *commands);
+
+-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+- const struct sctp_endpoint *ep,
++static sctp_ierror_t sctp_sf_authenticate(
+ const struct sctp_association *asoc,
+- const sctp_subtype_t type,
+ struct sctp_chunk *chunk);
+
+ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+@@ -615,6 +613,38 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(s
+ return SCTP_DISPOSITION_CONSUME;
+ }
+
++static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
++ const struct sctp_association *asoc)
++{
++ struct sctp_chunk auth;
++
++ if (!chunk->auth_chunk)
++ return true;
++
++ /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
++ * is supposed to be authenticated and we have to do delayed
++ * authentication. We've just recreated the association using
++ * the information in the cookie and now it's much easier to
++ * do the authentication.
++ */
++
++ /* Make sure that we and the peer are AUTH capable */
++ if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
++ return false;
++
++ /* set-up our fake chunk so that we can process it */
++ auth.skb = chunk->auth_chunk;
++ auth.asoc = chunk->asoc;
++ auth.sctp_hdr = chunk->sctp_hdr;
++ auth.chunk_hdr = (struct sctp_chunkhdr *)
++ skb_push(chunk->auth_chunk,
++ sizeof(struct sctp_chunkhdr));
++ skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
++ auth.transport = chunk->transport;
++
++ return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
++}
++
+ /*
+ * Respond to a normal COOKIE ECHO chunk.
+ * We are the side that is being asked for an association.
+@@ -751,36 +781,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(st
+ if (error)
+ goto nomem_init;
+
+- /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
+- * is supposed to be authenticated and we have to do delayed
+- * authentication. We've just recreated the association using
+- * the information in the cookie and now it's much easier to
+- * do the authentication.
+- */
+- if (chunk->auth_chunk) {
+- struct sctp_chunk auth;
+- sctp_ierror_t ret;
+-
+- /* Make sure that we and the peer are AUTH capable */
+- if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+- sctp_association_free(new_asoc);
+- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+- }
+-
+- /* set-up our fake chunk so that we can process it */
+- auth.skb = chunk->auth_chunk;
+- auth.asoc = chunk->asoc;
+- auth.sctp_hdr = chunk->sctp_hdr;
+- auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
+- sizeof(sctp_chunkhdr_t));
+- skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
+- auth.transport = chunk->transport;
+-
+- ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
+- if (ret != SCTP_IERROR_NO_ERROR) {
+- sctp_association_free(new_asoc);
+- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+- }
++ if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
++ sctp_association_free(new_asoc);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ repl = sctp_make_cookie_ack(new_asoc, chunk);
+@@ -1717,13 +1720,15 @@ static sctp_disposition_t sctp_sf_do_dup
+ GFP_ATOMIC))
+ goto nomem;
+
++ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
++ return SCTP_DISPOSITION_DISCARD;
++
+ /* Make sure no new addresses are being added during the
+ * restart. Though this is a pretty complicated attack
+ * since you'd have to get inside the cookie.
+ */
+- if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
++ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
+ return SCTP_DISPOSITION_CONSUME;
+- }
+
+ /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
+ * the peer has restarted (Action A), it MUST NOT setup a new
+@@ -1828,6 +1833,9 @@ static sctp_disposition_t sctp_sf_do_dup
+ GFP_ATOMIC))
+ goto nomem;
+
++ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
++ return SCTP_DISPOSITION_DISCARD;
++
+ /* Update the content of current association. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+@@ -1920,6 +1928,9 @@ static sctp_disposition_t sctp_sf_do_dup
+ * a COOKIE ACK.
+ */
+
++ if (!sctp_auth_chunk_verify(net, chunk, asoc))
++ return SCTP_DISPOSITION_DISCARD;
++
+ /* Don't accidentally move back into established state. */
+ if (asoc->state < SCTP_STATE_ESTABLISHED) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+@@ -3985,10 +3996,8 @@ gen_shutdown:
+ *
+ * The return value is the disposition of the chunk.
+ */
+-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+- const struct sctp_endpoint *ep,
++static sctp_ierror_t sctp_sf_authenticate(
+ const struct sctp_association *asoc,
+- const sctp_subtype_t type,
+ struct sctp_chunk *chunk)
+ {
+ struct sctp_authhdr *auth_hdr;
+@@ -4087,7 +4096,7 @@ sctp_disposition_t sctp_sf_eat_auth(stru
+ commands);
+
+ auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
+- error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
++ error = sctp_sf_authenticate(asoc, chunk);
+ switch (error) {
+ case SCTP_IERROR_AUTH_BAD_HMAC:
+ /* Generate the ERROR chunk and discard the rest
--- /dev/null
+From foo@baz Wed May 16 12:08:47 CEST 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Wed, 2 May 2018 13:45:12 +0800
+Subject: sctp: fix the issue that the cookie-ack with auth can't get processed
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit ce402f044e4e432c296f90eaabb8dbe8f3624391 ]
+
+When auth is enabled for the cookie-ack chunk, in sctp_inq_pop, sctp
+processes the auth chunk first, then continues to the next chunk in
+this packet if chunk_end + chunk_hdr size < skb_tail_pointer().
+Otherwise, it will go to the next packet or discard this chunk.
+
+However, it missed the fact that the cookie-ack chunk's size is equal
+to the chunk_hdr size, so it cannot satisfy that check, and thus this
+chunk would not get processed.
+
+This patch fixes it by changing the check to chunk_end + chunk_hdr
+size <= skb_tail_pointer().
+
+Fixes: 26b87c788100 ("net: sctp: fix remote memory pressure from excessive queueing")
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/inqueue.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -178,7 +178,7 @@ struct sctp_chunk *sctp_inq_pop(struct s
+ skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
+ chunk->subh.v = NULL; /* Subheader is no longer valid. */
+
+- if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
++ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <=
+ skb_tail_pointer(chunk->skb)) {
+ /* This is not a singleton */
+ chunk->singleton = 0;
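
A worked example of the boundary (illustrative numbers): a COOKIE-ACK chunk is nothing but a 4-byte chunk header, so when it follows the AUTH chunk the room left after the AUTH chunk is exactly sizeof(sctp_chunkhdr_t), and only the `<=` comparison sees it.

#include <stdio.h>

int main(void)
{
        unsigned long hdr = 4;                  /* sizeof(sctp_chunkhdr_t) */
        unsigned long chunk_end = 100;          /* end of the AUTH chunk (illustrative) */
        unsigned long tail = chunk_end + hdr;   /* packet ends right after the COOKIE-ACK header */

        printf("old check (<):  %d\n", chunk_end + hdr < tail);         /* 0 -> COOKIE-ACK skipped   */
        printf("new check (<=): %d\n", chunk_end + hdr <= tail);        /* 1 -> COOKIE-ACK processed */
        return 0;
}
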
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Wed, 2 May 2018 13:39:46 +0800
+Subject: sctp: use the old asoc when making the cookie-ack chunk in dupcook_d
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 46e16d4b956867013e0bbd7f2bad206f4aa55752 ]
+
+When processing a duplicate cookie-echo chunk, for case 'D', sctp will
+not process the param from this chunk. It means the old asoc has nothing
+to update, and the new temp asoc doesn't have the complete info.
+
+So there's no reason to use the new asoc when creating the cookie-ack
+chunk. Otherwise, for example when auth is enabled for cookie-ack, the
+chunk cannot be set with auth, and it will definitely be dropped by the
+peer.
+
+This issue has been there since the very beginning, and we fix it by
+using the old asoc instead.
+
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/sm_statefuns.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -1959,7 +1959,7 @@ static sctp_disposition_t sctp_sf_do_dup
+ }
+ }
+
+- repl = sctp_make_cookie_ack(new_asoc, chunk);
++ repl = sctp_make_cookie_ack(asoc, chunk);
+ if (!repl)
+ goto nomem;
+
--- /dev/null
+8139too-use-disable_irq_nosync-in-rtl8139_poll_controller.patch
+bridge-check-iface-upper-dev-when-setting-master-via-ioctl.patch
+dccp-fix-tasklet-usage.patch
+ipv4-fix-memory-leaks-in-udp_sendmsg-ping_v4_sendmsg.patch
+llc-better-deal-with-too-small-mtu.patch
+net-ethernet-sun-niu-set-correct-packet-size-in-skb.patch
+net-mlx4_en-verify-coalescing-parameters-are-in-range.patch
+net_sched-fq-take-care-of-throttled-flows-before-reuse.patch
+net-support-compat-64-bit-time-in-s-g-etsockopt.patch
+r8169-fix-powering-up-rtl8168h.patch
+sctp-use-the-old-asoc-when-making-the-cookie-ack-chunk-in-dupcook_d.patch
+tg3-fix-vunmap-bug_on-triggered-from-tg3_free_consistent.patch
+bonding-do-not-allow-rlb-updates-to-invalid-mac.patch
+tcp-ignore-fast-open-on-repair-mode.patch
+sctp-fix-the-issue-that-the-cookie-ack-with-auth-can-t-get-processed.patch
+sctp-delay-the-authentication-for-the-duplicated-cookie-echo-chunk.patch
+qmi_wwan-do-not-steal-interfaces-from-class-drivers.patch
--- /dev/null
+From foo@baz Wed May 16 12:08:47 CEST 2018
+From: Yuchung Cheng <ycheng@google.com>
+Date: Wed, 25 Apr 2018 11:33:08 -0700
+Subject: tcp: ignore Fast Open on repair mode
+
+From: Yuchung Cheng <ycheng@google.com>
+
+[ Upstream commit 16ae6aa1705299789f71fdea59bfb119c1fbd9c0 ]
+
+The TCP repair sequence of operation is to first set the socket in
+repair mode, then inject the TCP stats into the socket with repair
+socket options, then call connect() to re-activate the socket. The
+connect syscall simply returns and sets the state to ESTABLISHED.
+As a result, Fast Open is meaningless for TCP repair.
+
+However, allowing a sendto() system call with the MSG_FASTOPEN flag
+half-way through the repair operation could unexpectedly cause data to
+be sent before the operation finishes changing the internal TCP stats
+(e.g. MSS). This in turn triggers TCP warnings on inconsistent packet
+accounting.
+
+The fix is to simply disallow the Fast Open operation once the socket
+is in repair mode.
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Reviewed-by: Neal Cardwell <ncardwell@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1105,7 +1105,7 @@ int tcp_sendmsg(struct kiocb *iocb, stru
+ lock_sock(sk);
+
+ flags = msg->msg_flags;
+- if (flags & MSG_FASTOPEN) {
++ if ((flags & MSG_FASTOPEN) && !tp->repair) {
+ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
+ if (err == -EINPROGRESS && copied_syn > 0)
+ goto out;
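
For context, a hedged userspace sketch of the sequence the changelog describes (it needs CAP_NET_ADMIN; the constants are the Linux UAPI values, given as fallback defines in case the toolchain headers lack them, and the destination address is a placeholder). After the fix, the MSG_FASTOPEN flag is simply skipped once the socket is in repair mode.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_REPAIR
#define TCP_REPAIR 19                   /* TCP socket option number on Linux */
#endif
#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000         /* send flag value on Linux */
#endif

int main(void)
{
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port = htons(80),
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),      /* placeholder peer */
        };

        /* 1. enter repair mode (privileged) */
        if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one)) < 0)
                perror("TCP_REPAIR");

        /* 2. a repair tool would now inject queues/sequence numbers and then
         * call connect() to flip the socket straight to ESTABLISHED
         */

        /* 3. a sendto() with MSG_FASTOPEN issued half-way through used to
         * start transmitting; with the patch the Fast Open path is not
         * taken while the socket is in repair mode
         */
        if (sendto(fd, "x", 1, MSG_FASTOPEN,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
                perror("sendto");

        close(fd);
        return 0;
}
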
--- /dev/null
+From foo@baz Wed May 16 17:26:10 CEST 2018
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 3 May 2018 20:04:27 -0400
+Subject: tg3: Fix vunmap() BUG_ON() triggered from tg3_free_consistent().
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit d89a2adb8bfe6f8949ff389acdb9fa298b6e8e12 ]
+
+tg3_free_consistent() calls dma_free_coherent() to free tp->hw_stats
+under spinlock and can trigger BUG_ON() in vunmap() because vunmap()
+may sleep. Fix it by removing the spinlock and relying on the
+TG3_FLAG_INIT_COMPLETE flag to prevent race conditions between
+tg3_get_stats64() and tg3_free_consistent(). TG3_FLAG_INIT_COMPLETE
+is always cleared under tp->lock before tg3_free_consistent()
+and therefore tg3_get_stats64() can safely access tp->hw_stats
+under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+
+Fixes: f5992b72ebe0 ("tg3: Fix race condition in tg3_get_stats64().")
+Reported-by: Zumeng Chen <zumeng.chen@gmail.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -8705,14 +8705,15 @@ static void tg3_free_consistent(struct t
+ tg3_mem_rx_release(tp);
+ tg3_mem_tx_release(tp);
+
+- /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+- tg3_full_lock(tp, 0);
++ /* tp->hw_stats can be referenced safely:
++ * 1. under rtnl_lock
++ * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
++ */
+ if (tp->hw_stats) {
+ dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ tp->hw_stats, tp->stats_mapping);
+ tp->hw_stats = NULL;
+ }
+- tg3_full_unlock(tp);
+ }
+
+ /*
+@@ -14137,7 +14138,7 @@ static struct rtnl_link_stats64 *tg3_get
+ struct tg3 *tp = netdev_priv(dev);
+
+ spin_lock_bh(&tp->lock);
+- if (!tp->hw_stats) {
++ if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
+ *stats = tp->net_stats_prev;
+ spin_unlock_bh(&tp->lock);
+ return stats;