--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 19 Mar 2019 05:46:18 -0700
+Subject: dccp: do not use ipv6 header for ipv4 flow
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit e0aa67709f89d08c8d8e5bdd9e0b649df61d0090 ]
+
+When a dual stack dccp listener accepts an ipv4 flow,
+it should not attempt to use an ipv6 header or
+inet6_iif() helper.
+
+Fixes: 3df80d9320bc ("[DCCP]: Introduce DCCPv6")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dccp/ipv6.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -433,8 +433,8 @@ static struct sock *dccp_v6_request_recv
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
+- newnp->mcast_oif = inet6_iif(skb);
+- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
++ newnp->mcast_oif = inet_iif(skb);
++ newnp->mcast_hops = ip_hdr(skb)->ttl;
+
+ /*
+ * No need to charge this sock to the relevant IPv6 refcnt debug socks count
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Thu, 21 Mar 2019 15:02:50 +0800
+Subject: genetlink: Fix a memory leak on error path
+
+From: YueHaibing <yuehaibing@huawei.com>
+
+[ Upstream commit ceabee6c59943bdd5e1da1a6a20dc7ee5f8113a2 ]
+
+In genl_register_family(), when idr_alloc() fails,
+we forget to free the memory we possibly allocate for
+family->attrbuf.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: 2ae0f17df1cd ("genetlink: use idr to track families")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/genetlink.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -365,7 +365,7 @@ int genl_register_family(struct genl_fam
+ start, end + 1, GFP_KERNEL);
+ if (family->id < 0) {
+ err = family->id;
+- goto errout_locked;
++ goto errout_free;
+ }
+
+ err = genl_validate_assign_mc_groups(family);
+@@ -384,6 +384,7 @@ int genl_register_family(struct genl_fam
+
+ errout_remove:
+ idr_remove(&genl_fam_idr, family->id);
++errout_free:
+ kfree(family->attrbuf);
+ errout_locked:
+ genl_unlock_all();
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Sat, 16 Mar 2019 14:21:19 +1100
+Subject: mac8390: Fix mmio access size probe
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+[ Upstream commit bb9e5c5bcd76f4474eac3baf643d7a39f7bac7bb ]
+
+The bug that Stan reported is as follows. After a restart, a 16-bit NIC
+may be incorrectly identified as a 32-bit NIC and stop working.
+
+mac8390 slot.E: Memory length resource not found, probing
+mac8390 slot.E: Farallon EtherMac II-C (type farallon)
+mac8390 slot.E: MAC 00:00:c5:30:c2:99, IRQ 61, 32 KB shared memory at 0xfeed0000, 32-bit access.
+
+The bug never arises after a cold start and only intermittently after a
+warm start. (I didn't investigate why the bug is intermittent.)
+
+It turns out that memcpy_toio() is deprecated and memcmp_withio() also
+has issues. Replacing these calls with mmio accessors fixes the problem.
+
+Reported-and-tested-by: Stan Johnson <userm57@yahoo.com>
+Fixes: 2964db0f5904 ("m68k: Mac DP8390 update")
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/8390/mac8390.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/8390/mac8390.c
++++ b/drivers/net/ethernet/8390/mac8390.c
+@@ -156,8 +156,6 @@ static void dayna_block_output(struct ne
+ #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+ #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
+
+-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
+-
+ /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
+ static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+@@ -237,19 +235,26 @@ static enum mac8390_type __init mac8390_
+
+ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
+ {
+- unsigned long outdata = 0xA5A0B5B0;
+- unsigned long indata = 0x00000000;
++ u32 outdata = 0xA5A0B5B0;
++ u32 indata = 0;
++
+ /* Try writing 32 bits */
+- memcpy_toio(membase, &outdata, 4);
+- /* Now compare them */
+- if (memcmp_withio(&outdata, membase, 4) == 0)
++ nubus_writel(outdata, membase);
++ /* Now read it back */
++ indata = nubus_readl(membase);
++ if (outdata == indata)
+ return ACCESS_32;
++
++ outdata = 0xC5C0D5D0;
++ indata = 0;
++
+ /* Write 16 bit output */
+ word_memcpy_tocard(membase, &outdata, 4);
+ /* Now read it back */
+ word_memcpy_fromcard(&indata, membase, 4);
+ if (outdata == indata)
+ return ACCESS_16;
++
+ return ACCESS_UNKNOWN;
+ }
+
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Bjorn Helgaas <bhelgaas@google.com>
+Date: Mon, 18 Mar 2019 08:51:06 -0500
+Subject: mISDN: hfcpci: Test both vendor & device ID for Digium HFC4S
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit fae846e2b7124d4b076ef17791c73addf3b26350 ]
+
+The device ID alone does not uniquely identify a device. Test both the
+vendor and device ID to make sure we don't mistakenly think some other
+vendor's 0xB410 device is a Digium HFC4S. Also, instead of the bare hex
+ID, use the same constant (PCI_DEVICE_ID_DIGIUM_HFC4S) used in the device
+ID table.
+
+No functional change intended.
+
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/isdn/hardware/mISDN/hfcmulti.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
+@@ -4367,7 +4367,8 @@ setup_pci(struct hfc_multi *hc, struct p
+ if (m->clock2)
+ test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
+
+- if (ent->device == 0xB410) {
++ if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
++ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
+ test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
+ test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
+ test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Dmitry Bogdanov <dmitry.bogdanov@aquantia.com>
+Date: Sat, 16 Mar 2019 08:28:18 +0000
+Subject: net: aquantia: fix rx checksum offload for UDP/TCP over IPv6
+
+From: Dmitry Bogdanov <dmitry.bogdanov@aquantia.com>
+
+[ Upstream commit a7faaa0c5dc7d091cc9f72b870d7edcdd6f43f12 ]
+
+TCP/UDP checksum validity was propagated to skb
+only if IP checksum is valid.
+But for IPv6 there is no validity as there is no checksum in IPv6.
+This patch propagates TCP/UDP checksum validity regardless of IP checksum.
+
+Fixes: 018423e90bee ("net: ethernet: aquantia: Add ring support code")
+Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
+Signed-off-by: Nikita Danilov <nikita.danilov@aquantia.com>
+Signed-off-by: Dmitry Bogdanov <dmitry.bogdanov@aquantia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -270,11 +270,12 @@ int aq_ring_rx_clean(struct aq_ring_s *s
+ } else {
+ if (buff->is_ip_cso) {
+ __skb_incr_checksum_unnecessary(skb);
+- if (buff->is_udp_cso || buff->is_tcp_cso)
+- __skb_incr_checksum_unnecessary(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
++
++ if (buff->is_udp_cso || buff->is_tcp_cso)
++ __skb_incr_checksum_unnecessary(skb);
+ }
+
+ skb_set_hash(skb, buff->rss_hash,
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Mon, 25 Mar 2019 14:18:06 +0100
+Subject: net: datagram: fix unbounded loop in __skb_try_recv_datagram()
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 0b91bce1ebfc797ff3de60c8f4a1e6219a8a3187 ]
+
+Christoph reported a stall while peeking datagram with an offset when
+busy polling is enabled. __skb_try_recv_datagram() uses as the loop
+termination condition 'queue empty'. When peeking, the socket
+queue can be not empty, even when no additional packets are received.
+
+Address the issue explicitly checking for receive queue changes,
+as currently done by __skb_wait_for_more_packets().
+
+Fixes: 2b5cd0dfa384 ("net: Change return type of sk_busy_loop from bool to void")
+Reported-and-tested-by: Christoph Paasch <cpaasch@apple.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/datagram.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -281,7 +281,7 @@ struct sk_buff *__skb_try_recv_datagram(
+ break;
+
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
+- } while (!skb_queue_empty(&sk->sk_receive_queue));
++ } while (sk->sk_receive_queue.prev != *last);
+
+ error = -EAGAIN;
+
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Christoph Paasch <cpaasch@apple.com>
+Date: Mon, 18 Mar 2019 23:14:52 -0700
+Subject: net/packet: Set __GFP_NOWARN upon allocation in alloc_pg_vec
+
+From: Christoph Paasch <cpaasch@apple.com>
+
+[ Upstream commit 398f0132c14754fcd03c1c4f8e7176d001ce8ea1 ]
+
+Since commit fc62814d690c ("net/packet: fix 4gb buffer limit due to overflow check")
+one can now allocate packet ring buffers >= UINT_MAX. However, syzkaller
+found that that triggers a warning:
+
+[ 21.100000] WARNING: CPU: 2 PID: 2075 at mm/page_alloc.c:4584 __alloc_pages_nod0
+[ 21.101490] Modules linked in:
+[ 21.101921] CPU: 2 PID: 2075 Comm: syz-executor.0 Not tainted 5.0.0 #146
+[ 21.102784] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 0.5.1 01/01/2011
+[ 21.103887] RIP: 0010:__alloc_pages_nodemask+0x2a0/0x630
+[ 21.104640] Code: fe ff ff 65 48 8b 04 25 c0 de 01 00 48 05 90 0f 00 00 41 bd 01 00 00 00 48 89 44 24 48 e9 9c fe 3
+[ 21.107121] RSP: 0018:ffff88805e1cf920 EFLAGS: 00010246
+[ 21.107819] RAX: 0000000000000000 RBX: ffffffff85a488a0 RCX: 0000000000000000
+[ 21.108753] RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000000
+[ 21.109699] RBP: 1ffff1100bc39f28 R08: ffffed100bcefb67 R09: ffffed100bcefb67
+[ 21.110646] R10: 0000000000000001 R11: ffffed100bcefb66 R12: 000000000000000d
+[ 21.111623] R13: 0000000000000000 R14: ffff88805e77d888 R15: 000000000000000d
+[ 21.112552] FS: 00007f7c7de05700(0000) GS:ffff88806d100000(0000) knlGS:0000000000000000
+[ 21.113612] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 21.114405] CR2: 000000000065c000 CR3: 000000005e58e006 CR4: 00000000001606e0
+[ 21.115367] Call Trace:
+[ 21.115705] ? __alloc_pages_slowpath+0x21c0/0x21c0
+[ 21.116362] alloc_pages_current+0xac/0x1e0
+[ 21.116923] kmalloc_order+0x18/0x70
+[ 21.117393] kmalloc_order_trace+0x18/0x110
+[ 21.117949] packet_set_ring+0x9d5/0x1770
+[ 21.118524] ? packet_rcv_spkt+0x440/0x440
+[ 21.119094] ? lock_downgrade+0x620/0x620
+[ 21.119646] ? __might_fault+0x177/0x1b0
+[ 21.120177] packet_setsockopt+0x981/0x2940
+[ 21.120753] ? __fget+0x2fb/0x4b0
+[ 21.121209] ? packet_release+0xab0/0xab0
+[ 21.121740] ? sock_has_perm+0x1cd/0x260
+[ 21.122297] ? selinux_secmark_relabel_packet+0xd0/0xd0
+[ 21.123013] ? __fget+0x324/0x4b0
+[ 21.123451] ? selinux_netlbl_socket_setsockopt+0x101/0x320
+[ 21.124186] ? selinux_netlbl_sock_rcv_skb+0x3a0/0x3a0
+[ 21.124908] ? __lock_acquire+0x529/0x3200
+[ 21.125453] ? selinux_socket_setsockopt+0x5d/0x70
+[ 21.126075] ? __sys_setsockopt+0x131/0x210
+[ 21.126533] ? packet_release+0xab0/0xab0
+[ 21.127004] __sys_setsockopt+0x131/0x210
+[ 21.127449] ? kernel_accept+0x2f0/0x2f0
+[ 21.127911] ? ret_from_fork+0x8/0x50
+[ 21.128313] ? do_raw_spin_lock+0x11b/0x280
+[ 21.128800] __x64_sys_setsockopt+0xba/0x150
+[ 21.129271] ? lockdep_hardirqs_on+0x37f/0x560
+[ 21.129769] do_syscall_64+0x9f/0x450
+[ 21.130182] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+We should allocate with __GFP_NOWARN to handle this.
+
+Cc: Kal Conley <kal.conley@dectris.com>
+Cc: Andrey Konovalov <andreyknvl@google.com>
+Fixes: fc62814d690c ("net/packet: fix 4gb buffer limit due to overflow check")
+Signed-off-by: Christoph Paasch <cpaasch@apple.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4232,7 +4232,7 @@ static struct pgv *alloc_pg_vec(struct t
+ struct pgv *pg_vec;
+ int i;
+
+- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
++ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
+ if (unlikely(!pg_vec))
+ goto out;
+
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 15 Mar 2019 10:41:14 -0700
+Subject: net: rose: fix a possible stack overflow
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit e5dcc0c3223c45c94100f05f28d8ef814db3d82c ]
+
+rose_write_internal() uses a temp buffer of 100 bytes, but a manual
+inspection showed that given arbitrary input, rose_create_facilities()
+can fill up to 110 bytes.
+
+Lets use a tailroom of 256 bytes for peace of mind, and remove
+the bounce buffer : we can simply allocate a big enough skb
+and adjust its length as needed.
+
+syzbot report :
+
+BUG: KASAN: stack-out-of-bounds in memcpy include/linux/string.h:352 [inline]
+BUG: KASAN: stack-out-of-bounds in rose_create_facilities net/rose/rose_subr.c:521 [inline]
+BUG: KASAN: stack-out-of-bounds in rose_write_internal+0x597/0x15d0 net/rose/rose_subr.c:116
+Write of size 7 at addr ffff88808b1ffbef by task syz-executor.0/24854
+
+CPU: 0 PID: 24854 Comm: syz-executor.0 Not tainted 5.0.0+ #97
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x172/0x1f0 lib/dump_stack.c:113
+ print_address_description.cold+0x7c/0x20d mm/kasan/report.c:187
+ kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
+ check_memory_region_inline mm/kasan/generic.c:185 [inline]
+ check_memory_region+0x123/0x190 mm/kasan/generic.c:191
+ memcpy+0x38/0x50 mm/kasan/common.c:131
+ memcpy include/linux/string.h:352 [inline]
+ rose_create_facilities net/rose/rose_subr.c:521 [inline]
+ rose_write_internal+0x597/0x15d0 net/rose/rose_subr.c:116
+ rose_connect+0x7cb/0x1510 net/rose/af_rose.c:826
+ __sys_connect+0x266/0x330 net/socket.c:1685
+ __do_sys_connect net/socket.c:1696 [inline]
+ __se_sys_connect net/socket.c:1693 [inline]
+ __x64_sys_connect+0x73/0xb0 net/socket.c:1693
+ do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x458079
+Code: ad b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007f47b8d9dc78 EFLAGS: 00000246 ORIG_RAX: 000000000000002a
+RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000000458079
+RDX: 000000000000001c RSI: 0000000020000040 RDI: 0000000000000004
+RBP: 000000000073bf00 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007f47b8d9e6d4
+R13: 00000000004be4a4 R14: 00000000004ceca8 R15: 00000000ffffffff
+
+The buggy address belongs to the page:
+page:ffffea00022c7fc0 count:0 mapcount:0 mapping:0000000000000000 index:0x0
+flags: 0x1fffc0000000000()
+raw: 01fffc0000000000 0000000000000000 ffffffff022c0101 0000000000000000
+raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff88808b1ffa80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff88808b1ffb00: 00 00 00 00 00 00 00 00 f1 f1 f1 f1 00 00 00 03
+>ffff88808b1ffb80: f2 f2 00 00 00 00 00 00 00 00 00 00 00 00 04 f3
+ ^
+ ffff88808b1ffc00: f3 f3 f3 f3 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff88808b1ffc80: 00 00 00 00 00 00 00 f1 f1 f1 f1 f1 f1 01 f2 01
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rose/rose_subr.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+--- a/net/rose/rose_subr.c
++++ b/net/rose/rose_subr.c
+@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk
+ struct sk_buff *skb;
+ unsigned char *dptr;
+ unsigned char lci1, lci2;
+- char buffer[100];
+- int len, faclen = 0;
++ int maxfaclen = 0;
++ int len, faclen;
++ int reserve;
+
+- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
++ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
++ len = ROSE_MIN_LEN;
+
+ switch (frametype) {
+ case ROSE_CALL_REQUEST:
+ len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
+- faclen = rose_create_facilities(buffer, rose);
+- len += faclen;
++ maxfaclen = 256;
+ break;
+ case ROSE_CALL_ACCEPTED:
+ case ROSE_CLEAR_REQUEST:
+@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk
+ break;
+ }
+
+- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
++ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
++ if (!skb)
+ return;
+
+ /*
+ * Space for AX.25 header and PID.
+ */
+- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
++ skb_reserve(skb, reserve);
+
+- dptr = skb_put(skb, skb_tailroom(skb));
++ dptr = skb_put(skb, len);
+
+ lci1 = (rose->lci >> 8) & 0x0F;
+ lci2 = (rose->lci >> 0) & 0xFF;
+@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk
+ dptr += ROSE_ADDR_LEN;
+ memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
+ dptr += ROSE_ADDR_LEN;
+- memcpy(dptr, buffer, faclen);
++ faclen = rose_create_facilities(dptr, rose);
++ skb_put(skb, faclen);
+ dptr += faclen;
+ break;
+
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Aaro Koskinen <aaro.koskinen@nokia.com>
+Date: Mon, 18 Mar 2019 23:36:08 +0200
+Subject: net: stmmac: fix memory corruption with large MTUs
+
+From: Aaro Koskinen <aaro.koskinen@nokia.com>
+
+[ Upstream commit 223a960c01227e4dbcb6f9fa06b47d73bda21274 ]
+
+When using 16K DMA buffers and ring mode, the DES3 refill is not working
+correctly as the function is using a bogus pointer for checking the
+private data. As a result stale pointers will remain in the RX descriptor
+ring, so DMA will now likely overwrite/corrupt some already freed memory.
+
+As simple reproducer, just receive some UDP traffic:
+
+ # ifconfig eth0 down; ifconfig eth0 mtu 9000; ifconfig eth0 up
+ # iperf3 -c 192.168.253.40 -u -b 0 -R
+
+If you didn't crash by now check the RX descriptors to find non-contiguous
+RX buffers:
+
+ cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
+ [...]
+ 1 [0x2be5020]: 0xa3220321 0x9ffc1ffc 0x72d70082 0x130e207e
+ ^^^^^^^^^^^^^^^^^^^^^
+ 2 [0x2be5040]: 0xa3220321 0x9ffc1ffc 0x72998082 0x1311a07e
+ ^^^^^^^^^^^^^^^^^^^^^
+
+A simple ping test will now report bad data:
+
+ # ping -s 8200 192.168.253.40
+ PING 192.168.253.40 (192.168.253.40) 8200(8228) bytes of data.
+ 8208 bytes from 192.168.253.40: icmp_seq=1 ttl=64 time=1.00 ms
+ wrong data byte #8144 should be 0xd0 but was 0x88
+
+Fix the wrong pointer. Also we must refill DES3 only if the DMA buffer
+size is 16K.
+
+Fixes: 54139cf3bb33 ("net: stmmac: adding multiple buffers for rx")
+Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
+Acked-by: Jose Abreu <joabreu@synopsys.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -114,10 +114,11 @@ static unsigned int stmmac_is_jumbo_frm(
+
+ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+ {
+- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
++ struct stmmac_rx_queue *rx_q = priv_ptr;
++ struct stmmac_priv *priv = rx_q->priv_data;
+
+ /* Fill DES3 in case of RING mode */
+- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
++ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
+ }
+
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Tue, 19 Mar 2019 10:16:53 +0800
+Subject: net-sysfs: call dev_hold if kobject_init_and_add success
+
+From: YueHaibing <yuehaibing@huawei.com>
+
+[ Upstream commit a3e23f719f5c4a38ffb3d30c8d7632a4ed8ccd9e ]
+
+In netdev_queue_add_kobject and rx_queue_add_kobject,
+if sysfs_create_group failed, kobject_put will call
+netdev_queue_release to decrease dev refcont, however
+dev_hold has not be called. So we will see this while
+unregistering dev:
+
+unregister_netdevice: waiting for bcsh0 to become free. Usage count = -1
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: d0d668371679 ("net: don't decrement kobj reference count on init failure")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/net-sysfs.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -917,6 +917,8 @@ static int rx_queue_add_kobject(struct n
+ if (error)
+ return error;
+
++ dev_hold(queue->dev);
++
+ if (dev->sysfs_rx_queue_group) {
+ error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
+ if (error) {
+@@ -926,7 +928,6 @@ static int rx_queue_add_kobject(struct n
+ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+- dev_hold(queue->dev);
+
+ return error;
+ }
+@@ -1327,6 +1328,8 @@ static int netdev_queue_add_kobject(stru
+ if (error)
+ return error;
+
++ dev_hold(queue->dev);
++
+ #ifdef CONFIG_BQL
+ error = sysfs_create_group(kobj, &dql_group);
+ if (error) {
+@@ -1336,7 +1339,6 @@ static int netdev_queue_add_kobject(stru
+ #endif
+
+ kobject_uevent(kobj, KOBJ_ADD);
+- dev_hold(queue->dev);
+
+ return 0;
+ }
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Wang Hai <wanghai26@huawei.com>
+Date: Wed, 20 Mar 2019 14:25:05 -0400
+Subject: net-sysfs: Fix memory leak in netdev_register_kobject
+
+From: Wang Hai <wanghai26@huawei.com>
+
+[ Upstream commit 6b70fc94afd165342876e53fc4b2f7d085009945 ]
+
+When registering struct net_device, it will call
+ register_netdevice ->
+ netdev_register_kobject ->
+ device_initialize(dev);
+ dev_set_name(dev, "%s", ndev->name)
+ device_add(dev)
+ register_queue_kobjects(ndev)
+
+In netdev_register_kobject(), if device_add(dev) or
+register_queue_kobjects(ndev) failed. Register_netdevice()
+will return error, causing netdev_freemem(ndev) to be
+called to free net_device, however put_device(&dev->dev)->..->
+kobject_cleanup() won't be called, resulting in a memory leak.
+
+syzkaller report this:
+BUG: memory leak
+unreferenced object 0xffff8881f4fad168 (size 8):
+comm "syz-executor.0", pid 3575, jiffies 4294778002 (age 20.134s)
+hex dump (first 8 bytes):
+ 77 70 61 6e 30 00 ff ff wpan0...
+backtrace:
+ [<000000006d2d91d7>] kstrdup_const+0x3d/0x50 mm/util.c:73
+ [<00000000ba9ff953>] kvasprintf_const+0x112/0x170 lib/kasprintf.c:48
+ [<000000005555ec09>] kobject_set_name_vargs+0x55/0x130 lib/kobject.c:281
+ [<0000000098d28ec3>] dev_set_name+0xbb/0xf0 drivers/base/core.c:1915
+ [<00000000b7553017>] netdev_register_kobject+0xc0/0x410 net/core/net-sysfs.c:1727
+ [<00000000c826a797>] register_netdevice+0xa51/0xeb0 net/core/dev.c:8711
+ [<00000000857bfcfd>] cfg802154_update_iface_num.isra.2+0x13/0x90 [ieee802154]
+ [<000000003126e453>] ieee802154_llsec_fill_key_id+0x1d5/0x570 [ieee802154]
+ [<00000000e4b3df51>] 0xffffffffc1500e0e
+ [<00000000b4319776>] platform_drv_probe+0xc6/0x180 drivers/base/platform.c:614
+ [<0000000037669347>] really_probe+0x491/0x7c0 drivers/base/dd.c:509
+ [<000000008fed8862>] driver_probe_device+0xdc/0x240 drivers/base/dd.c:671
+ [<00000000baf52041>] device_driver_attach+0xf2/0x130 drivers/base/dd.c:945
+ [<00000000c7cc8dec>] __driver_attach+0x10e/0x210 drivers/base/dd.c:1022
+ [<0000000057a757c2>] bus_for_each_dev+0x154/0x1e0 drivers/base/bus.c:304
+ [<000000005f5ae04b>] bus_add_driver+0x427/0x5e0 drivers/base/bus.c:645
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: 1fa5ae857bb1 ("driver core: get rid of struct device's bus_id string array")
+Signed-off-by: Wang Hai <wanghai26@huawei.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/net-sysfs.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1598,16 +1598,20 @@ int netdev_register_kobject(struct net_d
+
+ error = device_add(dev);
+ if (error)
+- return error;
++ goto error_put_device;
+
+ error = register_queue_kobjects(ndev);
+- if (error) {
+- device_del(dev);
+- return error;
+- }
++ if (error)
++ goto error_device_del;
+
+ pm_runtime_set_memalloc_noio(dev, true);
+
++ return 0;
++
++error_device_del:
++ device_del(dev);
++error_put_device:
++ put_device(dev);
+ return error;
+ }
+
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Sat, 16 Mar 2019 14:41:30 +0100
+Subject: packets: Always register packet sk in the same order
+
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+
+[ Upstream commit a4dc6a49156b1f8d6e17251ffda17c9e6a5db78a ]
+
+When using fanouts with AF_PACKET, the demux functions such as
+fanout_demux_cpu will return an index in the fanout socket array, which
+corresponds to the selected socket.
+
+The ordering of this array depends on the order the sockets were added
+to a given fanout group, so for FANOUT_CPU this means sockets are bound
+to cpus in the order they are configured, which is OK.
+
+However, when stopping then restarting the interface these sockets are
+bound to, the sockets are reassigned to the fanout group in the reverse
+order, due to the fact that they were inserted at the head of the
+interface's AF_PACKET socket list.
+
+This means that traffic that was directed to the first socket in the
+fanout group is now directed to the last one after an interface restart.
+
+In the case of FANOUT_CPU, traffic from CPU0 will be directed to the
+socket that used to receive traffic from the last CPU after an interface
+restart.
+
+This commit introduces a helper to add a socket at the tail of a list,
+then uses it to register AF_PACKET sockets.
+
+Note that this changes the order in which sockets are listed in /proc and
+with sock_diag.
+
+Fixes: dc99f600698d ("packet: Add fanout support")
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock.h | 6 ++++++
+ net/packet/af_packet.c | 2 +-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -682,6 +682,12 @@ static inline void sk_add_node_rcu(struc
+ hlist_add_head_rcu(&sk->sk_node, list);
+ }
+
++static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
++{
++ sock_hold(sk);
++ hlist_add_tail_rcu(&sk->sk_node, list);
++}
++
+ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+ {
+ hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3281,7 +3281,7 @@ static int packet_create(struct net *net
+ }
+
+ mutex_lock(&net->packet.sklist_lock);
+- sk_add_node_rcu(sk, &net->packet.sklist);
++ sk_add_node_tail_rcu(sk, &net->packet.sklist);
+ mutex_unlock(&net->packet.sklist_lock);
+
+ preempt_disable();
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 21 Mar 2019 09:39:52 +0800
+Subject: rhashtable: Still do rehash when we get EEXIST
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 408f13ef358aa5ad56dc6230c2c7deb92cf462b1 ]
+
+As it stands if a shrink is delayed because of an outstanding
+rehash, we will go into a rescheduling loop without ever doing
+the rehash.
+
+This patch fixes this by still carrying out the rehash and then
+rescheduling so that we can shrink after the completion of the
+rehash should it still be necessary.
+
+The return value of EEXIST captures this case and other cases
+(e.g., another thread expanded/rehashed the table at the same
+time) where we should still proceed with the rehash.
+
+Fixes: da20420f83ea ("rhashtable: Add nested tables")
+Reported-by: Josh Elsasser <jelsasser@appneta.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Tested-by: Josh Elsasser <jelsasser@appneta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/rhashtable.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -459,8 +459,12 @@ static void rht_deferred_worker(struct w
+ else if (tbl->nest)
+ err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
+
+- if (!err)
+- err = rhashtable_rehash_table(ht);
++ if (!err || err == -EEXIST) {
++ int nerr;
++
++ nerr = rhashtable_rehash_table(ht);
++ err = err ?: nerr;
++ }
+
+ mutex_unlock(&ht->mutex);
+
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Xin Long <lucien.xin@gmail.com>
+Date: Mon, 18 Mar 2019 19:47:00 +0800
+Subject: sctp: get sctphdr by offset in sctp_compute_cksum
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 273160ffc6b993c7c91627f5a84799c66dfe4dee ]
+
+sctp_hdr(skb) only works when skb->transport_header is set properly.
+
+But in Netfilter, skb->transport_header for ipv6 is not guaranteed
+to be right value for sctphdr. It would cause to fail to check the
+checksum for sctp packets.
+
+So fix it by using offset, which is always right in all places.
+
+v1->v2:
+ - Fix the changelog.
+
+Fixes: e6d8b64b34aa ("net: sctp: fix and consolidate SCTP checksumming code")
+Reported-by: Li Shuang <shuali@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sctp/checksum.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/sctp/checksum.h
++++ b/include/net/sctp/checksum.h
+@@ -60,7 +60,7 @@ static inline __wsum sctp_csum_combine(_
+ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+ unsigned int offset)
+ {
+- struct sctphdr *sh = sctp_hdr(skb);
++ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+ __le32 ret, old = sh->checksum;
+ const struct skb_checksum_ops ops = {
+ .update = sctp_csum_update,
bluetooth-verify-that-l2cap_get_conf_opt-provides-large-enough-buffer.patch
video-fbdev-set-pixclock-0-in-goldfishfb.patch
stmmac-copy-unicast-mac-address-to-mac-registers.patch
+dccp-do-not-use-ipv6-header-for-ipv4-flow.patch
+genetlink-fix-a-memory-leak-on-error-path.patch
+misdn-hfcpci-test-both-vendor-device-id-for-digium-hfc4s.patch
+net-datagram-fix-unbounded-loop-in-__skb_try_recv_datagram.patch
+net-packet-set-__gfp_nowarn-upon-allocation-in-alloc_pg_vec.patch
+net-rose-fix-a-possible-stack-overflow.patch
+net-stmmac-fix-memory-corruption-with-large-mtus.patch
+net-sysfs-call-dev_hold-if-kobject_init_and_add-success.patch
+net-sysfs-fix-memory-leak-in-netdev_register_kobject.patch
+packets-always-register-packet-sk-in-the-same-order.patch
+rhashtable-still-do-rehash-when-we-get-eexist.patch
+tcp-do-not-use-ipv6-header-for-ipv4-flow.patch
+thunderx-enable-page-recycling-for-non-xdp-case.patch
+thunderx-eliminate-extra-calls-to-put_page-for-pages-held-for-recycling.patch
+vxlan-don-t-call-gro_cells_destroy-before-device-is-unregistered.patch
+sctp-get-sctphdr-by-offset-in-sctp_compute_cksum.patch
+net-aquantia-fix-rx-checksum-offload-for-udp-tcp-over-ipv6.patch
+mac8390-fix-mmio-access-size-probe.patch
+tun-properly-test-for-iff_up.patch
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 19 Mar 2019 05:45:35 -0700
+Subject: tcp: do not use ipv6 header for ipv4 flow
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 89e4130939a20304f4059ab72179da81f5347528 ]
+
+When a dual stack tcp listener accepts an ipv4 flow,
+it should not attempt to use an ipv6 header or tcp_v6_iif() helper.
+
+Fixes: 1397ed35f22d ("ipv6: add flowinfo for tcp6 pkt_options for all cases")
+Fixes: df3687ffc665 ("ipv6: add the IPV6_FL_F_REFLECT flag to IPV6_FL_A_GET")
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/tcp_ipv6.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1083,11 +1083,11 @@ static struct sock *tcp_v6_syn_recv_sock
+ newnp->ipv6_fl_list = NULL;
+ newnp->pktoptions = NULL;
+ newnp->opt = NULL;
+- newnp->mcast_oif = tcp_v6_iif(skb);
+- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
+- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
++ newnp->mcast_oif = inet_iif(skb);
++ newnp->mcast_hops = ip_hdr(skb)->ttl;
++ newnp->rcv_flowinfo = 0;
+ if (np->repflow)
+- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
++ newnp->flow_label = 0;
+
+ /*
+ * No need to charge this sock to the relevant IPv6 refcnt debug socks count
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Dean Nelson <dnelson@redhat.com>
+Date: Tue, 26 Mar 2019 11:53:26 -0400
+Subject: thunderx: eliminate extra calls to put_page() for pages held for recycling
+
+From: Dean Nelson <dnelson@redhat.com>
+
+[ Upstream commit cd35ef91490ad8049dd180bb060aff7ee192eda9 ]
+
+For the non-XDP case, commit 773225388dae15e72790 ("net: thunderx: Optimize
+page recycling for XDP") added code to nicvf_free_rbdr() that, when releasing
+the additional receive buffer page reference held for recycling, repeatedly
+calls put_page() until the page's _refcount goes to zero, which results in
+the page being freed.
+
+This is not okay if the page's _refcount was greater than 1 (in the non-XDP
+case), because nicvf_free_rbdr() should not be subtracting more than what
+nicvf_alloc_page() had previously added to the page's _refcount, which was
+only 1 (in the non-XDP case).
+
+This can arise if a received packet is still being processed and the receive
+buffer (i.e., skb->head) has not yet been freed via skb_free_head() when
+nicvf_free_rbdr() is spinning through the aforementioned put_page() loop.
+
+If this should occur, when the received packet finishes processing and
+skb_free_head() is called, various problems can ensue. Exactly what, depends on
+whether the page has already been reallocated or not, anything from "BUG: Bad
+page state ... ", to "Unable to handle kernel NULL pointer dereference ..." or
+"Unable to handle kernel paging request...".
+
+So this patch changes nicvf_free_rbdr() to only call put_page() once for pages
+held for recycling (in the non-XDP case).
+
+Fixes: 773225388dae ("net: thunderx: Optimize page recycling for XDP")
+Signed-off-by: Dean Nelson <dnelson@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -364,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf
+ while (head < rbdr->pgcnt) {
+ pgcache = &rbdr->pgcache[head];
+ if (pgcache->page && page_ref_count(pgcache->page) != 0) {
+- if (!rbdr->is_xdp) {
+- put_page(pgcache->page);
+- continue;
++ if (rbdr->is_xdp) {
++ page_ref_sub(pgcache->page,
++ pgcache->ref_count - 1);
+ }
+- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
+ put_page(pgcache->page);
+ }
+ head++;
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Dean Nelson <dnelson@redhat.com>
+Date: Tue, 26 Mar 2019 11:53:19 -0400
+Subject: thunderx: enable page recycling for non-XDP case
+
+From: Dean Nelson <dnelson@redhat.com>
+
+[ Upstream commit b3e208069477588c06f4d5d986164b435bb06e6d ]
+
+Commit 773225388dae15e72790 ("net: thunderx: Optimize page recycling for XDP")
+added code to nicvf_alloc_page() that inadvertently disables receive buffer
+page recycling for the non-XDP case by always NULL'ing the page pointer.
+
+This patch corrects two if-conditionals to allow for the recycling of non-XDP
+mode pages by only setting the page pointer to NULL when the page is not ready
+for recycling.
+
+Fixes: 773225388dae ("net: thunderx: Optimize page recycling for XDP")
+Signed-off-by: Dean Nelson <dnelson@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 23 ++++++++++-----------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_allo
+ /* Check if page can be recycled */
+ if (page) {
+ ref_count = page_ref_count(page);
+- /* Check if this page has been used once i.e 'put_page'
+- * called after packet transmission i.e internal ref_count
+- * and page's ref_count are equal i.e page can be recycled.
++ /* This page can be recycled if internal ref_count and page's
++ * ref_count are equal, indicating that the page has been used
++ * once for packet transmission. For non-XDP mode, internal
++ * ref_count is always '1'.
+ */
+- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
+- pgcache->ref_count--;
+- else
+- page = NULL;
+-
+- /* In non-XDP mode, page's ref_count needs to be '1' for it
+- * to be recycled.
+- */
+- if (!rbdr->is_xdp && (ref_count != 1))
++ if (rbdr->is_xdp) {
++ if (ref_count == pgcache->ref_count)
++ pgcache->ref_count--;
++ else
++ page = NULL;
++ } else if (ref_count != 1) {
+ page = NULL;
++ }
+ }
+
+ if (!page) {
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 14 Mar 2019 20:19:47 -0700
+Subject: tun: properly test for IFF_UP
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 4477138fa0ae4e1b699786ef0600863ea6e6c61c ]
+
+Same reasons as the ones explained in commit 4179cb5a4c92
+("vxlan: test dev->flags & IFF_UP before calling netif_rx()")
+
+netif_rx_ni() or napi_gro_frags() must be called under a strict contract.
+
+At device dismantle phase, core networking clears IFF_UP
+and flush_all_backlogs() is called after rcu grace period
+to make sure no incoming packet might be in a cpu backlog
+and still referencing the device.
+
+A similar protocol is used for gro layer.
+
+Most drivers call netif_rx() from their interrupt handler,
+and since the interrupts are disabled at device dismantle,
+netif_rx() does not have to check dev->flags & IFF_UP
+
+Virtual drivers do not have this guarantee, and must
+therefore make the check themselves.
+
+Fixes: 1bd4978a88ac ("tun: honor IFF_UP in tun_get_user()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/tun.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1403,9 +1403,6 @@ static ssize_t tun_get_user(struct tun_s
+ u32 rxhash;
+ int skb_xdp = 1;
+
+- if (!(tun->dev->flags & IFF_UP))
+- return -EIO;
+-
+ if (!(tun->flags & IFF_NO_PI)) {
+ if (len < sizeof(pi))
+ return -EINVAL;
+@@ -1493,9 +1490,11 @@ static ssize_t tun_get_user(struct tun_s
+ err = skb_copy_datagram_from_iter(skb, 0, from, len);
+
+ if (err) {
++ err = -EFAULT;
++drop:
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ kfree_skb(skb);
+- return -EFAULT;
++ return err;
+ }
+ }
+
+@@ -1566,11 +1565,19 @@ static ssize_t tun_get_user(struct tun_s
+ }
+
+ rxhash = __skb_get_hash_symmetric(skb);
++
++ rcu_read_lock();
++ if (unlikely(!(tun->dev->flags & IFF_UP))) {
++ err = -EIO;
++ goto drop;
++ }
++
+ #ifndef CONFIG_4KSTACKS
+ tun_rx_batched(tun, tfile, skb, more);
+ #else
+ netif_rx_ni(skb);
+ #endif
++ rcu_read_unlock();
+
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
--- /dev/null
+From foo@baz Thu Mar 28 21:57:57 CET 2019
+From: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+Date: Sat, 16 Mar 2019 17:02:54 +0800
+Subject: vxlan: Don't call gro_cells_destroy() before device is unregistered
+
+From: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+
+[ Upstream commit cc4807bb609230d8959fd732b0bf3bd4c2de8eac ]
+
+Commit ad6c9986bcb62 ("vxlan: Fix GRO cells race condition between
+receive and link delete") fixed a race condition for the typical case a vxlan
+device is dismantled from the current netns. But if a netns is dismantled,
+vxlan_destroy_tunnels() is called to schedule a unregister_netdevice_queue()
+of all the vxlan tunnels that are related to this netns.
+
+In vxlan_destroy_tunnels(), gro_cells_destroy() is called and finished before
+unregister_netdevice_queue(). This means that the gro_cells_destroy() call is
+done too soon, for the same reasons explained in the above commit.
+
+So we need to fully respect the RCU rules, and thus must remove the
+gro_cells_destroy() call or risk a use-after-free.
+
+Fixes: 58ce31cca1ff ("vxlan: GRO support at tunnel layer")
+Signed-off-by: Suanming.Mou <mousuanming@huawei.com>
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Reviewed-by: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vxlan.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3793,10 +3793,8 @@ static void __net_exit vxlan_exit_net(st
+ /* If vxlan->dev is in the same netns, it has already been added
+ * to the list by the previous loop.
+ */
+- if (!net_eq(dev_net(vxlan->dev), net)) {
+- gro_cells_destroy(&vxlan->gro_cells);
++ if (!net_eq(dev_net(vxlan->dev), net))
+ unregister_netdevice_queue(vxlan->dev, &list);
+- }
+ }
+
+ unregister_netdevice_many(&list);