ubifs-fix-read-out-of-bounds-in-ubifs_wbuf_write_nolock.patch
ubifs-fix-to-add-refcount-once-page-is-set-private.patch
ubifs-rename_whiteout-correct-old_dir-size-computing.patch
+wireguard-queueing-use-cfi-safe-ptr_ring-cleanup-function.patch
+wireguard-socket-free-skb-in-send6-when-ipv6-is-disabled.patch
+wireguard-socket-ignore-v6-endpoints-when-ipv6-is-disabled.patch
+xarray-fix-xas_create_range-when-multi-order-entry-present.patch
--- /dev/null
+From ec59f128a9bd4255798abb1e06ac3b442f46ef68 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Tue, 29 Mar 2022 21:31:24 -0400
+Subject: wireguard: queueing: use CFI-safe ptr_ring cleanup function
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit ec59f128a9bd4255798abb1e06ac3b442f46ef68 upstream.
+
+We make too nuanced use of ptr_ring to entirely move to the skb_array
+wrappers, but we at least should avoid the naughty function pointer cast
+when cleaning up skbs. Otherwise RAP/CFI will honk at us. This patch
+uses the __skb_array_destroy_skb wrapper for the cleanup, rather than
+directly providing kfree_skb, which is what other drivers in the same
+situation do too.
+
+Reported-by: PaX Team <pageexec@freemail.hu>
+Fixes: 886fcee939ad ("wireguard: receive: use ring buffer for incoming handshakes")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/queueing.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireguard/queueing.c
++++ b/drivers/net/wireguard/queueing.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include "queueing.h"
++#include <linux/skb_array.h>
+
+ struct multicore_worker __percpu *
+ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
+@@ -42,7 +43,7 @@ void wg_packet_queue_free(struct crypt_q
+ {
+ free_percpu(queue->worker);
+ WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+- ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
++ ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
+ }
+
+ #define NEXT(skb) ((skb)->prev)
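
The fix above works because CFI (and PaX RAP) checks that the target of an
indirect call has exactly the prototype the call site expects. ptr_ring_cleanup()
invokes its destroy callback as void (*)(void *), so handing it kfree_skb(),
which takes a struct sk_buff *, through a cast trips that check. Below is a
minimal userspace sketch of the same pattern, not part of the patch, using
hypothetical names rather than the kernel types:

    #include <stdlib.h>

    struct item { int value; };

    static void free_item(struct item *it)
    {
            free(it);
    }

    /* Wrapper whose prototype matches the callback type exactly, analogous
     * to the __skb_array_destroy_skb() wrapper the patch switches to; the
     * void * converts implicitly inside the function body. */
    static void destroy_item(void *ptr)
    {
            free_item(ptr);
    }

    /* Simplified stand-in for ptr_ring_cleanup(): calls destroy() on every
     * remaining slot through a void (*)(void *) pointer. */
    static void ring_cleanup(void **slots, size_t n, void (*destroy)(void *))
    {
            for (size_t i = 0; i < n; i++)
                    if (slots[i] && destroy)
                            destroy(slots[i]);
    }

Calling ring_cleanup(slots, n, destroy_item) is type-correct at the indirect
call site, whereas passing (void (*)(void *))free_item would be the kind of
cast the patch removes.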
--- /dev/null
+From bbbf962d9460194993ee1943a793a0a0af4a7fbf Mon Sep 17 00:00:00 2001
+From: Wang Hai <wanghai38@huawei.com>
+Date: Tue, 29 Mar 2022 21:31:26 -0400
+Subject: wireguard: socket: free skb in send6 when ipv6 is disabled
+
+From: Wang Hai <wanghai38@huawei.com>
+
+commit bbbf962d9460194993ee1943a793a0a0af4a7fbf upstream.
+
+I got a memory leak report:
+
+unreferenced object 0xffff8881191fc040 (size 232):
+ comm "kworker/u17:0", pid 23193, jiffies 4295238848 (age 3464.870s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<ffffffff814c3ef4>] slab_post_alloc_hook+0x84/0x3b0
+ [<ffffffff814c8977>] kmem_cache_alloc_node+0x167/0x340
+ [<ffffffff832974fb>] __alloc_skb+0x1db/0x200
+ [<ffffffff82612b5d>] wg_socket_send_buffer_to_peer+0x3d/0xc0
+ [<ffffffff8260e94a>] wg_packet_send_handshake_initiation+0xfa/0x110
+ [<ffffffff8260ec81>] wg_packet_handshake_send_worker+0x21/0x30
+ [<ffffffff8119c558>] process_one_work+0x2e8/0x770
+ [<ffffffff8119ca2a>] worker_thread+0x4a/0x4b0
+ [<ffffffff811a88e0>] kthread+0x120/0x160
+ [<ffffffff8100242f>] ret_from_fork+0x1f/0x30
+
+In wg_socket_send_buffer_as_reply_to_skb() and wg_socket_send_buffer_to_peer(),
+the semantics of send6() require it to free the skb. But when CONFIG_IPV6 is
+disabled, the kfree_skb() call is missing. This patch adds it to fix this bug.
+
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/socket.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wireguard/socket.c
++++ b/drivers/net/wireguard/socket.c
+@@ -160,6 +160,7 @@ out:
+ rcu_read_unlock_bh();
+ return ret;
+ #else
++ kfree_skb(skb);
+ return -EAFNOSUPPORT;
+ #endif
+ }
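
The contract being fixed is that send6() owns the skb on every return path;
callers such as wg_socket_send_buffer_to_peer() never free it themselves, so
the compiled-out IPv6 branch also has to consume it. A hedged userspace sketch
of that callee-consumes convention, with a hypothetical HAVE_IPV6 switch
standing in for CONFIG_IPV6:

    #include <errno.h>
    #include <stdlib.h>

    struct buffer { unsigned char *data; size_t len; };

    /* Whether transmission succeeds, fails, or the path is compiled out,
     * the buffer must not leak: the callee always consumes it. */
    static int send_v6(struct buffer *buf)
    {
    #ifdef HAVE_IPV6
            /* ... hand buf to the v6 stack, which then owns it ... */
            return 0;
    #else
            /* Compiled-out path: the caller still expects buf to be
             * consumed, so free it before reporting the error. */
            free(buf->data);
            free(buf);
            return -EAFNOSUPPORT;
    #endif
    }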
--- /dev/null
+From 77fc73ac89be96ec8f39e8efa53885caa7cb3645 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Tue, 29 Mar 2022 21:31:27 -0400
+Subject: wireguard: socket: ignore v6 endpoints when ipv6 is disabled
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 77fc73ac89be96ec8f39e8efa53885caa7cb3645 upstream.
+
+The previous commit fixed a memory leak on the send path in the event
+that IPv6 is disabled at compile time, but how did a packet even arrive
+there to begin with? It turns out we have previously allowed IPv6
+endpoints even when IPv6 support is disabled at compile time. This is
+awkward and inconsistent. Instead, let's just ignore all things IPv6,
+the same way we do other malformed endpoints, in the case where IPv6 is
+disabled.
+
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/socket.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireguard/socket.c
++++ b/drivers/net/wireguard/socket.c
+@@ -242,7 +242,7 @@ int wg_socket_endpoint_from_skb(struct e
+ endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
+ endpoint->src4.s_addr = ip_hdr(skb)->daddr;
+ endpoint->src_if4 = skb->skb_iif;
+- } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
+ endpoint->addr6.sin6_family = AF_INET6;
+ endpoint->addr6.sin6_port = udp_hdr(skb)->source;
+ endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
+@@ -285,7 +285,7 @@ void wg_socket_set_peer_endpoint(struct
+ peer->endpoint.addr4 = endpoint->addr4;
+ peer->endpoint.src4 = endpoint->src4;
+ peer->endpoint.src_if4 = endpoint->src_if4;
+- } else if (endpoint->addr.sa_family == AF_INET6) {
++ } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) {
+ peer->endpoint.addr6 = endpoint->addr6;
+ peer->endpoint.src6 = endpoint->src6;
+ } else {
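
Both hunks rely on IS_ENABLED(CONFIG_IPV6) evaluating to a compile-time
constant: with IPv6 compiled out, the v6 arm becomes dead code and v6
endpoints fall through to the final else branch, the same path taken for
malformed endpoints. A small illustration of that gating pattern, using a
hypothetical stand-in for the kernel's kconfig macro:

    #include <stdio.h>

    #define MY_IS_ENABLED(opt) (opt)    /* stand-in for the kernel's IS_ENABLED() */
    #define MY_CONFIG_IPV6 0            /* hypothetical: IPv6 support compiled out */

    enum family { FAM_INET, FAM_INET6, FAM_OTHER };

    static const char *classify(enum family f)
    {
            if (f == FAM_INET)
                    return "v4 endpoint";
            else if (MY_IS_ENABLED(MY_CONFIG_IPV6) && f == FAM_INET6)
                    return "v6 endpoint";   /* dead code when IPv6 is off */
            else
                    return "ignored";       /* v6 now lands here, like any malformed endpoint */
    }

    int main(void)
    {
            puts(classify(FAM_INET6));      /* prints "ignored" */
            return 0;
    }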
--- /dev/null
+From 3e3c658055c002900982513e289398a1aad4a488 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Mon, 28 Mar 2022 19:25:11 -0400
+Subject: XArray: Fix xas_create_range() when multi-order entry present
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit 3e3c658055c002900982513e289398a1aad4a488 upstream.
+
+If there is already an entry present that is of order >= XA_CHUNK_SHIFT
+when we call xas_create_range(), xas_create_range() will misinterpret
+that entry as a node and dereference xa_node->parent, generally leading
+to a crash that looks something like this:
+
+general protection fault, probably for non-canonical address 0xdffffc0000000001:
+0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000008-0x000000000000000f]
+CPU: 0 PID: 32 Comm: khugepaged Not tainted 5.17.0-rc8-syzkaller-00003-g56e337f2cf13 #0
+RIP: 0010:xa_parent_locked include/linux/xarray.h:1207 [inline]
+RIP: 0010:xas_create_range+0x2d9/0x6e0 lib/xarray.c:725
+
+It's deterministically reproducible once you know what the problem is,
+but producing it in a live kernel requires khugepaged to hit a race.
+While the problem has been present since xas_create_range() was
+introduced, I'm not aware of a way to hit it before the page cache was
+converted to use multi-index entries.
+
+Fixes: 6b24ca4a1a8d ("mm: Use multi-index entries in the page cache")
+Reported-by: syzbot+0d2b0bf32ca5cfd09f2e@syzkaller.appspotmail.com
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/test_xarray.c | 22 ++++++++++++++++++++++
+ lib/xarray.c | 2 ++
+ 2 files changed, 24 insertions(+)
+
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -1463,6 +1463,25 @@ unlock:
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+
++static noinline void check_create_range_5(struct xarray *xa,
++ unsigned long index, unsigned int order)
++{
++ XA_STATE_ORDER(xas, xa, index, order);
++ unsigned int i;
++
++ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
++
++ for (i = 0; i < order + 10; i++) {
++ do {
++ xas_lock(&xas);
++ xas_create_range(&xas);
++ xas_unlock(&xas);
++ } while (xas_nomem(&xas, GFP_KERNEL));
++ }
++
++ xa_destroy(xa);
++}
++
+ static noinline void check_create_range(struct xarray *xa)
+ {
+ unsigned int order;
+@@ -1490,6 +1509,9 @@ static noinline void check_create_range(
+ check_create_range_4(xa, (3U << order) + 1, order);
+ check_create_range_4(xa, (3U << order) - 1, order);
+ check_create_range_4(xa, (1U << 24) + 1, order);
++
++ check_create_range_5(xa, 0, order);
++ check_create_range_5(xa, (1U << order), order);
+ }
+
+ check_create_range_3();
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *x
+
+ for (;;) {
+ struct xa_node *node = xas->xa_node;
++ if (node->shift >= shift)
++ break;
+ xas->xa_node = xa_parent_locked(xas->xa, node);
+ xas->xa_offset = node->offset - 1;
+ if (node->offset != 0)
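
The two added lines stop the inner ascent once node->shift is at least the
shift of the range being created: at that point the loop has gone as high as
it needs to, and continuing would treat a large multi-order entry as a parent
node, which is the bogus xa_parent_locked() dereference in the oops above. An
abstract sketch of that guard-before-ascend pattern, with hypothetical types
rather than the real struct xa_node:

    struct tree_level {
            unsigned int shift;             /* index bits covered below this level */
            unsigned int offset;
            struct tree_level *parent;
    };

    /* Walk upward until reaching the level whose span covers `shift`. The
     * check must come before touching ->parent: if the current level
     * already covers the range, ascending would read through memory that
     * is not a node at all. */
    static struct tree_level *ascend_to_shift(struct tree_level *node,
                                              unsigned int shift)
    {
            for (;;) {
                    if (node->shift >= shift)
                            break;
                    node = node->parent;
            }
            return node;
    }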