git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jul 2024 11:36:05 +0000 (13:36 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jul 2024 11:36:05 +0000 (13:36 +0200)
added patches:
fix-userfaultfd_api-to-return-einval-as-expected.patch
libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch
wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch
wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch
wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch

queue-5.15/fix-userfaultfd_api-to-return-einval-as-expected.patch [new file with mode: 0644]
queue-5.15/libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch [new file with mode: 0644]
queue-5.15/wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch [new file with mode: 0644]
queue-5.15/wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch [new file with mode: 0644]

diff --git a/queue-5.15/fix-userfaultfd_api-to-return-einval-as-expected.patch b/queue-5.15/fix-userfaultfd_api-to-return-einval-as-expected.patch
new file mode 100644 (file)
index 0000000..64d2b9e
--- /dev/null
@@ -0,0 +1,65 @@
+From 1723f04caacb32cadc4e063725d836a0c4450694 Mon Sep 17 00:00:00 2001
+From: Audra Mitchell <audra@redhat.com>
+Date: Wed, 26 Jun 2024 09:05:11 -0400
+Subject: Fix userfaultfd_api to return EINVAL as expected
+
+From: Audra Mitchell <audra@redhat.com>
+
+commit 1723f04caacb32cadc4e063725d836a0c4450694 upstream.
+
+Currently if we request a feature that is not set in the Kernel config we
+fail silently and return all the available features.  However, the man
+page indicates we should return an EINVAL.
+
+We need to fix this issue since we can end up with a Kernel warning should
+a program request the feature UFFD_FEATURE_WP_UNPOPULATED on a kernel with
+the config not set with this feature.
+
+ [  200.812896] WARNING: CPU: 91 PID: 13634 at mm/memory.c:1660 zap_pte_range+0x43d/0x660
+ [  200.820738] Modules linked in:
+ [  200.869387] CPU: 91 PID: 13634 Comm: userfaultfd Kdump: loaded Not tainted 6.9.0-rc5+ #8
+ [  200.877477] Hardware name: Dell Inc. PowerEdge R6525/0N7YGH, BIOS 2.7.3 03/30/2022
+ [  200.885052] RIP: 0010:zap_pte_range+0x43d/0x660
+
+Link: https://lkml.kernel.org/r/20240626130513.120193-1-audra@redhat.com
+Fixes: e06f1e1dd499 ("userfaultfd: wp: enabled write protection in userfaultfd API")
+Signed-off-by: Audra Mitchell <audra@redhat.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Rafael Aquini <raquini@redhat.com>
+Cc: Shaohua Li <shli@fb.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/userfaultfd.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1935,7 +1935,7 @@ static int userfaultfd_api(struct userfa
+               goto out;
+       features = uffdio_api.features;
+       ret = -EINVAL;
+-      if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
++      if (uffdio_api.api != UFFD_API)
+               goto err_out;
+       ret = -EPERM;
+       if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+@@ -1949,6 +1949,11 @@ static int userfaultfd_api(struct userfa
+ #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+       uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
+ #endif
++
++      ret = -EINVAL;
++      if (features & ~uffdio_api.features)
++              goto err_out;
++
+       uffdio_api.ioctls = UFFD_API_IOCTLS;
+       ret = -EFAULT;
+       if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
diff --git a/queue-5.15/libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch b/queue-5.15/libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch
new file mode 100644 (file)
index 0000000..99e9f52
--- /dev/null
@@ -0,0 +1,86 @@
+From 69c7b2fe4c9cc1d3b1186d1c5606627ecf0de883 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Mon, 8 Jul 2024 22:37:29 +0200
+Subject: libceph: fix race between delayed_work() and ceph_monc_stop()
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 69c7b2fe4c9cc1d3b1186d1c5606627ecf0de883 upstream.
+
+The way the delayed work is handled in ceph_monc_stop() is prone to
+races with mon_fault() and possibly also finish_hunting().  Both of
+these can requeue the delayed work which wouldn't be canceled by any of
+the following code in case that happens after cancel_delayed_work_sync()
+runs -- __close_session() doesn't mess with the delayed work in order
+to avoid interfering with the hunting interval logic.  This part was
+missed in commit b5d91704f53e ("libceph: behave in mon_fault() if
+cur_mon < 0") and use-after-free can still ensue on monc and objects
+that hang off of it, with monc->auth and monc->monmap being
+particularly susceptible to quickly being reused.
+
+To fix this:
+
+- clear monc->cur_mon and monc->hunting as part of closing the session
+  in ceph_monc_stop()
+- bail from delayed_work() if monc->cur_mon is cleared, similar to how
+  it's done in mon_fault() and finish_hunting() (based on monc->hunting)
+- call cancel_delayed_work_sync() after the session is closed
+
+Cc: stable@vger.kernel.org
+Link: https://tracker.ceph.com/issues/66857
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Xiubo Li <xiubli@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/mon_client.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1085,13 +1085,19 @@ static void delayed_work(struct work_str
+       struct ceph_mon_client *monc =
+               container_of(work, struct ceph_mon_client, delayed_work.work);
+-      dout("monc delayed_work\n");
+       mutex_lock(&monc->mutex);
++      dout("%s mon%d\n", __func__, monc->cur_mon);
++      if (monc->cur_mon < 0) {
++              goto out;
++      }
++
+       if (monc->hunting) {
+               dout("%s continuing hunt\n", __func__);
+               reopen_session(monc);
+       } else {
+               int is_auth = ceph_auth_is_authenticated(monc->auth);
++
++              dout("%s is_authed %d\n", __func__, is_auth);
+               if (ceph_con_keepalive_expired(&monc->con,
+                                              CEPH_MONC_PING_TIMEOUT)) {
+                       dout("monc keepalive timeout\n");
+@@ -1116,6 +1122,8 @@ static void delayed_work(struct work_str
+               }
+       }
+       __schedule_delayed(monc);
++
++out:
+       mutex_unlock(&monc->mutex);
+ }
+@@ -1233,13 +1241,15 @@ EXPORT_SYMBOL(ceph_monc_init);
+ void ceph_monc_stop(struct ceph_mon_client *monc)
+ {
+       dout("stop\n");
+-      cancel_delayed_work_sync(&monc->delayed_work);
+       mutex_lock(&monc->mutex);
+       __close_session(monc);
++      monc->hunting = false;
+       monc->cur_mon = -1;
+       mutex_unlock(&monc->mutex);
++      cancel_delayed_work_sync(&monc->delayed_work);
++
+       /*
+        * flush msgr queue before we destroy ourselves to ensure that:
+        *  - any work that references our embedded con is finished.
index 6fe88e8ff2420cdd8b56cc5b7883938ec552ed06..60aa1aa708a29655921df46de66789dec994c087 100644 (file)
@@ -121,3 +121,8 @@ nvmem-core-only-change-name-to-fram-for-current-attribute.patch
 alsa-hda-realtek-add-quirk-for-clevo-v50tu.patch
 alsa-hda-realtek-enable-mute-led-on-hp-250-g7.patch
 alsa-hda-realtek-limit-mic-boost-on-vaio-pro-px.patch
+fix-userfaultfd_api-to-return-einval-as-expected.patch
+libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch
+wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch
+wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch
+wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch
diff --git a/queue-5.15/wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch b/queue-5.15/wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch
new file mode 100644 (file)
index 0000000..c7d7f7c
--- /dev/null
@@ -0,0 +1,44 @@
+From 948f991c62a4018fb81d85804eeab3029c6209f8 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@kernel.org>
+Date: Thu, 4 Jul 2024 17:45:15 +0200
+Subject: wireguard: allowedips: avoid unaligned 64-bit memory accesses
+
+From: Helge Deller <deller@kernel.org>
+
+commit 948f991c62a4018fb81d85804eeab3029c6209f8 upstream.
+
+On the parisc platform, the kernel issues kernel warnings because
+swap_endian() tries to load a 128-bit IPv6 address from an unaligned
+memory location:
+
+ Kernel: unaligned access to 0x55f4688c in wg_allowedips_insert_v6+0x2c/0x80 [wireguard] (iir 0xf3010df)
+ Kernel: unaligned access to 0x55f46884 in wg_allowedips_insert_v6+0x38/0x80 [wireguard] (iir 0xf2010dc)
+
+Avoid such unaligned memory accesses by instead using the
+get_unaligned_be64() helper macro.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+[Jason: replace src[8] in original patch with src+8]
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://patch.msgid.link/20240704154517.1572127-3-Jason@zx2c4.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/allowedips.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u
+       if (bits == 32) {
+               *(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
+       } else if (bits == 128) {
+-              ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
+-              ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
++              ((u64 *)dst)[0] = get_unaligned_be64(src);
++              ((u64 *)dst)[1] = get_unaligned_be64(src + 8);
+       }
+ }
diff --git a/queue-5.15/wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch b/queue-5.15/wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch
new file mode 100644 (file)
index 0000000..799a853
--- /dev/null
@@ -0,0 +1,73 @@
+From 2fe3d6d2053c57f2eae5e85ca1656d185ebbe4e8 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 4 Jul 2024 17:45:16 +0200
+Subject: wireguard: queueing: annotate intentional data race in cpu round robin
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 2fe3d6d2053c57f2eae5e85ca1656d185ebbe4e8 upstream.
+
+KCSAN reports a race in the CPU round robin function, which, as the
+comment points out, is intentional:
+
+    BUG: KCSAN: data-race in wg_packet_send_staged_packets / wg_packet_send_staged_packets
+
+    read to 0xffff88811254eb28 of 4 bytes by task 3160 on cpu 1:
+     wg_cpumask_next_online drivers/net/wireguard/queueing.h:127 [inline]
+     wg_queue_enqueue_per_device_and_peer drivers/net/wireguard/queueing.h:173 [inline]
+     wg_packet_create_data drivers/net/wireguard/send.c:320 [inline]
+     wg_packet_send_staged_packets+0x60e/0xac0 drivers/net/wireguard/send.c:388
+     wg_packet_send_keepalive+0xe2/0x100 drivers/net/wireguard/send.c:239
+     wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline]
+     wg_packet_handshake_receive_worker+0x449/0x5f0 drivers/net/wireguard/receive.c:213
+     process_one_work kernel/workqueue.c:3248 [inline]
+     process_scheduled_works+0x483/0x9a0 kernel/workqueue.c:3329
+     worker_thread+0x526/0x720 kernel/workqueue.c:3409
+     kthread+0x1d1/0x210 kernel/kthread.c:389
+     ret_from_fork+0x4b/0x60 arch/x86/kernel/process.c:147
+     ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
+
+    write to 0xffff88811254eb28 of 4 bytes by task 3158 on cpu 0:
+     wg_cpumask_next_online drivers/net/wireguard/queueing.h:130 [inline]
+     wg_queue_enqueue_per_device_and_peer drivers/net/wireguard/queueing.h:173 [inline]
+     wg_packet_create_data drivers/net/wireguard/send.c:320 [inline]
+     wg_packet_send_staged_packets+0x6e5/0xac0 drivers/net/wireguard/send.c:388
+     wg_packet_send_keepalive+0xe2/0x100 drivers/net/wireguard/send.c:239
+     wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline]
+     wg_packet_handshake_receive_worker+0x449/0x5f0 drivers/net/wireguard/receive.c:213
+     process_one_work kernel/workqueue.c:3248 [inline]
+     process_scheduled_works+0x483/0x9a0 kernel/workqueue.c:3329
+     worker_thread+0x526/0x720 kernel/workqueue.c:3409
+     kthread+0x1d1/0x210 kernel/kthread.c:389
+     ret_from_fork+0x4b/0x60 arch/x86/kernel/process.c:147
+     ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
+
+    value changed: 0xffffffff -> 0x00000000
+
+Mark this race as intentional by using READ/WRITE_ONCE().
+
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://patch.msgid.link/20240704154517.1572127-4-Jason@zx2c4.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/queueing.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -126,10 +126,10 @@ static inline int wg_cpumask_choose_onli
+  */
+ static inline int wg_cpumask_next_online(int *last_cpu)
+ {
+-      int cpu = cpumask_next(*last_cpu, cpu_online_mask);
++      int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
+       if (cpu >= nr_cpu_ids)
+               cpu = cpumask_first(cpu_online_mask);
+-      *last_cpu = cpu;
++      WRITE_ONCE(*last_cpu, cpu);
+       return cpu;
+ }
diff --git a/queue-5.15/wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch b/queue-5.15/wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch
new file mode 100644 (file)
index 0000000..29eeb38
--- /dev/null
@@ -0,0 +1,82 @@
+From 381a7d453fa2ac5f854a154d3c9b1bbb90c4f94f Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 4 Jul 2024 17:45:17 +0200
+Subject: wireguard: send: annotate intentional data race in checking empty queue
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 381a7d453fa2ac5f854a154d3c9b1bbb90c4f94f upstream.
+
+KCSAN reports a race in wg_packet_send_keepalive, which is intentional:
+
+    BUG: KCSAN: data-race in wg_packet_send_keepalive / wg_packet_send_staged_packets
+
+    write to 0xffff88814cd91280 of 8 bytes by task 3194 on cpu 0:
+     __skb_queue_head_init include/linux/skbuff.h:2162 [inline]
+     skb_queue_splice_init include/linux/skbuff.h:2248 [inline]
+     wg_packet_send_staged_packets+0xe5/0xad0 drivers/net/wireguard/send.c:351
+     wg_xmit+0x5b8/0x660 drivers/net/wireguard/device.c:218
+     __netdev_start_xmit include/linux/netdevice.h:4940 [inline]
+     netdev_start_xmit include/linux/netdevice.h:4954 [inline]
+     xmit_one net/core/dev.c:3548 [inline]
+     dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3564
+     __dev_queue_xmit+0xeff/0x1d80 net/core/dev.c:4349
+     dev_queue_xmit include/linux/netdevice.h:3134 [inline]
+     neigh_connected_output+0x231/0x2a0 net/core/neighbour.c:1592
+     neigh_output include/net/neighbour.h:542 [inline]
+     ip6_finish_output2+0xa66/0xce0 net/ipv6/ip6_output.c:137
+     ip6_finish_output+0x1a5/0x490 net/ipv6/ip6_output.c:222
+     NF_HOOK_COND include/linux/netfilter.h:303 [inline]
+     ip6_output+0xeb/0x220 net/ipv6/ip6_output.c:243
+     dst_output include/net/dst.h:451 [inline]
+     NF_HOOK include/linux/netfilter.h:314 [inline]
+     ndisc_send_skb+0x4a2/0x670 net/ipv6/ndisc.c:509
+     ndisc_send_rs+0x3ab/0x3e0 net/ipv6/ndisc.c:719
+     addrconf_dad_completed+0x640/0x8e0 net/ipv6/addrconf.c:4295
+     addrconf_dad_work+0x891/0xbc0
+     process_one_work kernel/workqueue.c:2633 [inline]
+     process_scheduled_works+0x5b8/0xa30 kernel/workqueue.c:2706
+     worker_thread+0x525/0x730 kernel/workqueue.c:2787
+     kthread+0x1d7/0x210 kernel/kthread.c:388
+     ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147
+     ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:242
+
+    read to 0xffff88814cd91280 of 8 bytes by task 3202 on cpu 1:
+     skb_queue_empty include/linux/skbuff.h:1798 [inline]
+     wg_packet_send_keepalive+0x20/0x100 drivers/net/wireguard/send.c:225
+     wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline]
+     wg_packet_handshake_receive_worker+0x445/0x5e0 drivers/net/wireguard/receive.c:213
+     process_one_work kernel/workqueue.c:2633 [inline]
+     process_scheduled_works+0x5b8/0xa30 kernel/workqueue.c:2706
+     worker_thread+0x525/0x730 kernel/workqueue.c:2787
+     kthread+0x1d7/0x210 kernel/kthread.c:388
+     ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147
+     ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:242
+
+    value changed: 0xffff888148fef200 -> 0xffff88814cd91280
+
+Mark this race as intentional by using the skb_queue_empty_lockless()
+function rather than skb_queue_empty(), which uses READ_ONCE()
+internally to annotate the race.
+
+Cc: stable@vger.kernel.org
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://patch.msgid.link/20240704154517.1572127-5-Jason@zx2c4.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireguard/send.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_
+ {
+       struct sk_buff *skb;
+-      if (skb_queue_empty(&peer->staged_packet_queue)) {
++      if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
+               skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+                               GFP_ATOMIC);
+               if (unlikely(!skb))