From: Greg Kroah-Hartman Date: Mon, 15 Jul 2024 11:34:47 +0000 (+0200) Subject: 5.10-stable patches X-Git-Tag: v4.19.318~47 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=cade44b302525931987a4b3c03ca7889e0d1b6c2;p=thirdparty%2Fkernel%2Fstable-queue.git 5.10-stable patches added patches: libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch --- diff --git a/queue-5.10/libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch b/queue-5.10/libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch new file mode 100644 index 00000000000..dce7d89a311 --- /dev/null +++ b/queue-5.10/libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch @@ -0,0 +1,86 @@ +From 69c7b2fe4c9cc1d3b1186d1c5606627ecf0de883 Mon Sep 17 00:00:00 2001 +From: Ilya Dryomov +Date: Mon, 8 Jul 2024 22:37:29 +0200 +Subject: libceph: fix race between delayed_work() and ceph_monc_stop() + +From: Ilya Dryomov + +commit 69c7b2fe4c9cc1d3b1186d1c5606627ecf0de883 upstream. + +The way the delayed work is handled in ceph_monc_stop() is prone to +races with mon_fault() and possibly also finish_hunting(). Both of +these can requeue the delayed work which wouldn't be canceled by any of +the following code in case that happens after cancel_delayed_work_sync() +runs -- __close_session() doesn't mess with the delayed work in order +to avoid interfering with the hunting interval logic. This part was +missed in commit b5d91704f53e ("libceph: behave in mon_fault() if +cur_mon < 0") and use-after-free can still ensue on monc and objects +that hang off of it, with monc->auth and monc->monmap being +particularly susceptible to quickly being reused. 
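As a rough illustration only (the real code is in the ceph_monc_stop() hunk
further down), the old ordering looked like this, with the race window marked:

    /* sketch of the old ordering: cancel first, close afterwards */
    cancel_delayed_work_sync(&monc->delayed_work);
    /*
     * Window: mon_fault() or finish_hunting() can still run here and
     * requeue the delayed work, and nothing below cancels it again.
     */
    mutex_lock(&monc->mutex);
    __close_session(monc);
    monc->cur_mon = -1;
    mutex_unlock(&monc->mutex);
    /* a requeued delayed_work() may later touch monc state being torn down */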
+ +To fix this: + +- clear monc->cur_mon and monc->hunting as part of closing the session + in ceph_monc_stop() +- bail from delayed_work() if monc->cur_mon is cleared, similar to how + it's done in mon_fault() and finish_hunting() (based on monc->hunting) +- call cancel_delayed_work_sync() after the session is closed + +Cc: stable@vger.kernel.org +Link: https://tracker.ceph.com/issues/66857 +Signed-off-by: Ilya Dryomov +Reviewed-by: Xiubo Li +Signed-off-by: Greg Kroah-Hartman +--- + net/ceph/mon_client.c | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +--- a/net/ceph/mon_client.c ++++ b/net/ceph/mon_client.c +@@ -1014,13 +1014,19 @@ static void delayed_work(struct work_str + struct ceph_mon_client *monc = + container_of(work, struct ceph_mon_client, delayed_work.work); + +- dout("monc delayed_work\n"); + mutex_lock(&monc->mutex); ++ dout("%s mon%d\n", __func__, monc->cur_mon); ++ if (monc->cur_mon < 0) { ++ goto out; ++ } ++ + if (monc->hunting) { + dout("%s continuing hunt\n", __func__); + reopen_session(monc); + } else { + int is_auth = ceph_auth_is_authenticated(monc->auth); ++ ++ dout("%s is_authed %d\n", __func__, is_auth); + if (ceph_con_keepalive_expired(&monc->con, + CEPH_MONC_PING_TIMEOUT)) { + dout("monc keepalive timeout\n"); +@@ -1045,6 +1051,8 @@ static void delayed_work(struct work_str + } + } + __schedule_delayed(monc); ++ ++out: + mutex_unlock(&monc->mutex); + } + +@@ -1157,13 +1165,15 @@ EXPORT_SYMBOL(ceph_monc_init); + void ceph_monc_stop(struct ceph_mon_client *monc) + { + dout("stop\n"); +- cancel_delayed_work_sync(&monc->delayed_work); + + mutex_lock(&monc->mutex); + __close_session(monc); ++ monc->hunting = false; + monc->cur_mon = -1; + mutex_unlock(&monc->mutex); + ++ cancel_delayed_work_sync(&monc->delayed_work); ++ + /* + * flush msgr queue before we destroy ourselves to ensure that: + * - any work that references our embedded con is finished. diff --git a/queue-5.10/series b/queue-5.10/series index ada0f82b21d..b144349fcfa 100644 --- a/queue-5.10/series +++ b/queue-5.10/series @@ -87,3 +87,7 @@ hpet-support-32-bit-userspace.patch nvmem-meson-efuse-fix-return-value-of-nvmem-callbacks.patch alsa-hda-realtek-enable-mute-led-on-hp-250-g7.patch alsa-hda-realtek-limit-mic-boost-on-vaio-pro-px.patch +libceph-fix-race-between-delayed_work-and-ceph_monc_stop.patch +wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch +wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch +wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch diff --git a/queue-5.10/wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch b/queue-5.10/wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch new file mode 100644 index 00000000000..c7d7f7cd931 --- /dev/null +++ b/queue-5.10/wireguard-allowedips-avoid-unaligned-64-bit-memory-accesses.patch @@ -0,0 +1,44 @@ +From 948f991c62a4018fb81d85804eeab3029c6209f8 Mon Sep 17 00:00:00 2001 +From: Helge Deller +Date: Thu, 4 Jul 2024 17:45:15 +0200 +Subject: wireguard: allowedips: avoid unaligned 64-bit memory accesses + +From: Helge Deller + +commit 948f991c62a4018fb81d85804eeab3029c6209f8 upstream. 
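For background, a minimal sketch (not part of this patch; addr, hi and lo are
made-up names): dereferencing a byte pointer through a 64-bit type assumes the
address is 8-byte aligned, which strict-alignment architectures such as parisc
enforce in hardware, while get_unaligned_be64() from <asm/unaligned.h>
assembles the value byte by byte and works at any address:

    u8 addr[16];                             /* 128-bit IPv6 address bytes */
    u64 hi, lo;

    hi = be64_to_cpu(*(const __be64 *)addr); /* assumes 8-byte alignment   */

    hi = get_unaligned_be64(addr);           /* byte-wise, any alignment   */
    lo = get_unaligned_be64(addr + 8);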
+ +On the parisc platform, the kernel issues kernel warnings because +swap_endian() tries to load a 128-bit IPv6 address from an unaligned +memory location: + + Kernel: unaligned access to 0x55f4688c in wg_allowedips_insert_v6+0x2c/0x80 [wireguard] (iir 0xf3010df) + Kernel: unaligned access to 0x55f46884 in wg_allowedips_insert_v6+0x38/0x80 [wireguard] (iir 0xf2010dc) + +Avoid such unaligned memory accesses by instead using the +get_unaligned_be64() helper macro. + +Signed-off-by: Helge Deller +[Jason: replace src[8] in original patch with src+8] +Cc: stable@vger.kernel.org +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Link: https://patch.msgid.link/20240704154517.1572127-3-Jason@zx2c4.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/wireguard/allowedips.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/allowedips.c ++++ b/drivers/net/wireguard/allowedips.c +@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u + if (bits == 32) { + *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); + } else if (bits == 128) { +- ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); +- ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); ++ ((u64 *)dst)[0] = get_unaligned_be64(src); ++ ((u64 *)dst)[1] = get_unaligned_be64(src + 8); + } + } + diff --git a/queue-5.10/wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch b/queue-5.10/wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch new file mode 100644 index 00000000000..799a853a803 --- /dev/null +++ b/queue-5.10/wireguard-queueing-annotate-intentional-data-race-in-cpu-round-robin.patch @@ -0,0 +1,73 @@ +From 2fe3d6d2053c57f2eae5e85ca1656d185ebbe4e8 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Thu, 4 Jul 2024 17:45:16 +0200 +Subject: wireguard: queueing: annotate intentional data race in cpu round robin + +From: Jason A. Donenfeld + +commit 2fe3d6d2053c57f2eae5e85ca1656d185ebbe4e8 upstream. 
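For context, a hypothetical stand-alone sketch of the pattern being annotated
(next_worker() and cursor are made-up names, not WireGuard code): several
threads advance a shared round-robin cursor without a lock; a lost update only
skews the distribution slightly, so the race is tolerated and merely needs
READ_ONCE()/WRITE_ONCE() so that KCSAN and the compiler know it is deliberate:

    /* Racy on purpose: callers may interleave; worst case, a worker
     * is picked twice in a row. */
    static int next_worker(int *cursor, int nr_workers)
    {
        int w = READ_ONCE(*cursor) + 1;

        if (w >= nr_workers)
            w = 0;
        WRITE_ONCE(*cursor, w);
        return w;
    }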
+ +KCSAN reports a race in the CPU round robin function, which, as the +comment points out, is intentional: + + BUG: KCSAN: data-race in wg_packet_send_staged_packets / wg_packet_send_staged_packets + + read to 0xffff88811254eb28 of 4 bytes by task 3160 on cpu 1: + wg_cpumask_next_online drivers/net/wireguard/queueing.h:127 [inline] + wg_queue_enqueue_per_device_and_peer drivers/net/wireguard/queueing.h:173 [inline] + wg_packet_create_data drivers/net/wireguard/send.c:320 [inline] + wg_packet_send_staged_packets+0x60e/0xac0 drivers/net/wireguard/send.c:388 + wg_packet_send_keepalive+0xe2/0x100 drivers/net/wireguard/send.c:239 + wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline] + wg_packet_handshake_receive_worker+0x449/0x5f0 drivers/net/wireguard/receive.c:213 + process_one_work kernel/workqueue.c:3248 [inline] + process_scheduled_works+0x483/0x9a0 kernel/workqueue.c:3329 + worker_thread+0x526/0x720 kernel/workqueue.c:3409 + kthread+0x1d1/0x210 kernel/kthread.c:389 + ret_from_fork+0x4b/0x60 arch/x86/kernel/process.c:147 + ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244 + + write to 0xffff88811254eb28 of 4 bytes by task 3158 on cpu 0: + wg_cpumask_next_online drivers/net/wireguard/queueing.h:130 [inline] + wg_queue_enqueue_per_device_and_peer drivers/net/wireguard/queueing.h:173 [inline] + wg_packet_create_data drivers/net/wireguard/send.c:320 [inline] + wg_packet_send_staged_packets+0x6e5/0xac0 drivers/net/wireguard/send.c:388 + wg_packet_send_keepalive+0xe2/0x100 drivers/net/wireguard/send.c:239 + wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline] + wg_packet_handshake_receive_worker+0x449/0x5f0 drivers/net/wireguard/receive.c:213 + process_one_work kernel/workqueue.c:3248 [inline] + process_scheduled_works+0x483/0x9a0 kernel/workqueue.c:3329 + worker_thread+0x526/0x720 kernel/workqueue.c:3409 + kthread+0x1d1/0x210 kernel/kthread.c:389 + ret_from_fork+0x4b/0x60 arch/x86/kernel/process.c:147 + ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244 + + value changed: 0xffffffff -> 0x00000000 + +Mark this race as intentional by using READ/WRITE_ONCE(). + +Cc: stable@vger.kernel.org +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Link: https://patch.msgid.link/20240704154517.1572127-4-Jason@zx2c4.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/wireguard/queueing.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/queueing.h ++++ b/drivers/net/wireguard/queueing.h +@@ -126,10 +126,10 @@ static inline int wg_cpumask_choose_onli + */ + static inline int wg_cpumask_next_online(int *last_cpu) + { +- int cpu = cpumask_next(*last_cpu, cpu_online_mask); ++ int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first(cpu_online_mask); +- *last_cpu = cpu; ++ WRITE_ONCE(*last_cpu, cpu); + return cpu; + } + diff --git a/queue-5.10/wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch b/queue-5.10/wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch new file mode 100644 index 00000000000..29eeb3827ca --- /dev/null +++ b/queue-5.10/wireguard-send-annotate-intentional-data-race-in-checking-empty-queue.patch @@ -0,0 +1,82 @@ +From 381a7d453fa2ac5f854a154d3c9b1bbb90c4f94f Mon Sep 17 00:00:00 2001 +From: "Jason A. 
Donenfeld" +Date: Thu, 4 Jul 2024 17:45:17 +0200 +Subject: wireguard: send: annotate intentional data race in checking empty queue + +From: Jason A. Donenfeld + +commit 381a7d453fa2ac5f854a154d3c9b1bbb90c4f94f upstream. + +KCSAN reports a race in wg_packet_send_keepalive, which is intentional: + + BUG: KCSAN: data-race in wg_packet_send_keepalive / wg_packet_send_staged_packets + + write to 0xffff88814cd91280 of 8 bytes by task 3194 on cpu 0: + __skb_queue_head_init include/linux/skbuff.h:2162 [inline] + skb_queue_splice_init include/linux/skbuff.h:2248 [inline] + wg_packet_send_staged_packets+0xe5/0xad0 drivers/net/wireguard/send.c:351 + wg_xmit+0x5b8/0x660 drivers/net/wireguard/device.c:218 + __netdev_start_xmit include/linux/netdevice.h:4940 [inline] + netdev_start_xmit include/linux/netdevice.h:4954 [inline] + xmit_one net/core/dev.c:3548 [inline] + dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3564 + __dev_queue_xmit+0xeff/0x1d80 net/core/dev.c:4349 + dev_queue_xmit include/linux/netdevice.h:3134 [inline] + neigh_connected_output+0x231/0x2a0 net/core/neighbour.c:1592 + neigh_output include/net/neighbour.h:542 [inline] + ip6_finish_output2+0xa66/0xce0 net/ipv6/ip6_output.c:137 + ip6_finish_output+0x1a5/0x490 net/ipv6/ip6_output.c:222 + NF_HOOK_COND include/linux/netfilter.h:303 [inline] + ip6_output+0xeb/0x220 net/ipv6/ip6_output.c:243 + dst_output include/net/dst.h:451 [inline] + NF_HOOK include/linux/netfilter.h:314 [inline] + ndisc_send_skb+0x4a2/0x670 net/ipv6/ndisc.c:509 + ndisc_send_rs+0x3ab/0x3e0 net/ipv6/ndisc.c:719 + addrconf_dad_completed+0x640/0x8e0 net/ipv6/addrconf.c:4295 + addrconf_dad_work+0x891/0xbc0 + process_one_work kernel/workqueue.c:2633 [inline] + process_scheduled_works+0x5b8/0xa30 kernel/workqueue.c:2706 + worker_thread+0x525/0x730 kernel/workqueue.c:2787 + kthread+0x1d7/0x210 kernel/kthread.c:388 + ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147 + ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:242 + + read to 0xffff88814cd91280 of 8 bytes by task 3202 on cpu 1: + skb_queue_empty include/linux/skbuff.h:1798 [inline] + wg_packet_send_keepalive+0x20/0x100 drivers/net/wireguard/send.c:225 + wg_receive_handshake_packet drivers/net/wireguard/receive.c:186 [inline] + wg_packet_handshake_receive_worker+0x445/0x5e0 drivers/net/wireguard/receive.c:213 + process_one_work kernel/workqueue.c:2633 [inline] + process_scheduled_works+0x5b8/0xa30 kernel/workqueue.c:2706 + worker_thread+0x525/0x730 kernel/workqueue.c:2787 + kthread+0x1d7/0x210 kernel/kthread.c:388 + ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147 + ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:242 + + value changed: 0xffff888148fef200 -> 0xffff88814cd91280 + +Mark this race as intentional by using the skb_queue_empty_lockless() +function rather than skb_queue_empty(), which uses READ_ONCE() +internally to annotate the race. + +Cc: stable@vger.kernel.org +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. 
Donenfeld +Link: https://patch.msgid.link/20240704154517.1572127-5-Jason@zx2c4.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/wireguard/send.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_ + { + struct sk_buff *skb; + +- if (skb_queue_empty(&peer->staged_packet_queue)) { ++ if (skb_queue_empty_lockless(&peer->staged_packet_queue)) { + skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, + GFP_ATOMIC); + if (unlikely(!skb))
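For reference, the two helpers used before and after this change differ only
in how the queue head pointer is read; roughly (abridged from
include/linux/skbuff.h, shown here for illustration only):

    static inline int skb_queue_empty(const struct sk_buff_head *list)
    {
        return list->next == (const struct sk_buff *)list;   /* plain load */
    }

    static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
    {
        /* READ_ONCE() marks the lockless peek as intentional for KCSAN */
        return READ_ONCE(list->next) == (const struct sk_buff *)list;
    }

The check in wg_packet_send_keepalive() only decides whether an empty keepalive
skb needs to be allocated before the staged packets are sent, so a stale answer
is harmless; the lockless variant simply annotates that.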