git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 15 Jul 2025 12:16:00 +0000 (14:16 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 15 Jul 2025 12:16:00 +0000 (14:16 +0200)
added patches:
kasan-remove-kasan_find_vm_area-to-prevent-possible-deadlock.patch
ksmbd-fix-potential-use-after-free-in-oplock-lease-break-ack.patch
vhost-scsi-protect-vq-log_used-with-vq-mutex.patch
x86-fix-x86_feature_verw_clear-definition.patch
x86-mm-disable-hugetlb-page-table-sharing-on-32-bit.patch

queue-6.1/kasan-remove-kasan_find_vm_area-to-prevent-possible-deadlock.patch [new file with mode: 0644]
queue-6.1/ksmbd-fix-potential-use-after-free-in-oplock-lease-break-ack.patch [new file with mode: 0644]
queue-6.1/vhost-scsi-protect-vq-log_used-with-vq-mutex.patch [new file with mode: 0644]
queue-6.1/x86-fix-x86_feature_verw_clear-definition.patch [new file with mode: 0644]
queue-6.1/x86-mm-disable-hugetlb-page-table-sharing-on-32-bit.patch [new file with mode: 0644]

diff --git a/queue-6.1/kasan-remove-kasan_find_vm_area-to-prevent-possible-deadlock.patch b/queue-6.1/kasan-remove-kasan_find_vm_area-to-prevent-possible-deadlock.patch
new file mode 100644 (file)
index 0000000..743f77c
--- /dev/null
@@ -0,0 +1,71 @@
+From 6ee9b3d84775944fb8c8a447961cd01274ac671c Mon Sep 17 00:00:00 2001
+From: Yeoreum Yun <yeoreum.yun@arm.com>
+Date: Thu, 3 Jul 2025 19:10:18 +0100
+Subject: kasan: remove kasan_find_vm_area() to prevent possible deadlock
+
+From: Yeoreum Yun <yeoreum.yun@arm.com>
+
+commit 6ee9b3d84775944fb8c8a447961cd01274ac671c upstream.
+
+find_vm_area() cannot be called in atomic context.  If find_vm_area() is
+called to report vm area information, kasan can trigger a deadlock like:
+
+CPU0                                CPU1
+vmalloc();
+ alloc_vmap_area();
+  spin_lock(&vn->busy.lock)
+                                    spin_lock_bh(&some_lock);
+   <interrupt occurs>
+   <in softirq>
+   spin_lock(&some_lock);
+                                    <access invalid address>
+                                    kasan_report();
+                                     print_report();
+                                      print_address_description();
+                                       kasan_find_vm_area();
+                                        find_vm_area();
+                                         spin_lock(&vn->busy.lock) // deadlock!
+
+To prevent a possible deadlock while kasan is printing a report, remove
+kasan_find_vm_area().
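+
+As a rough userspace analogue (not the kernel code itself), the cycle above
+is the usual ABBA deadlock: two contexts take the same pair of locks in
+opposite order.
+
+  #include <pthread.h>
+
+  static pthread_mutex_t busy_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ vn->busy.lock */
+  static pthread_mutex_t some_lock = PTHREAD_MUTEX_INITIALIZER;
+
+  static void *cpu0(void *arg)                  /* vmalloc() + softirq */
+  {
+          pthread_mutex_lock(&busy_lock);       /* alloc_vmap_area() */
+          pthread_mutex_lock(&some_lock);       /* softirq on the same CPU */
+          pthread_mutex_unlock(&some_lock);
+          pthread_mutex_unlock(&busy_lock);
+          return NULL;
+  }
+
+  static void *cpu1(void *arg)                  /* kasan_report() path */
+  {
+          pthread_mutex_lock(&some_lock);       /* spin_lock_bh(&some_lock) */
+          pthread_mutex_lock(&busy_lock);       /* find_vm_area() */
+          pthread_mutex_unlock(&busy_lock);
+          pthread_mutex_unlock(&some_lock);
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t a, b;
+          pthread_create(&a, NULL, cpu0, NULL);
+          pthread_create(&b, NULL, cpu1, NULL);
+          pthread_join(a, NULL);                /* may never return: ABBA deadlock */
+          pthread_join(b, NULL);
+          return 0;
+  }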
+
+Link: https://lkml.kernel.org/r/20250703181018.580833-1-yeoreum.yun@arm.com
+Fixes: c056a364e954 ("kasan: print virtual mapping info in reports")
+Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
+Reported-by: Yunseong Kim <ysk@kzalloc.com>
+Reviewed-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Dmitriy Vyukov <dvyukov@google.com>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kasan/report.c |   13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -337,17 +337,8 @@ static void print_address_description(vo
+       }
+       if (is_vmalloc_addr(addr)) {
+-              struct vm_struct *va = find_vm_area(addr);
+-
+-              if (va) {
+-                      pr_err("The buggy address belongs to the virtual mapping at\n"
+-                             " [%px, %px) created by:\n"
+-                             " %pS\n",
+-                             va->addr, va->addr + va->size, va->caller);
+-                      pr_err("\n");
+-
+-                      page = vmalloc_to_page(addr);
+-              }
++              pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
++              page = vmalloc_to_page(addr);
+       }
+       if (page) {
diff --git a/queue-6.1/ksmbd-fix-potential-use-after-free-in-oplock-lease-break-ack.patch b/queue-6.1/ksmbd-fix-potential-use-after-free-in-oplock-lease-break-ack.patch
new file mode 100644 (file)
index 0000000..6b0023b
--- /dev/null
@@ -0,0 +1,90 @@
+From 50f930db22365738d9387c974416f38a06e8057e Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Tue, 8 Jul 2025 07:47:40 +0900
+Subject: ksmbd: fix potential use-after-free in oplock/lease break ack
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 50f930db22365738d9387c974416f38a06e8057e upstream.
+
+If ksmbd_iov_pin_rsp() returns an error, a use-after-free can happen by
+accessing opinfo->state, and opinfo_put() and ksmbd_fd_put() could be
+called twice.
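+
+Reduced to a userspace sketch (hypothetical names, not the ksmbd code), the
+bug is a release done before the response is pinned that the error label then
+repeats; the fix keeps a single release path that runs exactly once:
+
+  #include <stdio.h>
+
+  struct obj { int refcount; };
+
+  static void obj_put(struct obj *o)
+  {
+          if (--o->refcount == 0)
+                  printf("freed\n");    /* any later access is use-after-free */
+  }
+
+  static void break_ack_buggy(struct obj *opinfo, int pin_fails)
+  {
+          obj_put(opinfo);              /* released before the response is pinned */
+          if (pin_fails)
+                  goto err_out;         /* ksmbd_iov_pin_rsp() failed */
+          return;
+  err_out:
+          obj_put(opinfo);              /* released again: refcount underflow */
+  }
+
+  static void break_ack_fixed(struct obj *opinfo, int pin_fails)
+  {
+          if (pin_fails)
+                  printf("set error response\n");   /* smb2_set_err_rsp() */
+          obj_put(opinfo);              /* single release, after pinning the rsp */
+  }
+
+  int main(void)
+  {
+          struct obj a = { .refcount = 1 }, b = { .refcount = 1 };
+          break_ack_buggy(&a, 1);       /* a.refcount ends up at -1 */
+          break_ack_fixed(&b, 1);
+          return 0;
+  }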
+
+Reported-by: Ziyan Xu <research@securitygossip.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c |   29 +++++++++--------------------
+ 1 file changed, 9 insertions(+), 20 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -8144,11 +8144,6 @@ static void smb20_oplock_break_ack(struc
+               goto err_out;
+       }
+-      opinfo->op_state = OPLOCK_STATE_NONE;
+-      wake_up_interruptible_all(&opinfo->oplock_q);
+-      opinfo_put(opinfo);
+-      ksmbd_fd_put(work, fp);
+-
+       rsp->StructureSize = cpu_to_le16(24);
+       rsp->OplockLevel = rsp_oplevel;
+       rsp->Reserved = 0;
+@@ -8156,16 +8151,15 @@ static void smb20_oplock_break_ack(struc
+       rsp->VolatileFid = volatile_id;
+       rsp->PersistentFid = persistent_id;
+       ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break));
+-      if (!ret)
+-              return;
+-
++      if (ret) {
+ err_out:
++              smb2_set_err_rsp(work);
++      }
++
+       opinfo->op_state = OPLOCK_STATE_NONE;
+       wake_up_interruptible_all(&opinfo->oplock_q);
+-
+       opinfo_put(opinfo);
+       ksmbd_fd_put(work, fp);
+-      smb2_set_err_rsp(work);
+ }
+ static int check_lease_state(struct lease *lease, __le32 req_state)
+@@ -8295,11 +8289,6 @@ static void smb21_lease_break_ack(struct
+       }
+       lease_state = lease->state;
+-      opinfo->op_state = OPLOCK_STATE_NONE;
+-      wake_up_interruptible_all(&opinfo->oplock_q);
+-      atomic_dec(&opinfo->breaking_cnt);
+-      wake_up_interruptible_all(&opinfo->oplock_brk);
+-      opinfo_put(opinfo);
+       rsp->StructureSize = cpu_to_le16(36);
+       rsp->Reserved = 0;
+@@ -8308,16 +8297,16 @@ static void smb21_lease_break_ack(struct
+       rsp->LeaseState = lease_state;
+       rsp->LeaseDuration = 0;
+       ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack));
+-      if (!ret)
+-              return;
+-
++      if (ret) {
+ err_out:
++              smb2_set_err_rsp(work);
++      }
++
++      opinfo->op_state = OPLOCK_STATE_NONE;
+       wake_up_interruptible_all(&opinfo->oplock_q);
+       atomic_dec(&opinfo->breaking_cnt);
+       wake_up_interruptible_all(&opinfo->oplock_brk);
+-
+       opinfo_put(opinfo);
+-      smb2_set_err_rsp(work);
+ }
+ /**
diff --git a/queue-6.1/vhost-scsi-protect-vq-log_used-with-vq-mutex.patch b/queue-6.1/vhost-scsi-protect-vq-log_used-with-vq-mutex.patch
new file mode 100644 (file)
index 0000000..17ac141
--- /dev/null
@@ -0,0 +1,73 @@
+From f591cf9fce724e5075cc67488c43c6e39e8cbe27 Mon Sep 17 00:00:00 2001
+From: Dongli Zhang <dongli.zhang@oracle.com>
+Date: Wed, 2 Apr 2025 23:29:46 -0700
+Subject: vhost-scsi: protect vq->log_used with vq->mutex
+
+From: Dongli Zhang <dongli.zhang@oracle.com>
+
+commit f591cf9fce724e5075cc67488c43c6e39e8cbe27 upstream.
+
+The vhost-scsi completion path may access vq->log_base when vq->log_used is
+already set to false.
+
+    vhost-thread                       QEMU-thread
+
+vhost_scsi_complete_cmd_work()
+-> vhost_add_used()
+   -> vhost_add_used_n()
+      if (unlikely(vq->log_used))
+                                      QEMU disables vq->log_used
+                                      via VHOST_SET_VRING_ADDR.
+                                      mutex_lock(&vq->mutex);
+                                      vq->log_used = false now!
+                                      mutex_unlock(&vq->mutex);
+
+                                     QEMU gfree(vq->log_base)
+        log_used()
+        -> log_write(vq->log_base)
+
+Assuming the VMM is QEMU, vq->log_base comes from QEMU userspace and can be
+reclaimed via gfree(). As a result, this causes invalid memory writes to
+QEMU userspace.
+
+The control queue path has the same issue.
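+
+A minimal userspace sketch of the rule the fix enforces (hypothetical types,
+not the vhost code): the completion path may only check log_used and touch
+log_base while holding the same vq mutex that the configuration path takes
+to clear them.
+
+  #include <pthread.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  struct vq {
+          pthread_mutex_t mutex;
+          int log_used;
+          char *log_base;
+  };
+
+  static void complete_cmd(struct vq *vq)       /* vhost work thread */
+  {
+          pthread_mutex_lock(&vq->mutex);
+          if (vq->log_used)
+                  memset(vq->log_base, 0xff, 64);   /* stands in for log_write() */
+          pthread_mutex_unlock(&vq->mutex);
+  }
+
+  static void disable_logging(struct vq *vq)    /* VHOST_SET_VRING_ADDR path */
+  {
+          pthread_mutex_lock(&vq->mutex);
+          vq->log_used = 0;
+          free(vq->log_base);                   /* VMM may now reclaim the buffer */
+          vq->log_base = NULL;
+          pthread_mutex_unlock(&vq->mutex);
+  }
+
+  int main(void)
+  {
+          struct vq vq = { .mutex = PTHREAD_MUTEX_INITIALIZER,
+                           .log_used = 1, .log_base = malloc(64) };
+          complete_cmd(&vq);
+          disable_logging(&vq);
+          complete_cmd(&vq);    /* safe: log_used is seen as 0 under the mutex */
+          return 0;
+  }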
+
+Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Mike Christie <michael.christie@oracle.com>
+Message-Id: <20250403063028.16045-2-dongli.zhang@oracle.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+[ resolved conflicts in drivers/vhost/scsi.c
+  because vhost_scsi_complete_cmd_work() has been refactored. ]
+Signed-off-by: Xinyu Zheng <zhengxinyu6@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/scsi.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -568,8 +568,10 @@ static void vhost_scsi_complete_cmd_work
+               ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+               if (likely(ret == sizeof(v_rsp))) {
+                       struct vhost_scsi_virtqueue *q;
+-                      vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
+                       q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
++                      mutex_lock(&q->vq.mutex);
++                      vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
++                      mutex_unlock(&q->vq.mutex);
+                       vq = q - vs->vqs;
+                       __set_bit(vq, vs->compl_bitmap);
+               } else
+@@ -1173,8 +1175,11 @@ static void vhost_scsi_tmf_resp_work(str
+       else
+               resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
++      mutex_lock(&tmf->svq->vq.mutex);
+       vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
+                                tmf->vq_desc, &tmf->resp_iov, resp_code);
++      mutex_unlock(&tmf->svq->vq.mutex);
++
+       vhost_scsi_release_tmf_res(tmf);
+ }
diff --git a/queue-6.1/x86-fix-x86_feature_verw_clear-definition.patch b/queue-6.1/x86-fix-x86_feature_verw_clear-definition.patch
new file mode 100644 (file)
index 0000000..74288c6
--- /dev/null
@@ -0,0 +1,35 @@
+From jinpu.wang@ionos.com  Tue Jul 15 13:54:05 2025
+From: Jack Wang <jinpu.wang@ionos.com>
+Date: Mon, 14 Jul 2025 21:33:39 +0200
+Subject: x86: Fix X86_FEATURE_VERW_CLEAR definition
+To: gregkh@linuxfoundation.org, sashal@kernel.org, stable@vger.kernel.org
+Cc: Borislav Petkov <bp@alien8.de>
+Message-ID: <20250714193339.6954-1-jinpu.wang@ionos.com>
+
+From: Jack Wang <jinpu.wang@ionos.com>
+
+This was a mistake during the backport.
+VERW_CLEAR is on bit 5, not bit 10.
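+
+For reference, X86_FEATURE_* values pack a 32-bit "word" index and a bit
+position into one number, so bit 10 and bit 5 land on entirely different
+CPUID bits.  A minimal sketch of the arithmetic:
+
+  #include <stdio.h>
+
+  #define FEATURE(word, bit)    ((word) * 32 + (bit))
+
+  int main(void)
+  {
+          printf("bad backport value: %d\n", FEATURE(20, 10));  /* 650 */
+          printf("upstream value:     %d\n", FEATURE(20, 5));   /* 645 */
+          return 0;
+  }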
+
+Fixes: d12145e8454f ("x86/bugs: Add a Transient Scheduler Attacks mitigation")
+
+Cc: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -429,8 +429,8 @@
+ #define X86_FEATURE_V_TSC_AUX         (19*32+ 9) /* "" Virtual TSC_AUX */
+ #define X86_FEATURE_SME_COHERENT      (19*32+10) /* "" AMD hardware-enforced cache coherency */
++#define X86_FEATURE_VERW_CLEAR                (20*32+ 5) /* "" The memory form of VERW mitigates TSA */
+ #define X86_FEATURE_AUTOIBRS          (20*32+ 8) /* "" Automatic IBRS */
+-#define X86_FEATURE_VERW_CLEAR                (20*32+ 10) /* "" The memory form of VERW mitigates TSA */
+ #define X86_FEATURE_SBPB              (20*32+27) /* "" Selective Branch Prediction Barrier */
+ #define X86_FEATURE_IBPB_BRTYPE               (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO           (20*32+29) /* "" CPU is not affected by SRSO */
diff --git a/queue-6.1/x86-mm-disable-hugetlb-page-table-sharing-on-32-bit.patch b/queue-6.1/x86-mm-disable-hugetlb-page-table-sharing-on-32-bit.patch
new file mode 100644 (file)
index 0000000..974563d
--- /dev/null
@@ -0,0 +1,55 @@
+From 76303ee8d54bff6d9a6d55997acd88a6c2ba63cf Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Wed, 2 Jul 2025 10:32:04 +0200
+Subject: x86/mm: Disable hugetlb page table sharing on 32-bit
+
+From: Jann Horn <jannh@google.com>
+
+commit 76303ee8d54bff6d9a6d55997acd88a6c2ba63cf upstream.
+
+Only select ARCH_WANT_HUGE_PMD_SHARE on 64-bit x86.
+Page table sharing requires at least three levels because it involves
+shared references to PMD tables; 32-bit x86 has either two-level paging
+(without PAE) or three-level paging (with PAE), but even with
+three-level paging, having a dedicated PGD entry for hugetlb is only
+barely possible (because the PGD only has four entries), and it seems
+unlikely anyone's actually using PMD sharing on 32-bit.
+
+Having ARCH_WANT_HUGE_PMD_SHARE enabled on non-PAE 32-bit X86 (which
+has 2-level paging) became particularly problematic after commit
+59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count"),
+since that changes `struct ptdesc` such that the `pt_mm` (for PGDs) and
+the `pt_share_count` (for PMDs) share the same union storage - and with
+2-level paging, PMDs are PGDs.
+
+(For comparison, arm64 also gates ARCH_WANT_HUGE_PMD_SHARE on the
+configuration of page tables such that it is never enabled with 2-level
+paging.)
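+
+A simplified sketch (hypothetical layout, not the real struct ptdesc) of why
+that shared storage is a problem with 2-level paging, where a PMD page is
+also a PGD page:
+
+  #include <stdio.h>
+
+  struct ptdesc_sketch {
+          union {
+                  void *pt_mm;            /* meaningful when the page is a PGD */
+                  long  pt_share_count;   /* meaningful when it is a shared PMD */
+          };
+  };
+
+  int main(void)
+  {
+          struct ptdesc_sketch pgd = { .pt_mm = (void *)0x1234 };
+
+          /* With 2-level paging the "PMD" is the same page as the PGD, so
+           * initializing the share count clobbers the mm pointer. */
+          pgd.pt_share_count = 0;
+          printf("pt_mm is now %p\n", pgd.pt_mm);   /* NULL, not 0x1234 */
+          return 0;
+  }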
+
+Closes: https://lore.kernel.org/r/srhpjxlqfna67blvma5frmy3aa@altlinux.org
+Fixes: cfe28c5d63d8 ("x86: mm: Remove x86 version of huge_pmd_share.")
+Reported-by: Vitaly Chikunov <vt@altlinux.org>
+Suggested-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Acked-by: David Hildenbrand <david@redhat.com>
+Tested-by: Vitaly Chikunov <vt@altlinux.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20250702-x86-2level-hugetlb-v2-1-1a98096edf92%40google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -124,7 +124,7 @@ config X86
+       select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+       select ARCH_WANTS_NO_INSTR
+       select ARCH_WANT_GENERAL_HUGETLB
+-      select ARCH_WANT_HUGE_PMD_SHARE
++      select ARCH_WANT_HUGE_PMD_SHARE         if X86_64
+       select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP  if X86_64
+       select ARCH_WANT_LD_ORPHAN_WARN
+       select ARCH_WANTS_THP_SWAP              if X86_64