git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 12 Aug 2024 14:42:33 +0000 (16:42 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 12 Aug 2024 14:42:33 +0000 (16:42 +0200)
added patches:
mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch
nouveau-set-placement-to-original-placement-on-uvmm-validate.patch
xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch

queue-6.6/mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch [new file with mode: 0644]
queue-6.6/nouveau-set-placement-to-original-placement-on-uvmm-validate.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch [new file with mode: 0644]

diff --git a/queue-6.6/mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch b/queue-6.6/mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch
new file mode 100644 (file)
index 0000000..0ed5206
--- /dev/null
@@ -0,0 +1,75 @@
+From 5596d9e8b553dacb0ac34bcf873cbbfb16c3ba3e Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Mon, 8 Jul 2024 10:51:27 +0800
+Subject: mm/hugetlb: fix potential race in __update_and_free_hugetlb_folio()
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 5596d9e8b553dacb0ac34bcf873cbbfb16c3ba3e upstream.
+
+There is a potential race between __update_and_free_hugetlb_folio() and
+try_memory_failure_hugetlb():
+
+ CPU1                                  CPU2
+ __update_and_free_hugetlb_folio       try_memory_failure_hugetlb
+                                        folio_test_hugetlb
+                                         -- It's still a hugetlb folio.
+  folio_clear_hugetlb_hwpoison
+                                         spin_lock_irq(&hugetlb_lock);
+                                          __get_huge_page_for_hwpoison
+                                           folio_set_hugetlb_hwpoison
+                                         spin_unlock_irq(&hugetlb_lock);
+  spin_lock_irq(&hugetlb_lock);
+  __folio_clear_hugetlb(folio);
+   -- Hugetlb flag is cleared but too late.
+  spin_unlock_irq(&hugetlb_lock);
+
+When the above race occurs, raw error page info will be leaked.  Even
+worse, the raw error pages won't have the hwpoisoned flag set and will
+end up on the pcplists/buddy.  Fix this issue by deferring
+folio_clear_hugetlb_hwpoison() until __folio_clear_hugetlb() is done,
+so that all raw error pages have the hwpoisoned flag set.
+
+Link: https://lkml.kernel.org/r/20240708025127.107713-1-linmiaohe@huawei.com
+Fixes: 32c877191e02 ("hugetlb: do not clear hugetlb dtor until allocating vmemmap")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Acked-by: Muchun Song <muchun.song@linux.dev>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1770,13 +1770,6 @@ static void __update_and_free_hugetlb_fo
+       }
+       /*
+-       * Move PageHWPoison flag from head page to the raw error pages,
+-       * which makes any healthy subpages reusable.
+-       */
+-      if (unlikely(folio_test_hwpoison(folio)))
+-              folio_clear_hugetlb_hwpoison(folio);
+-
+-      /*
+        * If vmemmap pages were allocated above, then we need to clear the
+        * hugetlb destructor under the hugetlb lock.
+        */
+@@ -1787,6 +1780,13 @@ static void __update_and_free_hugetlb_fo
+       }
+       /*
++       * Move PageHWPoison flag from head page to the raw error pages,
++       * which makes any healthy subpages reusable.
++       */
++      if (unlikely(folio_test_hwpoison(folio)))
++              folio_clear_hugetlb_hwpoison(folio);
++
++      /*
+        * Non-gigantic pages demoted from CMA allocated gigantic pages
+        * need to be given back to CMA in free_gigantic_folio.
+        */
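
A minimal userspace sketch of the ordering this patch enforces: clear the type flag under the lock first, then drain the hwpoison bookkeeping, so a concurrent memory-failure path that checks the flag can no longer add entries behind the freeing path's back. The globals, names, and pthread scaffolding below are illustrative assumptions, not the kernel's code or data structures.

/*
 * Userspace model of the ordering fix; not kernel code.
 * Build with: cc -pthread ordering_model.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for hugetlb_lock */
static bool is_hugetlb = true;    /* models folio_test_hugetlb() */
static bool hwpoison_info = true; /* models the per-folio hwpoison bookkeeping */

/* Models try_memory_failure_hugetlb(): only records poison while the
 * folio still looks like a hugetlb folio. */
static void *memory_failure_path(void *arg)
{
	pthread_mutex_lock(&lock);
	if (is_hugetlb)
		hwpoison_info = true;   /* folio_set_hugetlb_hwpoison() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Models the fixed __update_and_free_hugetlb_folio(): clear the hugetlb
 * flag under the lock *first*, then drain the hwpoison bookkeeping, so a
 * racing memory-failure path can no longer add entries afterwards. */
static void *free_path(void *arg)
{
	pthread_mutex_lock(&lock);
	is_hugetlb = false;             /* __folio_clear_hugetlb() */
	pthread_mutex_unlock(&lock);

	if (hwpoison_info)
		hwpoison_info = false;  /* folio_clear_hugetlb_hwpoison() */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, memory_failure_path, NULL);
	pthread_create(&t2, NULL, free_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* With this ordering no poison info is left behind at free time. */
	printf("is_hugetlb=%d hwpoison_info=%d\n", is_hugetlb, hwpoison_info);
	return 0;
}

In this model, any poison recorded by the racing path is observed and drained before the free path finishes, which mirrors why the fix keeps raw error pages from reaching the pcplists/buddy without the hwpoisoned flag.
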
diff --git a/queue-6.6/nouveau-set-placement-to-original-placement-on-uvmm-validate.patch b/queue-6.6/nouveau-set-placement-to-original-placement-on-uvmm-validate.patch
new file mode 100644 (file)
index 0000000..0ac7cb7
--- /dev/null
@@ -0,0 +1,51 @@
+From 9c685f61722d30a22d55bb8a48f7a48bb2e19bcc Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Wed, 15 May 2024 12:55:41 +1000
+Subject: nouveau: set placement to original placement on uvmm validate.
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 9c685f61722d30a22d55bb8a48f7a48bb2e19bcc upstream.
+
+When a buffer is evicted due to memory pressure or a TTM evict-all,
+its placement is set to the eviction domain, which means the buffer
+never gets revalidated into the correct domain on the next exec.
+
+I think it should be fine to use the initial domain from object
+creation; at least with VM_BIND this won't change after init, so
+that should be the correct answer.
+
+Fixes: b88baab82871 ("drm/nouveau: implement new VM_BIND uAPI")
+Cc: Danilo Krummrich <dakr@redhat.com>
+Cc: <stable@vger.kernel.org> # v6.6
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240515025542.2156774-1-airlied@gmail.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_uvmm.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+@@ -1320,6 +1320,7 @@ nouveau_uvmm_bind_job_submit(struct nouv
+               drm_gpuva_for_each_op(va_op, op->ops) {
+                       struct drm_gem_object *obj = op_gem_obj(va_op);
++                      struct nouveau_bo *nvbo;
+                       if (unlikely(!obj))
+                               continue;
+@@ -1330,8 +1331,9 @@ nouveau_uvmm_bind_job_submit(struct nouv
+                       if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
+                               continue;
+-                      ret = nouveau_bo_validate(nouveau_gem_object(obj),
+-                                                true, false);
++                      nvbo = nouveau_gem_object(obj);
++                      nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
++                      ret = nouveau_bo_validate(nvbo, true, false);
+                       if (ret) {
+                               op = list_last_op(&bind_job->ops);
+                               goto unwind;
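
A rough standalone illustration of why the placement reset matters: a buffer whose requested placement still points at the eviction domain is considered "valid" where it sits, while resetting the placement to the creation-time valid_domains before validation is what migrates it back. The types, domain flags, and fake_* helpers are made up for the example; they are not the nouveau/TTM API.

/*
 * Userspace sketch only: fake types and helpers modeling "reset the
 * requested placement to the object's creation-time domains before
 * validating", which is what the patch does for the real nouveau BO.
 */
#include <stdint.h>
#include <stdio.h>

#define DOMAIN_VRAM 0x1u
#define DOMAIN_GART 0x2u

struct fake_bo {
	uint32_t valid_domains;   /* domains chosen when the object was created */
	uint32_t placement;       /* placement requested for the next validate */
	uint32_t resident_domain; /* where the backing store currently lives */
};

/* Models placement_set(): record which domains the next validate may use. */
static void fake_placement_set(struct fake_bo *bo, uint32_t domains)
{
	bo->placement = domains;
}

/* Models validate(): migrate the buffer if it is not in an allowed domain. */
static int fake_validate(struct fake_bo *bo)
{
	if (!(bo->resident_domain & bo->placement)) {
		if (bo->placement & DOMAIN_VRAM)
			bo->resident_domain = DOMAIN_VRAM;
		else
			bo->resident_domain = DOMAIN_GART;
	}
	return 0;
}

int main(void)
{
	/* A VRAM-only buffer that was evicted to GART under memory pressure;
	 * the stale requested placement still points at the eviction domain. */
	struct fake_bo bo = {
		.valid_domains = DOMAIN_VRAM,
		.placement = DOMAIN_GART,
		.resident_domain = DOMAIN_GART,
	};

	/* The fix's pattern: restore the creation-time domains, then validate. */
	fake_placement_set(&bo, bo.valid_domains);
	fake_validate(&bo);

	printf("resident in %s\n",
	       (bo.resident_domain & DOMAIN_VRAM) ? "VRAM" : "GART");
	return 0;
}

Without the fake_placement_set() call, the validate step would be a no-op here, which corresponds to the evicted buffer never returning to VRAM on subsequent execs.
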
diff --git a/queue-6.6/series b/queue-6.6/series
index 208392351a3b240dea7adfb8cb41fb1b294db7db..7d6b2c270fec0fb9febba5016cbd435741a208cb 100644 (file)
--- a/queue-6.6/series
@@ -176,3 +176,6 @@ btrfs-fix-corruption-after-buffer-fault-in-during-direct-io-append-write.patch
 netfilter-nf_tables-prefer-nft_chain_validate.patch
 ipv6-fix-source-address-selection-with-route-leak.patch
 tools-headers-arm64-sync-arm64-s-cputype.h-with-the-kernel-sources.patch
+mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch
+nouveau-set-placement-to-original-placement-on-uvmm-validate.patch
+xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch
diff --git a/queue-6.6/xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch b/queue-6.6/xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch
new file mode 100644 (file)
index 0000000..89ccc53
--- /dev/null
@@ -0,0 +1,72 @@
+From 45cf976008ddef4a9c9a30310c9b4fb2a9a6602a Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 30 Apr 2024 06:07:55 +0200
+Subject: xfs: fix log recovery buffer allocation for the legacy h_size fixup
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 45cf976008ddef4a9c9a30310c9b4fb2a9a6602a upstream.
+
+Commit a70f9fe52daa ("xfs: detect and handle invalid iclog size set by
+mkfs") added a fixup for incorrect h_size values used for the initial
+umount record in old xfsprogs versions.  Later commit 0c771b99d6c9
+("xfs: clean up calculation of LR header blocks") cleaned up the log
+recovery buffer calculation, but stopped using the fixed-up h_size value
+to size the log recovery buffer, which can lead to an out-of-bounds
+access when the incorrect h_size does not come from the old mkfs
+tool but from a fuzzer.
+
+Fix this by open coding xlog_logrec_hblks and taking the fixed h_size
+into account for this calculation.
+
+Fixes: 0c771b99d6c9 ("xfs: clean up calculation of LR header blocks")
+Reported-by: Sam Sun <samsun1006219@gmail.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
+Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
+Signed-off-by: Kevin Berry <kpberry@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_log_recover.c |   20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2965,7 +2965,7 @@ xlog_do_recovery_pass(
+       int                     error = 0, h_size, h_len;
+       int                     error2 = 0;
+       int                     bblks, split_bblks;
+-      int                     hblks, split_hblks, wrapped_hblks;
++      int                     hblks = 1, split_hblks, wrapped_hblks;
+       int                     i;
+       struct hlist_head       rhash[XLOG_RHASH_SIZE];
+       LIST_HEAD               (buffer_list);
+@@ -3021,14 +3021,22 @@ xlog_do_recovery_pass(
+               if (error)
+                       goto bread_err1;
+-              hblks = xlog_logrec_hblks(log, rhead);
+-              if (hblks != 1) {
+-                      kmem_free(hbp);
+-                      hbp = xlog_alloc_buffer(log, hblks);
++              /*
++               * This open codes xlog_logrec_hblks so that we can reuse the
++               * fixed up h_size value calculated above.  Without that we'd
++               * still allocate the buffer based on the incorrect on-disk
++               * size.
++               */
++              if (h_size > XLOG_HEADER_CYCLE_SIZE &&
++                  (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
++                      hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
++                      if (hblks > 1) {
++                              kmem_free(hbp);
++                              hbp = xlog_alloc_buffer(log, hblks);
++                      }
+               }
+       } else {
+               ASSERT(log->l_sectBBsize == 1);
+-              hblks = 1;
+               hbp = xlog_alloc_buffer(log, 1);
+               h_size = XLOG_BIG_RECORD_BSIZE;
+       }
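
A small standalone sketch of the header-block sizing that the patch open codes, driven by the already fixed-up h_size; the 32k cycle size mirrors the kernel's XLOG_HEADER_CYCLE_SIZE, but the helper and the example sizes are illustrative only.

/*
 * Standalone sketch of the header-block calculation now done inline in
 * xlog_do_recovery_pass(); illustration only, not the kernel code.
 */
#include <stdio.h>

#define XLOG_HEADER_CYCLE_SIZE (32 * 1024)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* How many blocks of log-record header to read for a given
 * (already fixed up) h_size on a version-2 log. */
static int logrec_hblks(int h_size, int is_v2_log)
{
	if (h_size > XLOG_HEADER_CYCLE_SIZE && is_v2_log)
		return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	return 1;
}

int main(void)
{
	/* Typical 32k iclog: a single header block, no reallocation needed. */
	printf("h_size=32768  -> hblks=%d\n", logrec_hblks(32768, 1));

	/* Oversized h_size (e.g. produced by a fuzzer): the recovery buffer
	 * must be reallocated to hblks blocks, which is what the fix ensures. */
	printf("h_size=262144 -> hblks=%d\n", logrec_hblks(262144, 1));
	return 0;
}

An oversized or fuzzed h_size now grows hblks, and with it the recovery buffer, instead of leaving a one-block buffer that later reads would overrun.
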