From: Greg Kroah-Hartman
Date: Mon, 12 Aug 2024 14:42:33 +0000 (+0200)
Subject: 6.6-stable patches
X-Git-Tag: v6.1.105~36
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=2c3999768bffef7a02f398f42b6e5f34cc501cd4;p=thirdparty%2Fkernel%2Fstable-queue.git

6.6-stable patches

added patches:
	mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch
	nouveau-set-placement-to-original-placement-on-uvmm-validate.patch
	xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch
---

diff --git a/queue-6.6/mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch b/queue-6.6/mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch
new file mode 100644
index 00000000000..0ed5206c685
--- /dev/null
+++ b/queue-6.6/mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch
@@ -0,0 +1,75 @@
+From 5596d9e8b553dacb0ac34bcf873cbbfb16c3ba3e Mon Sep 17 00:00:00 2001
+From: Miaohe Lin
+Date: Mon, 8 Jul 2024 10:51:27 +0800
+Subject: mm/hugetlb: fix potential race in __update_and_free_hugetlb_folio()
+
+From: Miaohe Lin
+
+commit 5596d9e8b553dacb0ac34bcf873cbbfb16c3ba3e upstream.
+
+There is a potential race between __update_and_free_hugetlb_folio() and
+try_memory_failure_hugetlb():
+
+ CPU1                                   CPU2
+ __update_and_free_hugetlb_folio        try_memory_failure_hugetlb
+                                         folio_test_hugetlb
+                                          -- It's still a hugetlb folio.
+ folio_clear_hugetlb_hwpoison
+                                         spin_lock_irq(&hugetlb_lock);
+                                         __get_huge_page_for_hwpoison
+                                          folio_set_hugetlb_hwpoison
+                                         spin_unlock_irq(&hugetlb_lock);
+ spin_lock_irq(&hugetlb_lock);
+ __folio_clear_hugetlb(folio);
+  -- Hugetlb flag is cleared, but too late.
+ spin_unlock_irq(&hugetlb_lock);
+
+When the above race occurs, the raw error page info is leaked.  Even
+worse, the raw error pages won't have the hwpoisoned flag set and will
+hit the pcplists/buddy allocator.  Fix this issue by deferring
+folio_clear_hugetlb_hwpoison() until __folio_clear_hugetlb() is done,
+so that all raw error pages have the hwpoisoned flag set.
+
+Link: https://lkml.kernel.org/r/20240708025127.107713-1-linmiaohe@huawei.com
+Fixes: 32c877191e02 ("hugetlb: do not clear hugetlb dtor until allocating vmemmap")
+Signed-off-by: Miaohe Lin
+Acked-by: Muchun Song
+Reviewed-by: Oscar Salvador
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Miaohe Lin
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/hugetlb.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1770,13 +1770,6 @@ static void __update_and_free_hugetlb_fo
+ 	}
+ 
+ 	/*
+-	 * Move PageHWPoison flag from head page to the raw error pages,
+-	 * which makes any healthy subpages reusable.
+-	 */
+-	if (unlikely(folio_test_hwpoison(folio)))
+-		folio_clear_hugetlb_hwpoison(folio);
+-
+-	/*
+ 	 * If vmemmap pages were allocated above, then we need to clear the
+ 	 * hugetlb destructor under the hugetlb lock.
+ 	 */
+@@ -1787,6 +1780,13 @@ static void __update_and_free_hugetlb_fo
+ 	}
+ 
+ 	/*
++	 * Move PageHWPoison flag from head page to the raw error pages,
++	 * which makes any healthy subpages reusable.
++	 */
++	if (unlikely(folio_test_hwpoison(folio)))
++		folio_clear_hugetlb_hwpoison(folio);
++
++	/*
+ 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
+ 	 * need to be given back to CMA in free_gigantic_folio.
+ 	 */
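To see the ordering this patch establishes, here is a minimal userspace sketch of the fix. It is a model only: pthreads stand in for hugetlb_lock, two booleans stand in for the folio flags, and the function names merely echo their kernel counterparts; nothing here is the kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hugetlb_lock = PTHREAD_MUTEX_INITIALIZER;
static bool hugetlb_flag = true;   /* "this folio is a hugetlb folio" */
static bool hwpoison_flag = true;  /* "the head page carries HWPoison" */

/* CPU1 in the diagram above: the fixed ordering. */
static void update_and_free(void)
{
	/*
	 * Clear the hugetlb flag under the lock first, closing the window
	 * in which a concurrent memory-failure path still sees a hugetlb
	 * folio ...
	 */
	pthread_mutex_lock(&hugetlb_lock);
	hugetlb_flag = false;              /* __folio_clear_hugetlb() */
	pthread_mutex_unlock(&hugetlb_lock);

	/*
	 * ... and only then move the poison marker out to the raw error
	 * pages.  The buggy ordering did this step before the locked
	 * section.
	 */
	if (hwpoison_flag)
		hwpoison_flag = false;     /* folio_clear_hugetlb_hwpoison() */
}

/* CPU2 in the diagram: the hugetlb check now fails once CPU1 has run. */
static void memory_failure(void)
{
	pthread_mutex_lock(&hugetlb_lock);
	if (hugetlb_flag)                  /* __get_huge_page_for_hwpoison() */
		hwpoison_flag = true;      /* folio_set_hugetlb_hwpoison() */
	pthread_mutex_unlock(&hugetlb_lock);
}

int main(void)
{
	update_and_free();
	memory_failure();
	printf("stale hugetlb hwpoison marker: %s\n",
	       hwpoison_flag ? "yes (leaked)" : "no");
	return 0;
}

With the old ordering, running the two steps of update_and_free() the other way around leaves hwpoison_flag set on a folio that is no longer hugetlb, which is exactly the leak described in the commit message.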
diff --git a/queue-6.6/nouveau-set-placement-to-original-placement-on-uvmm-validate.patch b/queue-6.6/nouveau-set-placement-to-original-placement-on-uvmm-validate.patch
new file mode 100644
index 00000000000..0ac7cb77a52
--- /dev/null
+++ b/queue-6.6/nouveau-set-placement-to-original-placement-on-uvmm-validate.patch
@@ -0,0 +1,51 @@
+From 9c685f61722d30a22d55bb8a48f7a48bb2e19bcc Mon Sep 17 00:00:00 2001
+From: Dave Airlie
+Date: Wed, 15 May 2024 12:55:41 +1000
+Subject: nouveau: set placement to original placement on uvmm validate.
+
+From: Dave Airlie
+
+commit 9c685f61722d30a22d55bb8a48f7a48bb2e19bcc upstream.
+
+When a buffer is evicted under memory pressure or by a TTM evict-all,
+its placement is set to the eviction domain, which means the buffer
+never gets revalidated on the next exec to the correct domain.
+
+I think it should be fine to use the initial domain from object
+creation; at least with VM_BIND this won't change after init, so this
+should be the correct answer.
+
+Fixes: b88baab82871 ("drm/nouveau: implement new VM_BIND uAPI")
+Cc: Danilo Krummrich
+Cc: # v6.6
+Signed-off-by: Dave Airlie
+Signed-off-by: Danilo Krummrich
+Link: https://patchwork.freedesktop.org/patch/msgid/20240515025542.2156774-1-airlied@gmail.com
+Signed-off-by: Danilo Krummrich
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/gpu/drm/nouveau/nouveau_uvmm.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+@@ -1320,6 +1320,7 @@ nouveau_uvmm_bind_job_submit(struct nouv
+ 
+ 	drm_gpuva_for_each_op(va_op, op->ops) {
+ 		struct drm_gem_object *obj = op_gem_obj(va_op);
++		struct nouveau_bo *nvbo;
+ 
+ 		if (unlikely(!obj))
+ 			continue;
+@@ -1330,8 +1331,9 @@ nouveau_uvmm_bind_job_submit(struct nouv
+ 		if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
+ 			continue;
+ 
+-		ret = nouveau_bo_validate(nouveau_gem_object(obj),
+-					  true, false);
++		nvbo = nouveau_gem_object(obj);
++		nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
++		ret = nouveau_bo_validate(nvbo, true, false);
+ 		if (ret) {
+ 			op = list_last_op(&bind_job->ops);
+ 			goto unwind;
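The mechanism behind this fix is easy to model in userspace. The sketch below is illustrative only: the struct, the domain constants, and both helpers are hypothetical simplifications of TTM buffer-object state, not the real nouveau API.

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_VRAM  0x1
#define DOMAIN_GART  0x2

struct bo {
	uint32_t valid_domains;   /* domains chosen at creation (fixed) */
	uint32_t placement;       /* where validate() will put the BO   */
	uint32_t resident;        /* where the backing store lives now  */
};

static void evict(struct bo *bo)
{
	bo->placement = DOMAIN_GART;      /* eviction rewrites placement */
	bo->resident = DOMAIN_GART;
}

static void validate(struct bo *bo)
{
	/* Without the placement reset, resident already satisfies
	 * placement after an eviction, so nothing migrates back. */
	if (!(bo->resident & bo->placement))
		bo->resident = bo->placement; /* migrate to desired domain */
}

int main(void)
{
	struct bo bo = { DOMAIN_VRAM, DOMAIN_VRAM, DOMAIN_VRAM };

	evict(&bo);
	bo.placement = bo.valid_domains;  /* the fix: restore placement */
	validate(&bo);
	printf("resident in VRAM: %s\n",
	       (bo.resident & DOMAIN_VRAM) ? "yes" : "no");
	return 0;
}

The middle line of main() is the design choice the commit message defends: rather than remembering where the buffer last was, validation asks for the creation-time domains again, which is safe under VM_BIND because those domains never change after init.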
diff --git a/queue-6.6/series b/queue-6.6/series
index 208392351a3..7d6b2c270fe 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -176,3 +176,6 @@ btrfs-fix-corruption-after-buffer-fault-in-during-direct-io-append-write.patch
 netfilter-nf_tables-prefer-nft_chain_validate.patch
 ipv6-fix-source-address-selection-with-route-leak.patch
 tools-headers-arm64-sync-arm64-s-cputype.h-with-the-kernel-sources.patch
+mm-hugetlb-fix-potential-race-in-__update_and_free_hugetlb_folio.patch
+nouveau-set-placement-to-original-placement-on-uvmm-validate.patch
+xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch
diff --git a/queue-6.6/xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch b/queue-6.6/xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch
new file mode 100644
index 00000000000..89ccc539423
--- /dev/null
+++ b/queue-6.6/xfs-fix-log-recovery-buffer-allocation-for-the-legacy-h_size-fixup.patch
@@ -0,0 +1,72 @@
+From 45cf976008ddef4a9c9a30310c9b4fb2a9a6602a Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig
+Date: Tue, 30 Apr 2024 06:07:55 +0200
+Subject: xfs: fix log recovery buffer allocation for the legacy h_size fixup
+
+From: Christoph Hellwig
+
+commit 45cf976008ddef4a9c9a30310c9b4fb2a9a6602a upstream.
+
+Commit a70f9fe52daa ("xfs: detect and handle invalid iclog size set by
+mkfs") added a fixup for incorrect h_size values used for the initial
+umount record in old xfsprogs versions.  Later commit 0c771b99d6c9
+("xfs: clean up calculation of LR header blocks") cleaned up the log
+recovery buffer calculation, but stopped using the fixed-up h_size
+value to size the log recovery buffer, which can lead to an
+out-of-bounds access when the incorrect h_size does not come from the
+old mkfs tool, but from a fuzzer.
+
+Fix this by open-coding xlog_logrec_hblks and taking the fixed-up
+h_size into account for this calculation.
+
+Fixes: 0c771b99d6c9 ("xfs: clean up calculation of LR header blocks")
+Reported-by: Sam Sun
+Signed-off-by: Christoph Hellwig
+Reviewed-by: Brian Foster
+Reviewed-by: "Darrick J. Wong"
+Signed-off-by: Chandan Babu R
+Signed-off-by: Kevin Berry
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/xfs/xfs_log_recover.c |   20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2965,7 +2965,7 @@ xlog_do_recovery_pass(
+ 	int			error = 0, h_size, h_len;
+ 	int			error2 = 0;
+ 	int			bblks, split_bblks;
+-	int			hblks, split_hblks, wrapped_hblks;
++	int			hblks = 1, split_hblks, wrapped_hblks;
+ 	int			i;
+ 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
+ 	LIST_HEAD		(buffer_list);
+@@ -3021,14 +3021,22 @@ xlog_do_recovery_pass(
+ 		if (error)
+ 			goto bread_err1;
+ 
+-		hblks = xlog_logrec_hblks(log, rhead);
+-		if (hblks != 1) {
+-			kmem_free(hbp);
+-			hbp = xlog_alloc_buffer(log, hblks);
++		/*
++		 * This open codes xlog_logrec_hblks so that we can reuse the
++		 * fixed up h_size value calculated above.  Without that we'd
++		 * still allocate the buffer based on the incorrect on-disk
++		 * size.
++		 */
++		if (h_size > XLOG_HEADER_CYCLE_SIZE &&
++		    (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
++			hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
++			if (hblks > 1) {
++				kmem_free(hbp);
++				hbp = xlog_alloc_buffer(log, hblks);
++			}
+ 		}
+ 	} else {
+ 		ASSERT(log->l_sectBBsize == 1);
+-		hblks = 1;
+ 		hbp = xlog_alloc_buffer(log, 1);
+ 		h_size = XLOG_BIG_RECORD_BSIZE;
+ 	}
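For readers who want to check the arithmetic, here is a standalone model of the open-coded calculation. The constants mirror their XFS definitions as far as I know (XLOG_HEADER_CYCLE_SIZE is 32k, XLOG_VERSION_2 is 0x2), DIV_ROUND_UP is redefined locally, and the endianness handling of rhead->h_version is dropped, so treat this as an illustration rather than kernel code.

#include <stdio.h>

#define XLOG_HEADER_CYCLE_SIZE	(32 * 1024)	/* bytes described per header block */
#define XLOG_VERSION_2		0x2

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Mirrors the fixed logic: derive the header-block count from the
 * (already fixed up) h_size instead of trusting the on-disk value.
 */
static int logrec_hblks(int h_size, int h_version)
{
	if (h_size > XLOG_HEADER_CYCLE_SIZE && (h_version & XLOG_VERSION_2))
		return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	return 1;
}

int main(void)
{
	/* A 32k record header fits in a single header block. */
	printf("%d\n", logrec_hblks(32 * 1024, XLOG_VERSION_2));   /* 1 */
	/* A 256k v2 record header needs eight header blocks; a buffer
	 * sized from a smaller on-disk h_size would be overrun when the
	 * fixed-up size is used later in recovery. */
	printf("%d\n", logrec_hblks(256 * 1024, XLOG_VERSION_2));  /* 8 */
	return 0;
}

Initializing hblks to 1 in the patch serves the same purpose as the fallback return here: every path now leaves hblks consistent with the buffer that was actually allocated.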