From: Greg Kroah-Hartman
Date: Fri, 23 Feb 2024 16:00:45 +0000 (+0100)
Subject: 5.4-stable patches
X-Git-Tag: v4.19.308~100
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=cb2d060570ad4b67747ef17bc8fa6364981daf3a;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
	nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch
	userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch
---

diff --git a/queue-5.4/nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch b/queue-5.4/nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch
new file mode 100644
index 00000000000..08024dce182
--- /dev/null
+++ b/queue-5.4/nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch
@@ -0,0 +1,82 @@
+From 5124a0a549857c4b87173280e192eea24dea72ad Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi
+Date: Fri, 27 Jan 2023 01:41:14 +0900
+Subject: nilfs2: replace WARN_ONs for invalid DAT metadata block requests
+
+From: Ryusuke Konishi
+
+commit 5124a0a549857c4b87173280e192eea24dea72ad upstream.
+
+If DAT metadata file block access fails due to corruption of the DAT file
+or abnormal virtual block numbers held by b-trees or inodes, a kernel
+warning is generated.
+
+This replaces the WARN_ONs by error output, so that a kernel, booted with
+panic_on_warn, does not panic. This patch also replaces the detected
+return code -ENOENT with another internal code -EINVAL to notify the bmap
+layer of metadata corruption. When the bmap layer sees -EINVAL, it
+handles the abnormal situation with nilfs_bmap_convert_error() and finally
+returns code -EIO as it should.
+
+Link: https://lkml.kernel.org/r/0000000000005cc3d205ea23ddcf@google.com
+Link: https://lkml.kernel.org/r/20230126164114.6911-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi
+Reported-by:
+Tested-by: Ryusuke Konishi
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/nilfs2/dat.c |   27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -40,8 +40,21 @@ static inline struct nilfs_dat_info *NIL
+ static int nilfs_dat_prepare_entry(struct inode *dat,
+				    struct nilfs_palloc_req *req, int create)
+ {
+-	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
+-					    create, &req->pr_entry_bh);
++	int ret;
++
++	ret = nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
++					   create, &req->pr_entry_bh);
++	if (unlikely(ret == -ENOENT)) {
++		nilfs_msg(dat->i_sb, KERN_ERR,
++			  "DAT doesn't have a block to manage vblocknr = %llu",
++			  (unsigned long long)req->pr_entry_nr);
++		/*
++		 * Return internal code -EINVAL to notify bmap layer of
++		 * metadata corruption.
++		 */
++		ret = -EINVAL;
++	}
++	return ret;
+ }
+ 
+ static void nilfs_dat_commit_entry(struct inode *dat,
+@@ -123,11 +136,7 @@ static void nilfs_dat_commit_free(struct
+ 
+ int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+-	int ret;
+-
+-	ret = nilfs_dat_prepare_entry(dat, req, 0);
+-	WARN_ON(ret == -ENOENT);
+-	return ret;
++	return nilfs_dat_prepare_entry(dat, req, 0);
+ }
+ 
+ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
+@@ -154,10 +163,8 @@ int nilfs_dat_prepare_end(struct inode *
+ 	int ret;
+ 
+ 	ret = nilfs_dat_prepare_entry(dat, req, 0);
+-	if (ret < 0) {
+-		WARN_ON(ret == -ENOENT);
++	if (ret < 0)
+ 		return ret;
+-	}
+ 
+ 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
+ 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
diff --git a/queue-5.4/series b/queue-5.4/series
index b9ae37e2a0e..04f0e7cab68 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -3,3 +3,5 @@ net-sched-retire-atm-qdisc.patch
 net-sched-retire-dsmark-qdisc.patch
 sched-rt-sysctl_sched_rr_timeslice-show-default-timeslice-after-reset.patch
 memcg-add-refcnt-for-pcpu-stock-to-avoid-uaf-problem-in-drain_all_stock.patch
+nilfs2-replace-warn_ons-for-invalid-dat-metadata-block-requests.patch
+userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch
diff --git a/queue-5.4/userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch b/queue-5.4/userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch
new file mode 100644
index 00000000000..e01a8947390
--- /dev/null
+++ b/queue-5.4/userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch
@@ -0,0 +1,80 @@
+From 67695f18d55924b2013534ef3bdc363bc9e14605 Mon Sep 17 00:00:00 2001
+From: Lokesh Gidra
+Date: Wed, 17 Jan 2024 14:37:29 -0800
+Subject: userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb
+
+From: Lokesh Gidra
+
+commit 67695f18d55924b2013534ef3bdc363bc9e14605 upstream.
+
+In mfill_atomic_hugetlb(), mmap_changing isn't being checked
+again if we drop mmap_lock and reacquire it. When the lock is not held,
+mmap_changing could have been incremented. This is also inconsistent
+with the behavior in mfill_atomic().
+
+Link: https://lkml.kernel.org/r/20240117223729.1444522-1-lokeshgidra@google.com
+Fixes: df2cc96e77011 ("userfaultfd: prevent non-cooperative events vs mcopy_atomic races")
+Signed-off-by: Lokesh Gidra
+Cc: Andrea Arcangeli
+Cc: Mike Rapoport
+Cc: Axel Rasmussen
+Cc: Brian Geffon
+Cc: David Hildenbrand
+Cc: Jann Horn
+Cc: Kalesh Singh
+Cc: Matthew Wilcox (Oracle)
+Cc: Nicolas Geoffray
+Cc: Peter Xu
+Cc: Suren Baghdasaryan
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Mike Rapoport (IBM)
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/userfaultfd.c |   14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -177,6 +177,7 @@ static __always_inline ssize_t __mcopy_a
+					      unsigned long dst_start,
+					      unsigned long src_start,
+					      unsigned long len,
++					      bool *mmap_changing,
+					      bool zeropage)
+ {
+	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
+@@ -308,6 +309,15 @@ retry:
+			goto out;
+		}
+		down_read(&dst_mm->mmap_sem);
++		/*
++		 * If memory mappings are changing because of non-cooperative
++		 * operation (e.g. mremap) running in parallel, bail out and
++		 * request the user to retry later
++		 */
++		if (mmap_changing && READ_ONCE(*mmap_changing)) {
++			err = -EAGAIN;
++			break;
++		}
+ 
+		dst_vma = NULL;
+		goto retry;
+@@ -389,6 +399,7 @@ extern ssize_t __mcopy_atomic_hugetlb(st
+				      unsigned long dst_start,
+				      unsigned long src_start,
+				      unsigned long len,
++				      bool *mmap_changing,
+				      bool zeropage);
+ #endif /* CONFIG_HUGETLB_PAGE */
+ 
+@@ -506,7 +517,8 @@ retry:
+	 */
+	if (is_vm_hugetlb_page(dst_vma))
+		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
+-						src_start, len, zeropage);
++					       src_start, len, mmap_changing,
++					       zeropage);
+ 
+	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
+		goto out_unlock;