From: Greg Kroah-Hartman
Date: Fri, 15 Nov 2024 05:34:37 +0000 (+0100)
Subject: 5.4-stable patches
X-Git-Tag: v4.19.324~11
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a778578d7989b31c0fe9f5e955505173a66272ee;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
      mm-add-remap_pfn_range_notrack.patch
      mm-clarify-a-confusing-comment-for-remap_pfn_range.patch
      mm-fix-ambiguous-comments-for-better-code-readability.patch
      mm-memory.c-make-remap_pfn_range-reject-unaligned-addr.patch
---

diff --git a/queue-5.4/mm-add-remap_pfn_range_notrack.patch b/queue-5.4/mm-add-remap_pfn_range_notrack.patch
new file mode 100644
index 00000000000..6e44541e2cf
--- /dev/null
+++ b/queue-5.4/mm-add-remap_pfn_range_notrack.patch
@@ -0,0 +1,138 @@
+From 74ffa5a3e68504dd289135b1cf0422c19ffb3f2e Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig
+Date: Thu, 29 Apr 2021 22:57:29 -0700
+Subject: mm: add remap_pfn_range_notrack
+
+From: Christoph Hellwig
+
+commit 74ffa5a3e68504dd289135b1cf0422c19ffb3f2e upstream.
+
+Patch series "add remap_pfn_range_notrack instead of reinventing it in i915", v2.
+
+i915 has some reason to want to avoid the track_pfn_remap overhead in
+remap_pfn_range.  Add a function to the core VM to do just that rather
+than reinventing the functionality poorly in the driver.
+
+Note that the remap_io_sg path does get exercises when using Xorg on my
+Thinkpad X1, so this should be considered lightly tested, I've not managed
+to hit the remap_io_mapping path at all.
+
+This patch (of 4):
+
+Add a version of remap_pfn_range that does not call track_pfn_range.  This
+will be used to fix horrible abuses of VM internals in the i915 driver.
+
+Link: https://lkml.kernel.org/r/20210326055505.1424432-1-hch@lst.de
+Link: https://lkml.kernel.org/r/20210326055505.1424432-2-hch@lst.de
+Signed-off-by: Christoph Hellwig
+Acked-by: Daniel Vetter
+Cc: Jani Nikula
+Cc: Joonas Lahtinen
+Cc: Rodrigo Vivi
+Cc: Chris Wilson
+Cc: Peter Zijlstra
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+(cherry picked from commit 69d4e1ce9087c8767f2fe9b9426fa2755c8e9072)
+Signed-off-by: Harshvardhan Jha
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/mm.h |    2 ++
+ mm/memory.c        |   51 +++++++++++++++++++++++++++++++--------------------
+ 2 files changed, 33 insertions(+), 20 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2566,6 +2566,8 @@ unsigned long change_prot_numa(struct vm
+ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ 			unsigned long pfn, unsigned long size, pgprot_t);
++int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++		unsigned long pfn, unsigned long size, pgprot_t prot);
+ int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+ int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+ 				unsigned long num);
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1917,26 +1917,17 @@ static inline int remap_p4d_range(struct
+ 	return 0;
+ }
+ 
+-/**
+- * remap_pfn_range - remap kernel memory to userspace
+- * @vma: user vma to map to
+- * @addr: target page aligned user address to start at
+- * @pfn: page frame number of kernel physical memory address
+- * @size: size of mapping area
+- * @prot: page protection flags for this mapping
+- *
+- * Note: this is only safe if the mm semaphore is held when called.
+- *
+- * Return: %0 on success, negative error code otherwise.
++/*
++ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
++ * must have pre-validated the caching bits of the pgprot_t.
+  */
+-int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+-		    unsigned long pfn, unsigned long size, pgprot_t prot)
++int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++		unsigned long pfn, unsigned long size, pgprot_t prot)
+ {
+ 	pgd_t *pgd;
+ 	unsigned long next;
+ 	unsigned long end = addr + PAGE_ALIGN(size);
+ 	struct mm_struct *mm = vma->vm_mm;
+-	unsigned long remap_pfn = pfn;
+ 	int err;
+ 
+ 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
+@@ -1966,10 +1957,6 @@ int remap_pfn_range(struct vm_area_struc
+ 		vma->vm_pgoff = pfn;
+ 	}
+ 
+-	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
+-	if (err)
+-		return -EINVAL;
+-
+ 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ 
+ 	BUG_ON(addr >= end);
+@@ -1981,12 +1968,36 @@ int remap_pfn_range(struct vm_area_struc
+ 		err = remap_p4d_range(mm, pgd, addr, next,
+ 				pfn + (addr >> PAGE_SHIFT), prot);
+ 		if (err)
+-			break;
++			return err;
+ 	} while (pgd++, addr = next, addr != end);
+ 
++	return 0;
++}
++
++/**
++ * remap_pfn_range - remap kernel memory to userspace
++ * @vma: user vma to map to
++ * @addr: target page aligned user address to start at
++ * @pfn: page frame number of kernel physical memory address
++ * @size: size of mapping area
++ * @prot: page protection flags for this mapping
++ *
++ * Note: this is only safe if the mm semaphore is held when called.
++ *
++ * Return: %0 on success, negative error code otherwise.
++ */
++int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
++		    unsigned long pfn, unsigned long size, pgprot_t prot)
++{
++	int err;
++
++	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+ 	if (err)
+-		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
++		return -EINVAL;
+ 
++	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
++	if (err)
++		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
+ 	return err;
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
diff --git a/queue-5.4/mm-clarify-a-confusing-comment-for-remap_pfn_range.patch b/queue-5.4/mm-clarify-a-confusing-comment-for-remap_pfn_range.patch
new file mode 100644
index 00000000000..cbd9b79c379
--- /dev/null
+++ b/queue-5.4/mm-clarify-a-confusing-comment-for-remap_pfn_range.patch
@@ -0,0 +1,37 @@
+From 86a76331d94c4cfa72fe1831dbe4b492f66fdb81 Mon Sep 17 00:00:00 2001
+From: WANG Wenhu
+Date: Wed, 1 Apr 2020 21:09:03 -0700
+Subject: mm: clarify a confusing comment for remap_pfn_range()
+
+From: WANG Wenhu
+
+commit 86a76331d94c4cfa72fe1831dbe4b492f66fdb81 upstream.
+
+It really made me scratch my head. Replace the comment with an accurate
+and consistent description.
+
+The parameter pfn actually refers to the page frame number which is
+right-shifted by PAGE_SHIFT from the physical address.
+
+Signed-off-by: WANG Wenhu
+Signed-off-by: Andrew Morton
+Reviewed-by: Andrew Morton
+Link: http://lkml.kernel.org/r/20200310073955.43415-1-wenhu.wang@vivo.com
+Signed-off-by: Linus Torvalds
+Signed-off-by: Harshvardhan Jha
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/memory.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1921,7 +1921,7 @@ static inline int remap_p4d_range(struct
+  * remap_pfn_range - remap kernel memory to userspace
+  * @vma: user vma to map to
+  * @addr: target user address to start at
+- * @pfn: physical address of kernel memory
++ * @pfn: page frame number of kernel physical memory address
+  * @size: size of map area
+  * @prot: page protection flags for this mapping
+  *
diff --git a/queue-5.4/mm-fix-ambiguous-comments-for-better-code-readability.patch b/queue-5.4/mm-fix-ambiguous-comments-for-better-code-readability.patch
new file mode 100644
index 00000000000..d995b847d05
--- /dev/null
+++ b/queue-5.4/mm-fix-ambiguous-comments-for-better-code-readability.patch
@@ -0,0 +1,52 @@
+From 552657b7b3343851916fde7e4fd6bfb6516d2bcb Mon Sep 17 00:00:00 2001
+From: chenqiwu
+Date: Mon, 6 Apr 2020 20:08:33 -0700
+Subject: mm: fix ambiguous comments for better code readability
+
+From: chenqiwu
+
+commit 552657b7b3343851916fde7e4fd6bfb6516d2bcb upstream.
+
+The parameter of remap_pfn_range() @pfn passed from the caller is actually
+a page-frame number converted by corresponding physical address of kernel
+memory, the original comment is ambiguous that may mislead the users.
+
+Meanwhile, there is an ambiguous typo "VMM" in the comment of
+vm_area_struct. So fixing them will make the code more readable.
+
+Signed-off-by: chenqiwu
+Signed-off-by: Andrew Morton
+Reviewed-by: Andrew Morton
+Link: http://lkml.kernel.org/r/1583026921-15279-1-git-send-email-qiwuchen55@gmail.com
+Signed-off-by: Linus Torvalds
+Signed-off-by: Harshvardhan Jha
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/mm_types.h |    4 ++--
+ mm/memory.c              |    2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -284,8 +284,8 @@ struct vm_userfaultfd_ctx {};
+ #endif /* CONFIG_USERFAULTFD */
+ 
+ /*
+- * This struct defines a memory VMM memory area. There is one of these
+- * per VM-area/task. A VM area is any part of the process virtual memory
++ * This struct describes a virtual memory area. There is one of these
++ * per VM-area/task. A VM area is any part of the process virtual memory
+  * space that has a special rule for the page-fault handlers (ie a shared
+  * library, the executable area etc).
+  */
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1922,7 +1922,7 @@ static inline int remap_p4d_range(struct
+  * @vma: user vma to map to
+  * @addr: target user address to start at
+  * @pfn: page frame number of kernel physical memory address
+- * @size: size of map area
++ * @size: size of mapping area
+  * @prot: page protection flags for this mapping
+  *
+  * Note: this is only safe if the mm semaphore is held when called.
diff --git a/queue-5.4/mm-memory.c-make-remap_pfn_range-reject-unaligned-addr.patch b/queue-5.4/mm-memory.c-make-remap_pfn_range-reject-unaligned-addr.patch
new file mode 100644
index 00000000000..d9cd4e7e1ae
--- /dev/null
+++ b/queue-5.4/mm-memory.c-make-remap_pfn_range-reject-unaligned-addr.patch
@@ -0,0 +1,47 @@
+From 0c4123e3fb82d6014d0a70b52eb38153f658541c Mon Sep 17 00:00:00 2001
+From: Alex Zhang
+Date: Thu, 6 Aug 2020 23:22:24 -0700
+Subject: mm/memory.c: make remap_pfn_range() reject unaligned addr
+
+From: Alex Zhang
+
+commit 0c4123e3fb82d6014d0a70b52eb38153f658541c upstream.
+
+This function implicitly assumes that the addr passed in is page aligned.
+A non page aligned addr could ultimately cause a kernel bug in
+remap_pte_range as the exit condition in the logic loop may never be
+satisfied. This patch documents the need for the requirement, as well as
+explicitly adds a check for it.
+
+Signed-off-by: Alex Zhang
+Signed-off-by: Andrew Morton
+Reviewed-by: Andrew Morton
+Link: http://lkml.kernel.org/r/20200617233512.177519-1-zhangalex@google.com
+Signed-off-by: Linus Torvalds
+Signed-off-by: Harshvardhan Jha
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/memory.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1920,7 +1920,7 @@ static inline int remap_p4d_range(struct
+ /**
+  * remap_pfn_range - remap kernel memory to userspace
+  * @vma: user vma to map to
+- * @addr: target user address to start at
++ * @addr: target page aligned user address to start at
+  * @pfn: page frame number of kernel physical memory address
+  * @size: size of mapping area
+  * @prot: page protection flags for this mapping
+@@ -1939,6 +1939,9 @@ int remap_pfn_range(struct vm_area_struc
+ 	unsigned long remap_pfn = pfn;
+ 	int err;
+ 
++	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
++		return -EINVAL;
++
+ 	/*
+ 	 * Physically remapped pages are special. Tell the
+ 	 * rest of the world about it:
diff --git a/queue-5.4/series b/queue-5.4/series
index 95a1a90960d..4a88168102c 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -59,3 +59,7 @@ powerpc-powernv-free-name-on-error-in-opal_event_ini.patch
 fs-fix-uninitialized-value-issue-in-from_kuid-and-fr.patch
 net-usb-qmi_wwan-add-fibocom-fg132-0x0112-compositio.patch
 md-raid10-improve-code-of-mrdev-in-raid10_sync_request.patch
+mm-clarify-a-confusing-comment-for-remap_pfn_range.patch
+mm-fix-ambiguous-comments-for-better-code-readability.patch
+mm-memory.c-make-remap_pfn_range-reject-unaligned-addr.patch
+mm-add-remap_pfn_range_notrack.patch
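
Illustrative sketch (not part of the stable queue): a minimal, hypothetical example of how a character-device driver's mmap handler might call remap_pfn_range() once these patches are applied. The example_phys_base, example_mmap and example_fops names are invented for illustration; the API calls are the ones referenced by the patches above. remap_pfn_range() still performs PAT tracking through track_pfn_remap(), while the new remap_pfn_range_notrack() skips that step and is intended for callers, such as i915, that have already validated the caching bits of the pgprot_t; the target address (here vma->vm_start) must be page aligned, which the backported WARN_ON_ONCE check now enforces.

/* Illustrative sketch only -- the example_* names are hypothetical. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

/* Hypothetical physical base address of a device memory window. */
static phys_addr_t example_phys_base;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (example_phys_base >> PAGE_SHIFT) + vma->vm_pgoff;

	/*
	 * remap_pfn_range() expects a page aligned target address
	 * (vma->vm_start always is) and tracks the PFN range via
	 * track_pfn_remap().  A caller that has already validated the
	 * caching bits of vma->vm_page_prot could use
	 * remap_pfn_range_notrack() here instead.
	 */
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.mmap  = example_mmap,
};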