--- /dev/null
+From 74ffa5a3e68504dd289135b1cf0422c19ffb3f2e Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Thu, 29 Apr 2021 22:57:29 -0700
+Subject: mm: add remap_pfn_range_notrack
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 74ffa5a3e68504dd289135b1cf0422c19ffb3f2e upstream.
+
+Patch series "add remap_pfn_range_notrack instead of reinventing it in i915", v2.
+
+i915 has some reason to want to avoid the track_pfn_remap overhead in
+remap_pfn_range. Add a function to the core VM to do just that rather
+than reinventing the functionality poorly in the driver.
+
+Note that the remap_io_sg path does get exercised when using Xorg on my
+Thinkpad X1, so this should be considered lightly tested; I've not managed
+to hit the remap_io_mapping path at all.
+
+This patch (of 4):
+
+Add a version of remap_pfn_range that does not call track_pfn_remap. This
+will be used to fix horrible abuses of VM internals in the i915 driver.
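+
+For illustration, a driver that has already validated the caching bits of
+its pgprot_t might use the new helper along these lines (a minimal sketch
+with hypothetical names, not taken from the i915 code):
+
+	static int my_mmap_bar(struct pci_dev *pdev, struct vm_area_struct *vma)
+	{
+		unsigned long pfn = pci_resource_start(pdev, 0) >> PAGE_SHIFT;
+
+		/* caching bits pre-validated: I/O memory is mapped uncached */
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+		return remap_pfn_range_notrack(vma, vma->vm_start, pfn,
+					       vma->vm_end - vma->vm_start,
+					       vma->vm_page_prot);
+	}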
+
+Link: https://lkml.kernel.org/r/20210326055505.1424432-1-hch@lst.de
+Link: https://lkml.kernel.org/r/20210326055505.1424432-2-hch@lst.de
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+(cherry picked from commit 69d4e1ce9087c8767f2fe9b9426fa2755c8e9072)
+Signed-off-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 2 ++
+ mm/memory.c | 51 +++++++++++++++++++++++++++++++--------------------
+ 2 files changed, 33 insertions(+), 20 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2566,6 +2566,8 @@ unsigned long change_prot_numa(struct vm
+ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
++int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn, unsigned long size, pgprot_t prot);
+ int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+ int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+ unsigned long num);
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1917,26 +1917,17 @@ static inline int remap_p4d_range(struct
+ return 0;
+ }
+
+-/**
+- * remap_pfn_range - remap kernel memory to userspace
+- * @vma: user vma to map to
+- * @addr: target page aligned user address to start at
+- * @pfn: page frame number of kernel physical memory address
+- * @size: size of mapping area
+- * @prot: page protection flags for this mapping
+- *
+- * Note: this is only safe if the mm semaphore is held when called.
+- *
+- * Return: %0 on success, negative error code otherwise.
++/*
++ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
++ * must have pre-validated the caching bits of the pgprot_t.
+ */
+-int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+- unsigned long pfn, unsigned long size, pgprot_t prot)
++int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn, unsigned long size, pgprot_t prot)
+ {
+ pgd_t *pgd;
+ unsigned long next;
+ unsigned long end = addr + PAGE_ALIGN(size);
+ struct mm_struct *mm = vma->vm_mm;
+- unsigned long remap_pfn = pfn;
+ int err;
+
+ if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
+@@ -1966,10 +1957,6 @@ int remap_pfn_range(struct vm_area_struc
+ vma->vm_pgoff = pfn;
+ }
+
+- err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
+- if (err)
+- return -EINVAL;
+-
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+ BUG_ON(addr >= end);
+@@ -1981,12 +1968,36 @@ int remap_pfn_range(struct vm_area_struc
+ err = remap_p4d_range(mm, pgd, addr, next,
+ pfn + (addr >> PAGE_SHIFT), prot);
+ if (err)
+- break;
++ return err;
+ } while (pgd++, addr = next, addr != end);
+
++ return 0;
++}
++
++/**
++ * remap_pfn_range - remap kernel memory to userspace
++ * @vma: user vma to map to
++ * @addr: target page aligned user address to start at
++ * @pfn: page frame number of kernel physical memory address
++ * @size: size of mapping area
++ * @prot: page protection flags for this mapping
++ *
++ * Note: this is only safe if the mm semaphore is held when called.
++ *
++ * Return: %0 on success, negative error code otherwise.
++ */
++int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn, unsigned long size, pgprot_t prot)
++{
++ int err;
++
++ err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+ if (err)
+- untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
++ return -EINVAL;
+
++ err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
++ if (err)
++ untrack_pfn(vma, pfn, PAGE_ALIGN(size));
+ return err;
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
--- /dev/null
+From 86a76331d94c4cfa72fe1831dbe4b492f66fdb81 Mon Sep 17 00:00:00 2001
+From: WANG Wenhu <wenhu.wang@vivo.com>
+Date: Wed, 1 Apr 2020 21:09:03 -0700
+Subject: mm: clarify a confusing comment for remap_pfn_range()
+
+From: WANG Wenhu <wenhu.wang@vivo.com>
+
+commit 86a76331d94c4cfa72fe1831dbe4b492f66fdb81 upstream.
+
+It really made me scratch my head. Replace the comment with an accurate
+and consistent description.
+
+The pfn parameter actually refers to the page frame number, i.e. the
+physical address right-shifted by PAGE_SHIFT.
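+
+For illustration, a caller starting from a kernel virtual address would
+derive the pfn like this (a sketch; "buf" is a hypothetical kernel
+buffer, and PHYS_PFN() expresses the same right shift):
+
+	phys_addr_t phys = virt_to_phys(buf);
+	unsigned long pfn = phys >> PAGE_SHIFT;	/* equivalently PHYS_PFN(phys) */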
+
+Signed-off-by: WANG Wenhu <wenhu.wang@vivo.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Link: http://lkml.kernel.org/r/20200310073955.43415-1-wenhu.wang@vivo.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1921,7 +1921,7 @@ static inline int remap_p4d_range(struct
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+- * @pfn: physical address of kernel memory
++ * @pfn: page frame number of kernel physical memory address
+ * @size: size of map area
+ * @prot: page protection flags for this mapping
+ *
--- /dev/null
+From 552657b7b3343851916fde7e4fd6bfb6516d2bcb Mon Sep 17 00:00:00 2001
+From: chenqiwu <chenqiwu@xiaomi.com>
+Date: Mon, 6 Apr 2020 20:08:33 -0700
+Subject: mm: fix ambiguous comments for better code readability
+
+From: chenqiwu <chenqiwu@xiaomi.com>
+
+commit 552657b7b3343851916fde7e4fd6bfb6516d2bcb upstream.
+
+The @pfn parameter of remap_pfn_range() passed by the caller is actually
+a page-frame number derived from the corresponding physical address of
+kernel memory; the original comment is ambiguous and may mislead users.
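+
+For illustration, the common idiom in driver mmap handlers already treats
+@pfn as a page-frame number: mmap()'s byte offset arrives pre-shifted in
+vma->vm_pgoff, so it can be passed straight through (a sketch, not taken
+from any particular driver):
+
+	static int my_mmap(struct file *file, struct vm_area_struct *vma)
+	{
+		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+				       vma->vm_end - vma->vm_start,
+				       vma->vm_page_prot);
+	}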
+
+Meanwhile, the comment describing vm_area_struct contains a confusing
+typo, "VMM". Fixing both makes the code more readable.
+
+Signed-off-by: chenqiwu <chenqiwu@xiaomi.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Link: http://lkml.kernel.org/r/1583026921-15279-1-git-send-email-qiwuchen55@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm_types.h | 4 ++--
+ mm/memory.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -284,8 +284,8 @@ struct vm_userfaultfd_ctx {};
+ #endif /* CONFIG_USERFAULTFD */
+
+ /*
+- * This struct defines a memory VMM memory area. There is one of these
+- * per VM-area/task. A VM area is any part of the process virtual memory
++ * This struct describes a virtual memory area. There is one of these
++ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1922,7 +1922,7 @@ static inline int remap_p4d_range(struct
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: page frame number of kernel physical memory address
+- * @size: size of map area
++ * @size: size of mapping area
+ * @prot: page protection flags for this mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
--- /dev/null
+From 0c4123e3fb82d6014d0a70b52eb38153f658541c Mon Sep 17 00:00:00 2001
+From: Alex Zhang <zhangalex@google.com>
+Date: Thu, 6 Aug 2020 23:22:24 -0700
+Subject: mm/memory.c: make remap_pfn_range() reject unaligned addr
+
+From: Alex Zhang <zhangalex@google.com>
+
+commit 0c4123e3fb82d6014d0a70b52eb38153f658541c upstream.
+
+This function implicitly assumes that the addr passed in is page aligned.
+A non-page-aligned addr could ultimately cause a kernel bug in
+remap_pte_range, as the exit condition in its loop may never be
+satisfied. This patch documents the requirement and adds an explicit
+check for it.
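+
+For illustration, a caller can guarantee the precondition up front (a
+sketch; PAGE_ALIGNED() is the same helper the new check uses):
+
+	static int my_remap(struct vm_area_struct *vma, unsigned long addr,
+			    unsigned long pfn, unsigned long size)
+	{
+		if (!PAGE_ALIGNED(addr))	/* mirrors the new WARN_ON_ONCE check */
+			return -EINVAL;
+
+		return remap_pfn_range(vma, addr, pfn, size, vma->vm_page_prot);
+	}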
+
+Signed-off-by: Alex Zhang <zhangalex@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Link: http://lkml.kernel.org/r/20200617233512.177519-1-zhangalex@google.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1920,7 +1920,7 @@ static inline int remap_p4d_range(struct
+ /**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+- * @addr: target user address to start at
++ * @addr: target page aligned user address to start at
+ * @pfn: page frame number of kernel physical memory address
+ * @size: size of mapping area
+ * @prot: page protection flags for this mapping
+@@ -1939,6 +1939,9 @@ int remap_pfn_range(struct vm_area_struc
+ unsigned long remap_pfn = pfn;
+ int err;
+
++ if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
++ return -EINVAL;
++
+ /*
+ * Physically remapped pages are special. Tell the
+ * rest of the world about it: