From 35770ca6180caa24a2b258c99a87bd437a1ee10f Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Wed, 11 Sep 2024 17:11:23 -0700
Subject: mm: avoid leaving partial pfn mappings around in error case

From: Linus Torvalds <torvalds@linux-foundation.org>

commit 79a61cc3fc0466ad2b7b89618a6157785f0293b3 upstream.

As Jann points out, PFN mappings are special, because unlike normal
memory mappings, there is no lifetime information associated with the
mapping - it is just a raw mapping of PFNs with no reference counting of
a 'struct page'.

That's all very much intentional, but it does mean that it's easy to
mess up the cleanup in case of errors. Yes, a failed mmap() will always
eventually clean up any partial mappings, but without any explicit
lifetime in the page table mapping itself, it's very easy to do the
error handling in the wrong order.

In particular, it's easy to mistakenly free the physical backing store
before the page tables are actually cleaned up and (temporarily) have
stale dangling PTE entries.
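
For illustration, a minimal sketch of that error-prone ordering in a
hypothetical driver ->mmap() handler (mydrv_mmap, struct mydrv_buf,
buf_alloc() and buf_free() are made-up names for this example, not
existing kernel API):

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;
		struct mydrv_buf *buf;	/* hypothetical backing store */
		int error;

		buf = buf_alloc(size);
		if (!buf)
			return -ENOMEM;

		error = remap_pfn_range(vma, vma->vm_start, buf->pfn,
					size, vma->vm_page_prot);
		if (error) {
			/*
			 * Wrong order: any PTEs installed before the
			 * failure still point at buf's pages, so
			 * freeing buf here leaves stale dangling PTE
			 * entries behind until the failed mmap() is
			 * eventually cleaned up.
			 */
			buf_free(buf);
			return error;
		}
		return 0;
	}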

To make this situation less error-prone, just make sure that any partial
pfn mapping is torn down early, before any other error handling.
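
With that early teardown in place (the zap_page_range_single() call in
remap_pfn_range_notrack() below), a caller like the sketch above becomes
safe without any changes on its side: by the time remap_pfn_range()
returns an error, the partially installed PTEs are already gone, so
freeing the backing store no longer leaves danglers.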

Reported-and-tested-by: Jann Horn <jannh@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Simona Vetter <simona.vetter@ffwll.ch>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Harshvardhan Jha <harshvardhan.j.jha@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 mm/memory.c | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)

--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1917,11 +1917,7 @@ static inline int remap_p4d_range(struct
 	return 0;
 }
 
-/*
- * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
- * must have pre-validated the caching bits of the pgprot_t.
- */
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
 		unsigned long pfn, unsigned long size, pgprot_t prot)
 {
 	pgd_t *pgd;
@@ -1974,6 +1970,27 @@ int remap_pfn_range_notrack(struct vm_ar
 	return 0;
 }
 
+/*
+ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
+ * must have pre-validated the caching bits of the pgprot_t.
+ */
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+		unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
+
+	if (!error)
+		return 0;
+
+	/*
+	 * A partial pfn range mapping is dangerous: it does not
+	 * maintain page reference counts, and callers may free
+	 * pages due to the error. So zap it early.
+	 */
+	zap_page_range_single(vma, addr, size, NULL);
+	return error;
+}
+
 /**
  * remap_pfn_range - remap kernel memory to userspace
  * @vma: user vma to map to