git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.18-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 31 Aug 2018 20:14:02 +0000 (13:14 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 31 Aug 2018 20:14:02 +0000 (13:14 -0700)
added patches:
arm64-mm-check-for-upper-page_shift-bits-in-pfn_valid.patch

queue-3.18/arm64-mm-check-for-upper-page_shift-bits-in-pfn_valid.patch [new file with mode: 0644]
queue-3.18/series

diff --git a/queue-3.18/arm64-mm-check-for-upper-page_shift-bits-in-pfn_valid.patch b/queue-3.18/arm64-mm-check-for-upper-page_shift-bits-in-pfn_valid.patch
new file mode 100644 (file)
index 0000000..989df20
--- /dev/null
@@ -0,0 +1,59 @@
+From 5ad356eabc47d26a92140a0c4b20eba471c10de3 Mon Sep 17 00:00:00 2001
+From: Greg Hackmann <ghackmann@android.com>
+Date: Wed, 15 Aug 2018 12:51:21 -0700
+Subject: arm64: mm: check for upper PAGE_SHIFT bits in pfn_valid()
+
+From: Greg Hackmann <ghackmann@android.com>
+
+commit 5ad356eabc47d26a92140a0c4b20eba471c10de3 upstream.
+
+ARM64's pfn_valid() shifts away the upper PAGE_SHIFT bits of the input
+before seeing if the PFN is valid.  This leads to false positives when
+some of the upper bits are set, but the lower bits match a valid PFN.
+
+For example, the following userspace code looks up a bogus entry in
+/proc/kpageflags:
+
+    int pagemap = open("/proc/self/pagemap", O_RDONLY);
+    int pageflags = open("/proc/kpageflags", O_RDONLY);
+    uint64_t pfn, val;
+
+    lseek64(pagemap, [...], SEEK_SET);
+    read(pagemap, &pfn, sizeof(pfn));
+    if (pfn & (1UL << 63)) {        /* valid PFN */
+        pfn &= ((1UL << 55) - 1);   /* clear flag bits */
+        pfn |= (1UL << 55);
+        lseek64(pageflags, pfn * sizeof(uint64_t), SEEK_SET);
+        read(pageflags, &val, sizeof(val));
+    }
+
+On ARM64 this causes the userspace process to crash with SIGSEGV rather
+than reading (1 << KPF_NOPAGE).  kpageflags_read() treats the offset as
+valid, and stable_page_flags() will try to access an address between the
+user and kernel address ranges.
+
+Fixes: c1cc1552616d ("arm64: MMU initialisation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/init.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -116,7 +116,11 @@ static void __init zone_sizes_init(unsig
+ #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+ int pfn_valid(unsigned long pfn)
+ {
+-      return memblock_is_memory(pfn << PAGE_SHIFT);
++      phys_addr_t addr = pfn << PAGE_SHIFT;
++
++      if ((addr >> PAGE_SHIFT) != pfn)
++              return 0;
++      return memblock_is_memory(addr);
+ }
+ EXPORT_SYMBOL(pfn_valid);
+ #endif
index 13208416233c3e9e5f4c3ff682895fb2c3c3f833..2a503485cedab87a21cf2b3ca36f93d628cbb4d9 100644 (file)
@@ -36,3 +36,4 @@ mm-memory.c-check-return-value-of-ioremap_prot.patch
 btrfs-don-t-leak-ret-from-do_chunk_alloc.patch
 s390-kvm-fix-deadlock-when-killed-by-oom.patch
 ext4-reset-error-code-in-ext4_find_entry-in-fallback.patch
+arm64-mm-check-for-upper-page_shift-bits-in-pfn_valid.patch