git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.17-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 11 Apr 2022 16:41:53 +0000 (18:41 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 11 Apr 2022 16:41:53 +0000 (18:41 +0200)
added patches:
powerpc-fix-virt_addr_valid-for-64-bit-book3e-32-bit.patch

queue-5.17/powerpc-fix-virt_addr_valid-for-64-bit-book3e-32-bit.patch [new file with mode: 0644]
queue-5.17/series

diff --git a/queue-5.17/powerpc-fix-virt_addr_valid-for-64-bit-book3e-32-bit.patch b/queue-5.17/powerpc-fix-virt_addr_valid-for-64-bit-book3e-32-bit.patch
new file mode 100644 (file)
index 0000000..0a9395c
--- /dev/null
+++ b/queue-5.17/powerpc-fix-virt_addr_valid-for-64-bit-book3e-32-bit.patch
@@ -0,0 +1,91 @@
+From ffa0b64e3be58519ae472ea29a1a1ad681e32f48 Mon Sep 17 00:00:00 2001
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Date: Thu, 7 Apr 2022 00:57:57 +1000
+Subject: powerpc: Fix virt_addr_valid() for 64-bit Book3E & 32-bit
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+commit ffa0b64e3be58519ae472ea29a1a1ad681e32f48 upstream.
+
+mpe: On 64-bit Book3E vmalloc space starts at 0x8000000000000000.
+
+Because of the way __pa() works we have:
+  __pa(0x8000000000000000) == 0, and therefore
+  virt_to_pfn(0x8000000000000000) == 0, and therefore
+  virt_addr_valid(0x8000000000000000) == true
+
+Which is wrong; virt_addr_valid() should be false for vmalloc space.
+In fact all vmalloc addresses that alias with a valid PFN will return
+true from virt_addr_valid(). That can cause bugs with hardened usercopy
+as described below by Kefeng Wang:
+
+  When running ethtool eth0 on 64-bit Book3E, a BUG occurred:
+
+    usercopy: Kernel memory exposure attempt detected from SLUB object not in SLUB page?! (offset 0, size 1048)!
+    kernel BUG at mm/usercopy.c:99
+    ...
+    usercopy_abort+0x64/0xa0 (unreliable)
+    __check_heap_object+0x168/0x190
+    __check_object_size+0x1a0/0x200
+    dev_ethtool+0x2494/0x2b20
+    dev_ioctl+0x5d0/0x770
+    sock_do_ioctl+0xf0/0x1d0
+    sock_ioctl+0x3ec/0x5a0
+    __se_sys_ioctl+0xf0/0x160
+    system_call_exception+0xfc/0x1f0
+    system_call_common+0xf8/0x200
+
+  The code in question is shown below:
+
+    data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
+    copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+
+  The data is allocated by vmalloc(), yet virt_addr_valid(ptr) returns true
+  on 64-bit Book3E, which leads to the panic.
+
+  As commit 4dd7554a6456 ("powerpc/64: Add VIRTUAL_BUG_ON checks for __va
+  and __pa addresses") does, make virt_addr_valid() check that the virtual
+  address is above PAGE_OFFSET for 64-bit, and also add an upper-limit
+  check to make sure the address is below high_memory.
+
+  Meanwhile, on 32-bit, PAGE_OFFSET is the virtual address of the start
+  of lowmem and high_memory is the upper bound of the low virtual addresses,
+  so the check is suitable for 32-bit as well; this also fixes the issue
+  mentioned in commit 602946ec2f90 ("powerpc: Set max_mapnr correctly").
+
+On 32-bit there is a similar problem with high memory, which was fixed in
+commit 602946ec2f90 ("powerpc: Set max_mapnr correctly"), but that
+commit breaks highmem and needs to be reverted.
+
+We can't easily fix __pa(); we have code that relies on its current
+behaviour. So for now, add extra checks to virt_addr_valid().
+
+For 64-bit Book3S the extra checks are not necessary; the combination of
+virt_to_pfn() and pfn_valid() should yield the correct result, but they
+are harmless.
+
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+[mpe: Add additional change log detail]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20220406145802.538416-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/page.h |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned lo
+ #define virt_to_page(kaddr)   pfn_to_page(virt_to_pfn(kaddr))
+ #define pfn_to_kaddr(pfn)     __va((pfn) << PAGE_SHIFT)
+ 
+-#define virt_addr_valid(kaddr)        pfn_valid(virt_to_pfn(kaddr))
++#define virt_addr_valid(vaddr)        ({                                      \
++      unsigned long _addr = (unsigned long)vaddr;                     \
++      _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&   \
++      pfn_valid(virt_to_pfn(_addr));                                  \
++})
+ 
+ /*
+  * On Book-E parts we need __va to parse the device tree and we can't
diff --git a/queue-5.17/series b/queue-5.17/series
index 648c4e132b4b6800467039fc507f87f3383b2a65..c4bacf0ffceb6c87ee52ccb33ce61abf31ffc8f3 100644 (file)
--- a/queue-5.17/series
+++ b/queue-5.17/series
@@ -324,6 +324,7 @@ tools-build-use-shell-instead-of-to-get-embedded-libperl-s-ccopts.patch
 dmaengine-revert-dmaengine-shdma-fix-runtime-pm-imbalance-on-error.patch
 kvm-avoid-null-pointer-dereference-in-kvm_dirty_ring_push.patch
 drivers-hv-vmbus-replace-smp_store_mb-with-virt_store_mb.patch
+powerpc-fix-virt_addr_valid-for-64-bit-book3e-32-bit.patch
 revert-powerpc-set-max_mapnr-correctly.patch
 x86-bug-prevent-shadowing-in-__warn_flags.patch
 objtool-fix-sls-validation-for-kcov-tail-call-replacement.patch
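
The following standalone C sketch models the behaviour the queued patch fixes. It is not kernel code: the constants (PAGE_OFFSET, the vmalloc start, the amount of lowmem) and the simple masking __pa() are illustrative assumptions chosen to mirror the 64-bit Book3E behaviour described in the commit message, where __pa(0x8000000000000000) aliases physical address 0.

/*
 * Userspace model only -- not kernel code.  All constants are assumptions
 * for illustration; only the masking behaviour of __pa() and the shape of
 * the old/new virt_addr_valid() checks follow the commit message above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_OFFSET     0xc000000000000000UL  /* assumed start of the linear map */
#define VMALLOC_START   0x8000000000000000UL  /* vmalloc start per the commit message */
#define HIGH_MEMORY     (PAGE_OFFSET + 0x40000000UL)  /* assume 1 GiB of lowmem */
#define MAX_PFN         ((HIGH_MEMORY - PAGE_OFFSET) >> PAGE_SHIFT)

/* Model of Book3E __pa(): drop the top nibble, so 0x8000000000000000 -> 0. */
static uint64_t model_pa(uint64_t va)      { return va & 0x0fffffffffffffffUL; }
static uint64_t virt_to_pfn(uint64_t va)   { return model_pa(va) >> PAGE_SHIFT; }
static bool     pfn_valid(uint64_t pfn)    { return pfn < MAX_PFN; }

/* Old check: only asks whether the aliased PFN exists. */
static bool virt_addr_valid_old(uint64_t va)
{
        return pfn_valid(virt_to_pfn(va));
}

/* New check: the address must also sit inside the linear map. */
static bool virt_addr_valid_new(uint64_t va)
{
        return va >= PAGE_OFFSET && va < HIGH_MEMORY && pfn_valid(virt_to_pfn(va));
}

int main(void)
{
        uint64_t vmalloc_addr = VMALLOC_START;        /* e.g. a vzalloc() buffer */
        uint64_t lowmem_addr  = PAGE_OFFSET + 0x1000; /* a real linear-map address */

        printf("vmalloc address: old=%d new=%d\n",
               virt_addr_valid_old(vmalloc_addr), virt_addr_valid_new(vmalloc_addr));
        printf("lowmem address:  old=%d new=%d\n",
               virt_addr_valid_old(lowmem_addr), virt_addr_valid_new(lowmem_addr));
        return 0;
}

Built with a normal C compiler, the model reports the vmalloc-style address as valid under the old check (its aliased PFN 0 exists) and as invalid under the new check, while a genuine linear-map address passes both.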