git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 12 Mar 2021 10:03:15 +0000 (11:03 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 12 Mar 2021 10:03:15 +0000 (11:03 +0100)
added patches:
powerpc-603-fix-protection-of-user-pages-mapped-with-prot_none.patch

queue-5.4/powerpc-603-fix-protection-of-user-pages-mapped-with-prot_none.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/powerpc-603-fix-protection-of-user-pages-mapped-with-prot_none.patch b/queue-5.4/powerpc-603-fix-protection-of-user-pages-mapped-with-prot_none.patch
new file mode 100644 (file)
index 0000000..800f30a
--- /dev/null
@@ -0,0 +1,96 @@
+From c119565a15a628efdfa51352f9f6c5186e506a1c Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Mon, 1 Feb 2021 06:29:50 +0000
+Subject: powerpc/603: Fix protection of user pages mapped with PROT_NONE
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit c119565a15a628efdfa51352f9f6c5186e506a1c upstream.
+
+On book3s/32, page protection is defined by the PP bits in the PTE
+which provide the following protection depending on the access
+keys defined in the matching segment register:
+- PP 00 means RW with key 0 and N/A with key 1.
+- PP 01 means RW with key 0 and RO with key 1.
+- PP 10 means RW with both key 0 and key 1.
+- PP 11 means RO with both key 0 and key 1.
+
+Since the implementation of kernel userspace access protection,
+PP bits have been set as follows:
+- PP00 for pages without _PAGE_USER
+- PP01 for pages with _PAGE_USER and _PAGE_RW
+- PP11 for pages with _PAGE_USER and without _PAGE_RW
+
+For kernelspace segments, kernel accesses are performed with key 0
+and user accesses are performed with key 1. As PP00 is used for
+non _PAGE_USER pages, user can't access kernel pages not flagged
+_PAGE_USER while kernel can.
+
+For userspace segments, both kernel and user accesses are performed
+with key 0, therefore pages not flagged _PAGE_USER are still
+accessible to the user.
+
+This shouldn't be an issue, because userspace is expected to be
+accessible to the user. But unlike most other architectures, powerpc
+implements PROT_NONE protection by removing _PAGE_USER flag instead of
+flagging the page as not valid. This means that pages in userspace
+that are not flagged _PAGE_USER shall remain inaccessible.
+
+To get the expected behaviour, just mimic other architectures in the
+TLB miss handler by checking _PAGE_USER permission on userspace
+accesses as if it was the _PAGE_PRESENT bit.
+
+Note that this problem is only for 603 cores. The 604+ have
+a hash table, and the hash_page() function already implements the
+verification of _PAGE_USER permission on userspace pages.
+
+Fixes: f342adca3afc ("powerpc/32s: Prepare Kernel Userspace Access Protection")
+Cc: stable@vger.kernel.org # v5.2+
+Reported-by: Christoph Plattner <christoph.plattner@thalesgroup.com>
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/4a0c6e3bb8f0c162457bf54d9bc6fd8d7b55129f.1612160907.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/head_32.S |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kernel/head_32.S
++++ b/arch/powerpc/kernel/head_32.S
+@@ -418,10 +418,11 @@ InstructionTLBMiss:
+       cmplw   0,r1,r3
+ #endif
+       mfspr   r2, SPRN_SPRG_PGDIR
+-      li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
++      li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
+ #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
+       bge-    112f
+       lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
++      li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+       addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
+ #endif
+ 112:  rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
+@@ -480,9 +481,10 @@ DataLoadTLBMiss:
+       lis     r1,PAGE_OFFSET@h                /* check if kernel address */
+       cmplw   0,r1,r3
+       mfspr   r2, SPRN_SPRG_PGDIR
+-      li      r1, _PAGE_PRESENT | _PAGE_ACCESSED
++      li      r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+       bge-    112f
+       lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
++      li      r1, _PAGE_PRESENT | _PAGE_ACCESSED
+       addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
+ 112:  rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
+       lwz     r2,0(r2)                /* get pmd entry */
+@@ -556,9 +558,10 @@ DataStoreTLBMiss:
+       lis     r1,PAGE_OFFSET@h                /* check if kernel address */
+       cmplw   0,r1,r3
+       mfspr   r2, SPRN_SPRG_PGDIR
+-      li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
++      li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+       bge-    112f
+       lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
++      li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+       addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
+ 112:  rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
+       lwz     r2,0(r2)                /* get pmd entry */
index 6684ef28aa0967fef8a31ef5ae5557e332dfcfb5..091499e66e0ce1eb98d96f9ea75ca32e4e825767 100644 (file)
@@ -20,3 +20,4 @@ selftests-bpf-mask-bpf_csum_diff-return-value-to-16-bits-in-test_verifier.patch
 samples-bpf-add-missing-munmap-in-xdpsock.patch
 ibmvnic-always-store-valid-mac-address.patch
 mt76-dma-do-not-report-truncated-frames-to-mac80211.patch
+powerpc-603-fix-protection-of-user-pages-mapped-with-prot_none.patch