From: Greg Kroah-Hartman Date: Fri, 20 Nov 2020 07:19:43 +0000 (+0100) Subject: 4.19-stable patches X-Git-Tag: v4.4.245~22 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=2f88201543e07735dea132d40501b17f2bb30a3f;p=thirdparty%2Fkernel%2Fstable-queue.git 4.19-stable patches added patches: powerpc-64s-flush-l1d-after-user-accesses.patch powerpc-64s-flush-l1d-on-kernel-entry.patch powerpc-64s-move-some-exception-handlers-out-of-line.patch powerpc-add-a-framework-for-user-access-tracking.patch powerpc-fix-__clear_user-with-kuap-enabled.patch powerpc-implement-user_access_begin-and-friends.patch powerpc-uaccess-evaluate-macro-arguments-once-before-user-access-is-allowed.patch --- diff --git a/queue-4.19/powerpc-64s-flush-l1d-after-user-accesses.patch b/queue-4.19/powerpc-64s-flush-l1d-after-user-accesses.patch new file mode 100644 index 00000000000..f5f837d8121 --- /dev/null +++ b/queue-4.19/powerpc-64s-flush-l1d-after-user-accesses.patch @@ -0,0 +1,506 @@ +From foo@baz Fri Nov 20 08:17:41 AM CET 2020 +From: Daniel Axtens +Date: Fri, 20 Nov 2020 10:42:03 +1100 +Subject: powerpc/64s: flush L1D after user accesses +To: stable@vger.kernel.org +Cc: dja@axtens.net +Message-ID: <20201119234203.370400-8-dja@axtens.net> + +From: Nicholas Piggin + +commit 9a32a7e78bd0cd9a9b6332cbdc345ee5ffd0c5de upstream. + +IBM Power9 processors can speculatively operate on data in the L1 cache before +it has been completely validated, via a way-prediction mechanism. It is not possible +for an attacker to determine the contents of impermissible memory using this method, +since these systems implement a combination of hardware and software security measures +to prevent scenarios where protected data could be leaked. + +However these measures don't address the scenario where an attacker induces +the operating system to speculatively execute instructions using data that the +attacker controls. This can be used for example to speculatively bypass "kernel +user access prevention" techniques, as discovered by Anthony Steinhauser of +Google's Safeside Project. This is not an attack by itself, but there is a possibility +it could be used in conjunction with side-channels or other weaknesses in the +privileged code to construct an attack. + +This issue can be mitigated by flushing the L1 cache between privilege boundaries +of concern. This patch flushes the L1 cache after user accesses. + +This is part of the fix for CVE-2020-4788. 
+ +Signed-off-by: Nicholas Piggin +Signed-off-by: Daniel Axtens +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/kernel-parameters.txt | 4 + + arch/powerpc/include/asm/book3s/64/kup-radix.h | 22 ++++++ + arch/powerpc/include/asm/feature-fixups.h | 9 ++ + arch/powerpc/include/asm/kup.h | 4 + + arch/powerpc/include/asm/security_features.h | 3 + arch/powerpc/include/asm/setup.h | 1 + arch/powerpc/kernel/exceptions-64s.S | 81 +++++++----------------- + arch/powerpc/kernel/setup_64.c | 62 ++++++++++++++++++ + arch/powerpc/kernel/vmlinux.lds.S | 7 ++ + arch/powerpc/lib/feature-fixups.c | 50 ++++++++++++++ + arch/powerpc/platforms/powernv/setup.c | 10 ++ + arch/powerpc/platforms/pseries/setup.c | 4 + + 12 files changed, 198 insertions(+), 59 deletions(-) + create mode 100644 arch/powerpc/include/asm/book3s/64/kup-radix.h + +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2561,6 +2561,7 @@ + tsx_async_abort=off [X86] + kvm.nx_huge_pages=off [X86] + no_entry_flush [PPC] ++ no_uaccess_flush [PPC] + + Exceptions: + This does not have any effect on +@@ -2922,6 +2923,9 @@ + nospec_store_bypass_disable + [HW] Disable all mitigations for the Speculative Store Bypass vulnerability + ++ no_uaccess_flush ++ [PPC] Don't flush the L1-D cache after accessing user data. ++ + noxsave [BUGS=X86] Disables x86 extended register state save + and restore using xsave. The kernel will fallback to + enabling legacy floating-point and sse state. +--- /dev/null ++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h +@@ -0,0 +1,22 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H ++#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H ++ ++DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); ++ ++/* Prototype for function defined in exceptions-64s.S */ ++void do_uaccess_flush(void); ++ ++static __always_inline void allow_user_access(void __user *to, const void __user *from, ++ unsigned long size) ++{ ++} ++ ++static inline void prevent_user_access(void __user *to, const void __user *from, ++ unsigned long size) ++{ ++ if (static_branch_unlikely(&uaccess_flush_key)) ++ do_uaccess_flush(); ++} ++ ++#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */ +--- a/arch/powerpc/include/asm/feature-fixups.h ++++ b/arch/powerpc/include/asm/feature-fixups.h +@@ -205,6 +205,14 @@ label##3: \ + FTR_ENTRY_OFFSET 955b-956b; \ + .popsection; + ++#define UACCESS_FLUSH_FIXUP_SECTION \ ++959: \ ++ .pushsection __uaccess_flush_fixup,"a"; \ ++ .align 2; \ ++960: \ ++ FTR_ENTRY_OFFSET 959b-960b; \ ++ .popsection; ++ + #define ENTRY_FLUSH_FIXUP_SECTION \ + 957: \ + .pushsection __entry_flush_fixup,"a"; \ +@@ -248,6 +256,7 @@ extern long stf_barrier_fallback; + extern long entry_flush_fallback; + extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; + extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; ++extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup; + extern long __start___entry_flush_fixup, __stop___entry_flush_fixup; + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; + extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; +--- a/arch/powerpc/include/asm/kup.h ++++ b/arch/powerpc/include/asm/kup.h +@@ -6,10 +6,14 @@ + + #include + ++#ifdef CONFIG_PPC_BOOK3S_64 ++#include ++#else + static inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size) { } + static inline void prevent_user_access(void 
__user *to, const void __user *from, + unsigned long size) { } ++#endif /* CONFIG_PPC_BOOK3S_64 */ + + static inline void allow_read_from_user(const void __user *from, unsigned long size) + { +--- a/arch/powerpc/include/asm/security_features.h ++++ b/arch/powerpc/include/asm/security_features.h +@@ -87,6 +87,8 @@ static inline bool security_ftr_enabled( + // The L1-D cache should be flushed when entering the kernel + #define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull + ++// The L1-D cache should be flushed after user accesses from the kernel ++#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull + + // Features enabled by default + #define SEC_FTR_DEFAULT \ +@@ -94,6 +96,7 @@ static inline bool security_ftr_enabled( + SEC_FTR_L1D_FLUSH_PR | \ + SEC_FTR_BNDS_CHK_SPEC_BAR | \ + SEC_FTR_L1D_FLUSH_ENTRY | \ ++ SEC_FTR_L1D_FLUSH_UACCESS | \ + SEC_FTR_FAVOUR_SECURITY) + + #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -60,6 +60,7 @@ void setup_barrier_nospec(void); + #else + static inline void setup_barrier_nospec(void) { }; + #endif ++void do_uaccess_flush_fixups(enum l1d_flush_type types); + void do_entry_flush_fixups(enum l1d_flush_type types); + void do_barrier_nospec_fixups(bool enable); + extern bool barrier_nospec_enabled; +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -1529,11 +1529,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback) + .endr + blr + +-TRAMP_REAL_BEGIN(entry_flush_fallback) +- std r9,PACA_EXRFI+EX_R9(r13) +- std r10,PACA_EXRFI+EX_R10(r13) +- std r11,PACA_EXRFI+EX_R11(r13) +- mfctr r9 ++/* Clobbers r10, r11, ctr */ ++.macro L1D_DISPLACEMENT_FLUSH + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) + ld r11,PACA_L1D_FLUSH_SIZE(r13) + srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ +@@ -1559,7 +1556,14 @@ TRAMP_REAL_BEGIN(entry_flush_fallback) + ld r11,(0x80 + 8)*7(r10) + addi r10,r10,0x80*8 + bdnz 1b ++.endm + ++TRAMP_REAL_BEGIN(entry_flush_fallback) ++ std r9,PACA_EXRFI+EX_R9(r13) ++ std r10,PACA_EXRFI+EX_R10(r13) ++ std r11,PACA_EXRFI+EX_R11(r13) ++ mfctr r9 ++ L1D_DISPLACEMENT_FLUSH + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) +@@ -1575,32 +1579,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + mfctr r9 +- ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) +- ld r11,PACA_L1D_FLUSH_SIZE(r13) +- srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ +- mtctr r11 +- DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ +- +- /* order ld/st prior to dcbt stop all streams with flushing */ +- sync +- +- /* +- * The load adresses are at staggered offsets within cachelines, +- * which suits some pipelines better (on others it should not +- * hurt). 
+- */ +-1: +- ld r11,(0x80 + 8)*0(r10) +- ld r11,(0x80 + 8)*1(r10) +- ld r11,(0x80 + 8)*2(r10) +- ld r11,(0x80 + 8)*3(r10) +- ld r11,(0x80 + 8)*4(r10) +- ld r11,(0x80 + 8)*5(r10) +- ld r11,(0x80 + 8)*6(r10) +- ld r11,(0x80 + 8)*7(r10) +- addi r10,r10,0x80*8 +- bdnz 1b +- ++ L1D_DISPLACEMENT_FLUSH + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) +@@ -1618,32 +1597,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + mfctr r9 +- ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) +- ld r11,PACA_L1D_FLUSH_SIZE(r13) +- srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ +- mtctr r11 +- DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ +- +- /* order ld/st prior to dcbt stop all streams with flushing */ +- sync +- +- /* +- * The load adresses are at staggered offsets within cachelines, +- * which suits some pipelines better (on others it should not +- * hurt). +- */ +-1: +- ld r11,(0x80 + 8)*0(r10) +- ld r11,(0x80 + 8)*1(r10) +- ld r11,(0x80 + 8)*2(r10) +- ld r11,(0x80 + 8)*3(r10) +- ld r11,(0x80 + 8)*4(r10) +- ld r11,(0x80 + 8)*5(r10) +- ld r11,(0x80 + 8)*6(r10) +- ld r11,(0x80 + 8)*7(r10) +- addi r10,r10,0x80*8 +- bdnz 1b +- ++ L1D_DISPLACEMENT_FLUSH + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) +@@ -1652,6 +1606,19 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) + GET_SCRATCH0(r13); + hrfid + ++USE_TEXT_SECTION() ++ ++_GLOBAL(do_uaccess_flush) ++ UACCESS_FLUSH_FIXUP_SECTION ++ nop ++ nop ++ nop ++ blr ++ L1D_DISPLACEMENT_FLUSH ++ blr ++_ASM_NOKPROBE_SYMBOL(do_uaccess_flush) ++EXPORT_SYMBOL(do_uaccess_flush) ++ + /* + * Real mode exceptions actually use this too, but alternate + * instruction code patches (which end up in the common .text area) +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -864,8 +864,12 @@ static enum l1d_flush_type enabled_flush + static void *l1d_flush_fallback_area; + static bool no_rfi_flush; + static bool no_entry_flush; ++static bool no_uaccess_flush; + bool rfi_flush; + bool entry_flush; ++bool uaccess_flush; ++DEFINE_STATIC_KEY_FALSE(uaccess_flush_key); ++EXPORT_SYMBOL(uaccess_flush_key); + + static int __init handle_no_rfi_flush(char *p) + { +@@ -883,6 +887,14 @@ static int __init handle_no_entry_flush( + } + early_param("no_entry_flush", handle_no_entry_flush); + ++static int __init handle_no_uaccess_flush(char *p) ++{ ++ pr_info("uaccess-flush: disabled on command line."); ++ no_uaccess_flush = true; ++ return 0; ++} ++early_param("no_uaccess_flush", handle_no_uaccess_flush); ++ + /* + * The RFI flush is not KPTI, but because users will see doco that says to use + * nopti we hijack that option here to also disable the RFI flush. 
+@@ -926,6 +938,20 @@ void entry_flush_enable(bool enable) + entry_flush = enable; + } + ++void uaccess_flush_enable(bool enable) ++{ ++ if (enable) { ++ do_uaccess_flush_fixups(enabled_flush_types); ++ static_branch_enable(&uaccess_flush_key); ++ on_each_cpu(do_nothing, NULL, 1); ++ } else { ++ static_branch_disable(&uaccess_flush_key); ++ do_uaccess_flush_fixups(L1D_FLUSH_NONE); ++ } ++ ++ uaccess_flush = enable; ++} ++ + static void __ref init_fallback_flush(void) + { + u64 l1d_size, limit; +@@ -992,6 +1018,15 @@ void setup_entry_flush(bool enable) + entry_flush_enable(enable); + } + ++void setup_uaccess_flush(bool enable) ++{ ++ if (cpu_mitigations_off()) ++ return; ++ ++ if (!no_uaccess_flush) ++ uaccess_flush_enable(enable); ++} ++ + #ifdef CONFIG_DEBUG_FS + static int rfi_flush_set(void *data, u64 val) + { +@@ -1045,10 +1080,37 @@ static int entry_flush_get(void *data, u + + DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); + ++static int uaccess_flush_set(void *data, u64 val) ++{ ++ bool enable; ++ ++ if (val == 1) ++ enable = true; ++ else if (val == 0) ++ enable = false; ++ else ++ return -EINVAL; ++ ++ /* Only do anything if we're changing state */ ++ if (enable != uaccess_flush) ++ uaccess_flush_enable(enable); ++ ++ return 0; ++} ++ ++static int uaccess_flush_get(void *data, u64 *val) ++{ ++ *val = uaccess_flush ? 1 : 0; ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n"); ++ + static __init int rfi_flush_debugfs_init(void) + { + debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); + debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush); ++ debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush); + return 0; + } + device_initcall(rfi_flush_debugfs_init); +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -141,6 +141,13 @@ SECTIONS + } + + . = ALIGN(8); ++ __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) { ++ __start___uaccess_flush_fixup = .; ++ *(__uaccess_flush_fixup) ++ __stop___uaccess_flush_fixup = .; ++ } ++ ++ . 
= ALIGN(8); + __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) { + __start___entry_flush_fixup = .; + *(__entry_flush_fixup) +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -232,6 +232,56 @@ void do_stf_barrier_fixups(enum stf_barr + do_stf_exit_barrier_fixups(types); + } + ++void do_uaccess_flush_fixups(enum l1d_flush_type types) ++{ ++ unsigned int instrs[4], *dest; ++ long *start, *end; ++ int i; ++ ++ start = PTRRELOC(&__start___uaccess_flush_fixup); ++ end = PTRRELOC(&__stop___uaccess_flush_fixup); ++ ++ instrs[0] = 0x60000000; /* nop */ ++ instrs[1] = 0x60000000; /* nop */ ++ instrs[2] = 0x60000000; /* nop */ ++ instrs[3] = 0x4e800020; /* blr */ ++ ++ i = 0; ++ if (types == L1D_FLUSH_FALLBACK) { ++ instrs[3] = 0x60000000; /* nop */ ++ /* fallthrough to fallback flush */ ++ } ++ ++ if (types & L1D_FLUSH_ORI) { ++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ ++ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ ++ } ++ ++ if (types & L1D_FLUSH_MTTRIG) ++ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ ++ patch_instruction(dest, instrs[0]); ++ ++ patch_instruction((dest + 1), instrs[1]); ++ patch_instruction((dest + 2), instrs[2]); ++ patch_instruction((dest + 3), instrs[3]); ++ } ++ ++ printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i, ++ (types == L1D_FLUSH_NONE) ? "no" : ++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : ++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) ++ ? "ori+mttrig type" ++ : "ori type" : ++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type" ++ : "unknown"); ++} ++ + void do_entry_flush_fixups(enum l1d_flush_type types) + { + unsigned int instrs[3], *dest; +--- a/arch/powerpc/platforms/powernv/setup.c ++++ b/arch/powerpc/platforms/powernv/setup.c +@@ -127,10 +127,12 @@ static void pnv_setup_rfi_flush(void) + + /* + * If we are non-Power9 bare metal, we don't need to flush on kernel +- * entry: it fixes a P9 specific vulnerability. ++ * entry or after user access: they fix a P9 specific vulnerability. 
+ */ +- if (!pvr_version_is(PVR_POWER9)) ++ if (!pvr_version_is(PVR_POWER9)) { + security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); ++ } + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ +@@ -142,6 +144,10 @@ static void pnv_setup_rfi_flush(void) + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); + setup_entry_flush(enable); ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); ++ setup_uaccess_flush(enable); + } + + static void __init pnv_setup_arch(void) +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -569,6 +569,10 @@ void pseries_setup_rfi_flush(void) + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); + setup_entry_flush(enable); ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); ++ setup_uaccess_flush(enable); + } + + #ifdef CONFIG_PCI_IOV diff --git a/queue-4.19/powerpc-64s-flush-l1d-on-kernel-entry.patch b/queue-4.19/powerpc-64s-flush-l1d-on-kernel-entry.patch new file mode 100644 index 00000000000..530e0677b58 --- /dev/null +++ b/queue-4.19/powerpc-64s-flush-l1d-on-kernel-entry.patch @@ -0,0 +1,457 @@ +From foo@baz Fri Nov 20 08:17:41 AM CET 2020 +From: Daniel Axtens +Date: Fri, 20 Nov 2020 10:41:58 +1100 +Subject: powerpc/64s: flush L1D on kernel entry +To: stable@vger.kernel.org +Cc: dja@axtens.net +Message-ID: <20201119234203.370400-3-dja@axtens.net> + +From: Nicholas Piggin + +commit f79643787e0a0762d2409b7b8334e83f22d85695 upstream. + +IBM Power9 processors can speculatively operate on data in the L1 cache before +it has been completely validated, via a way-prediction mechanism. It is not possible +for an attacker to determine the contents of impermissible memory using this method, +since these systems implement a combination of hardware and software security measures +to prevent scenarios where protected data could be leaked. + +However these measures don't address the scenario where an attacker induces +the operating system to speculatively execute instructions using data that the +attacker controls. This can be used for example to speculatively bypass "kernel +user access prevention" techniques, as discovered by Anthony Steinhauser of +Google's Safeside Project. This is not an attack by itself, but there is a possibility +it could be used in conjunction with side-channels or other weaknesses in the +privileged code to construct an attack. + +This issue can be mitigated by flushing the L1 cache between privilege boundaries +of concern. This patch flushes the L1 cache on kernel entry. + +This is part of the fix for CVE-2020-4788. 
+ +Signed-off-by: Nicholas Piggin +Signed-off-by: Daniel Axtens +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/kernel-parameters.txt | 3 + + arch/powerpc/include/asm/exception-64s.h | 9 +++ + arch/powerpc/include/asm/feature-fixups.h | 10 ++++ + arch/powerpc/include/asm/security_features.h | 4 + + arch/powerpc/include/asm/setup.h | 3 + + arch/powerpc/kernel/exceptions-64s.S | 47 ++++++++++++++++-- + arch/powerpc/kernel/setup_64.c | 60 +++++++++++++++++++++++- + arch/powerpc/kernel/vmlinux.lds.S | 7 ++ + arch/powerpc/lib/feature-fixups.c | 54 +++++++++++++++++++++ + arch/powerpc/platforms/powernv/setup.c | 11 ++++ + arch/powerpc/platforms/pseries/setup.c | 4 + + 11 files changed, 205 insertions(+), 7 deletions(-) + +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2560,6 +2560,7 @@ + mds=off [X86] + tsx_async_abort=off [X86] + kvm.nx_huge_pages=off [X86] ++ no_entry_flush [PPC] + + Exceptions: + This does not have any effect on +@@ -2870,6 +2871,8 @@ + + noefi Disable EFI runtime services support. + ++ no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel. ++ + noexec [IA-64] + + noexec [X86] +--- a/arch/powerpc/include/asm/exception-64s.h ++++ b/arch/powerpc/include/asm/exception-64s.h +@@ -90,11 +90,18 @@ + nop; \ + nop + ++#define ENTRY_FLUSH_SLOT \ ++ ENTRY_FLUSH_FIXUP_SECTION; \ ++ nop; \ ++ nop; \ ++ nop; ++ + /* + * r10 must be free to use, r13 must be paca + */ + #define INTERRUPT_TO_KERNEL \ +- STF_ENTRY_BARRIER_SLOT ++ STF_ENTRY_BARRIER_SLOT; \ ++ ENTRY_FLUSH_SLOT + + /* + * Macros for annotating the expected destination of (h)rfid +--- a/arch/powerpc/include/asm/feature-fixups.h ++++ b/arch/powerpc/include/asm/feature-fixups.h +@@ -205,6 +205,14 @@ label##3: \ + FTR_ENTRY_OFFSET 955b-956b; \ + .popsection; + ++#define ENTRY_FLUSH_FIXUP_SECTION \ ++957: \ ++ .pushsection __entry_flush_fixup,"a"; \ ++ .align 2; \ ++958: \ ++ FTR_ENTRY_OFFSET 957b-958b; \ ++ .popsection; ++ + #define RFI_FLUSH_FIXUP_SECTION \ + 951: \ + .pushsection __rfi_flush_fixup,"a"; \ +@@ -237,8 +245,10 @@ label##3: \ + #include + + extern long stf_barrier_fallback; ++extern long entry_flush_fallback; + extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; + extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; ++extern long __start___entry_flush_fixup, __stop___entry_flush_fixup; + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; + extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; + extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; +--- a/arch/powerpc/include/asm/security_features.h ++++ b/arch/powerpc/include/asm/security_features.h +@@ -84,12 +84,16 @@ static inline bool security_ftr_enabled( + // Software required to flush link stack on context switch + #define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull + ++// The L1-D cache should be flushed when entering the kernel ++#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull ++ + + // Features enabled by default + #define SEC_FTR_DEFAULT \ + (SEC_FTR_L1D_FLUSH_HV | \ + SEC_FTR_L1D_FLUSH_PR | \ + SEC_FTR_BNDS_CHK_SPEC_BAR | \ ++ SEC_FTR_L1D_FLUSH_ENTRY | \ + SEC_FTR_FAVOUR_SECURITY) + + #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -52,12 +52,15 @@ enum l1d_flush_type { + }; + + void setup_rfi_flush(enum l1d_flush_type, bool enable); ++void setup_entry_flush(bool 
enable); ++void setup_uaccess_flush(bool enable); + void do_rfi_flush_fixups(enum l1d_flush_type types); + #ifdef CONFIG_PPC_BARRIER_NOSPEC + void setup_barrier_nospec(void); + #else + static inline void setup_barrier_nospec(void) { }; + #endif ++void do_entry_flush_fixups(enum l1d_flush_type types); + void do_barrier_nospec_fixups(bool enable); + extern bool barrier_nospec_enabled; + +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -540,7 +540,7 @@ EXC_COMMON_BEGIN(unrecover_mce) + b 1b + + +-EXC_REAL(data_access, 0x300, 0x80) ++EXC_REAL_OOL(data_access, 0x300, 0x80) + EXC_VIRT(data_access, 0x4300, 0x80, 0x300) + TRAMP_KVM_SKIP(PACA_EXGEN, 0x300) + +@@ -596,7 +596,7 @@ EXC_VIRT_END(data_access_slb, 0x4380, 0x + TRAMP_KVM_SKIP(PACA_EXSLB, 0x380) + + +-EXC_REAL(instruction_access, 0x400, 0x80) ++EXC_REAL_OOL(instruction_access, 0x400, 0x80) + EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400) + TRAMP_KVM(PACA_EXGEN, 0x400) + +@@ -889,13 +889,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) + + + EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED) +-EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED) ++EXC_VIRT_OOL_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED) + TRAMP_KVM(PACA_EXGEN, 0x900) + EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) + + +-EXC_REAL_HV(hdecrementer, 0x980, 0x80) +-EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980) ++EXC_REAL_OOL_HV(hdecrementer, 0x980, 0x80) ++EXC_VIRT_OOL_HV(hdecrementer, 0x4980, 0x80, 0x980) + TRAMP_KVM_HV(PACA_EXGEN, 0x980) + EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt) + +@@ -1529,6 +1529,43 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback) + .endr + blr + ++TRAMP_REAL_BEGIN(entry_flush_fallback) ++ std r9,PACA_EXRFI+EX_R9(r13) ++ std r10,PACA_EXRFI+EX_R10(r13) ++ std r11,PACA_EXRFI+EX_R11(r13) ++ mfctr r9 ++ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) ++ ld r11,PACA_L1D_FLUSH_SIZE(r13) ++ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ ++ mtctr r11 ++ DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ ++ ++ /* order ld/st prior to dcbt stop all streams with flushing */ ++ sync ++ ++ /* ++ * The load addresses are at staggered offsets within cachelines, ++ * which suits some pipelines better (on others it should not ++ * hurt). 
++ */ ++1: ++ ld r11,(0x80 + 8)*0(r10) ++ ld r11,(0x80 + 8)*1(r10) ++ ld r11,(0x80 + 8)*2(r10) ++ ld r11,(0x80 + 8)*3(r10) ++ ld r11,(0x80 + 8)*4(r10) ++ ld r11,(0x80 + 8)*5(r10) ++ ld r11,(0x80 + 8)*6(r10) ++ ld r11,(0x80 + 8)*7(r10) ++ addi r10,r10,0x80*8 ++ bdnz 1b ++ ++ mtctr r9 ++ ld r9,PACA_EXRFI+EX_R9(r13) ++ ld r10,PACA_EXRFI+EX_R10(r13) ++ ld r11,PACA_EXRFI+EX_R11(r13) ++ blr ++ + TRAMP_REAL_BEGIN(rfi_flush_fallback) + SET_SCRATCH0(r13); + GET_PACA(r13); +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -863,7 +863,9 @@ early_initcall(disable_hardlockup_detect + static enum l1d_flush_type enabled_flush_types; + static void *l1d_flush_fallback_area; + static bool no_rfi_flush; ++static bool no_entry_flush; + bool rfi_flush; ++bool entry_flush; + + static int __init handle_no_rfi_flush(char *p) + { +@@ -873,6 +875,14 @@ static int __init handle_no_rfi_flush(ch + } + early_param("no_rfi_flush", handle_no_rfi_flush); + ++static int __init handle_no_entry_flush(char *p) ++{ ++ pr_info("entry-flush: disabled on command line."); ++ no_entry_flush = true; ++ return 0; ++} ++early_param("no_entry_flush", handle_no_entry_flush); ++ + /* + * The RFI flush is not KPTI, but because users will see doco that says to use + * nopti we hijack that option here to also disable the RFI flush. +@@ -904,6 +914,18 @@ void rfi_flush_enable(bool enable) + rfi_flush = enable; + } + ++void entry_flush_enable(bool enable) ++{ ++ if (enable) { ++ do_entry_flush_fixups(enabled_flush_types); ++ on_each_cpu(do_nothing, NULL, 1); ++ } else { ++ do_entry_flush_fixups(L1D_FLUSH_NONE); ++ } ++ ++ entry_flush = enable; ++} ++ + static void __ref init_fallback_flush(void) + { + u64 l1d_size, limit; +@@ -957,10 +979,19 @@ void setup_rfi_flush(enum l1d_flush_type + + enabled_flush_types = types; + +- if (!no_rfi_flush && !cpu_mitigations_off()) ++ if (!cpu_mitigations_off() && !no_rfi_flush) + rfi_flush_enable(enable); + } + ++void setup_entry_flush(bool enable) ++{ ++ if (cpu_mitigations_off()) ++ return; ++ ++ if (!no_entry_flush) ++ entry_flush_enable(enable); ++} ++ + #ifdef CONFIG_DEBUG_FS + static int rfi_flush_set(void *data, u64 val) + { +@@ -988,9 +1019,36 @@ static int rfi_flush_get(void *data, u64 + + DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); + ++static int entry_flush_set(void *data, u64 val) ++{ ++ bool enable; ++ ++ if (val == 1) ++ enable = true; ++ else if (val == 0) ++ enable = false; ++ else ++ return -EINVAL; ++ ++ /* Only do anything if we're changing state */ ++ if (enable != entry_flush) ++ entry_flush_enable(enable); ++ ++ return 0; ++} ++ ++static int entry_flush_get(void *data, u64 *val) ++{ ++ *val = entry_flush ? 1 : 0; ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); ++ + static __init int rfi_flush_debugfs_init(void) + { + debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); ++ debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush); + return 0; + } + device_initcall(rfi_flush_debugfs_init); +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -141,6 +141,13 @@ SECTIONS + } + + . = ALIGN(8); ++ __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) { ++ __start___entry_flush_fixup = .; ++ *(__entry_flush_fixup) ++ __stop___entry_flush_fixup = .; ++ } ++ ++ . 
= ALIGN(8); + __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { + __start___stf_exit_barrier_fixup = .; + *(__stf_exit_barrier_fixup) +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -232,6 +232,60 @@ void do_stf_barrier_fixups(enum stf_barr + do_stf_exit_barrier_fixups(types); + } + ++void do_entry_flush_fixups(enum l1d_flush_type types) ++{ ++ unsigned int instrs[3], *dest; ++ long *start, *end; ++ int i; ++ ++ start = PTRRELOC(&__start___entry_flush_fixup); ++ end = PTRRELOC(&__stop___entry_flush_fixup); ++ ++ instrs[0] = 0x60000000; /* nop */ ++ instrs[1] = 0x60000000; /* nop */ ++ instrs[2] = 0x60000000; /* nop */ ++ ++ i = 0; ++ if (types == L1D_FLUSH_FALLBACK) { ++ instrs[i++] = 0x7d4802a6; /* mflr r10 */ ++ instrs[i++] = 0x60000000; /* branch patched below */ ++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */ ++ } ++ ++ if (types & L1D_FLUSH_ORI) { ++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ ++ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ ++ } ++ ++ if (types & L1D_FLUSH_MTTRIG) ++ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ ++ patch_instruction(dest, instrs[0]); ++ ++ if (types == L1D_FLUSH_FALLBACK) ++ patch_branch((dest + 1), (unsigned long)&entry_flush_fallback, ++ BRANCH_SET_LINK); ++ else ++ patch_instruction((dest + 1), instrs[1]); ++ ++ patch_instruction((dest + 2), instrs[2]); ++ } ++ ++ printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i, ++ (types == L1D_FLUSH_NONE) ? "no" : ++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : ++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) ++ ? "ori+mttrig type" ++ : "ori type" : ++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type" ++ : "unknown"); ++} ++ + void do_rfi_flush_fixups(enum l1d_flush_type types) + { + unsigned int instrs[3], *dest; +--- a/arch/powerpc/platforms/powernv/setup.c ++++ b/arch/powerpc/platforms/powernv/setup.c +@@ -125,12 +125,23 @@ static void pnv_setup_rfi_flush(void) + type = L1D_FLUSH_ORI; + } + ++ /* ++ * If we are non-Power9 bare metal, we don't need to flush on kernel ++ * entry: it fixes a P9 specific vulnerability. 
++ */ ++ if (!pvr_version_is(PVR_POWER9)) ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); ++ + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); + setup_count_cache_flush(); ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); ++ setup_entry_flush(enable); + } + + static void __init pnv_setup_arch(void) +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -565,6 +565,10 @@ void pseries_setup_rfi_flush(void) + + setup_rfi_flush(types, enable); + setup_count_cache_flush(); ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); ++ setup_entry_flush(enable); + } + + #ifdef CONFIG_PCI_IOV diff --git a/queue-4.19/powerpc-64s-move-some-exception-handlers-out-of-line.patch b/queue-4.19/powerpc-64s-move-some-exception-handlers-out-of-line.patch new file mode 100644 index 00000000000..332211880d7 --- /dev/null +++ b/queue-4.19/powerpc-64s-move-some-exception-handlers-out-of-line.patch @@ -0,0 +1,62 @@ +From foo@baz Fri Nov 20 08:17:41 AM CET 2020 +From: Daniel Axtens +Date: Fri, 20 Nov 2020 10:41:57 +1100 +Subject: powerpc/64s: move some exception handlers out of line +To: stable@vger.kernel.org +Cc: dja@axtens.net +Message-ID: <20201119234203.370400-2-dja@axtens.net> + +From: Daniel Axtens + +(backport only) + +We're about to grow the exception handlers, which will make a bunch of them +no longer fit within the space available. We move them out of line. + +This is a fiddly and error-prone business, so in the interests of reviewability +I haven't merged this in with the addition of the entry flush. 
+ +Signed-off-by: Daniel Axtens +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/exceptions-64s.S | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -572,13 +572,16 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TY + EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXSLB) ++ b tramp_data_access_slb ++EXC_REAL_END(data_access_slb, 0x380, 0x80) ++ ++TRAMP_REAL_BEGIN(tramp_data_access_slb) + EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) + mr r12,r3 /* save r3 */ + mfspr r3,SPRN_DAR + mfspr r11,SPRN_SRR1 + crset 4*cr6+eq + BRANCH_TO_COMMON(r10, slb_miss_common) +-EXC_REAL_END(data_access_slb, 0x380, 0x80) + + EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) + SET_SCRATCH0(r13) +@@ -616,13 +619,16 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TY + EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXSLB) ++ b tramp_instruction_access_slb ++EXC_REAL_END(instruction_access_slb, 0x480, 0x80) ++ ++TRAMP_REAL_BEGIN(tramp_instruction_access_slb) + EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480) + mr r12,r3 /* save r3 */ + mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ + mfspr r11,SPRN_SRR1 + crclr 4*cr6+eq + BRANCH_TO_COMMON(r10, slb_miss_common) +-EXC_REAL_END(instruction_access_slb, 0x480, 0x80) + + EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) + SET_SCRATCH0(r13) diff --git a/queue-4.19/powerpc-add-a-framework-for-user-access-tracking.patch b/queue-4.19/powerpc-add-a-framework-for-user-access-tracking.patch new file mode 100644 index 00000000000..0cac0c4d213 --- /dev/null +++ b/queue-4.19/powerpc-add-a-framework-for-user-access-tracking.patch @@ -0,0 +1,276 @@ +From foo@baz Fri Nov 20 08:17:41 AM CET 2020 +From: Daniel Axtens +Date: Fri, 20 Nov 2020 10:41:59 +1100 +Subject: powerpc: Add a framework for user access tracking +To: stable@vger.kernel.org +Cc: dja@axtens.net +Message-ID: <20201119234203.370400-4-dja@axtens.net> + +From: Christophe Leroy + +Backported from commit de78a9c42a79 ("powerpc: Add a framework +for Kernel Userspace Access Protection"). Here we don't try to +add the KUAP framework, we just want the helper functions +because we want to put uaccess flush helpers in them. + +In terms of fixes, we don't need commit 1d8f739b07bd ("powerpc/kuap: +Fix set direction in allow/prevent_user_access()") as we don't have +real KUAP. Likewise as all our allows are noops and all our prevents +are just flushes, we don't need commit 9dc086f1e9ef ("powerpc/futex: +Fix incorrect user access blocking") The other 2 fixes we do need. + +The original description is: + +This patch implements a framework for Kernel Userspace Access +Protection. + +Then subarches will have the possibility to provide their own +implementation by providing setup_kuap() and +allow/prevent_user_access(). + +Some platforms will need to know the area accessed and whether it is +accessed from read, write or both. Therefore source, destination and +size and handed over to the two functions. + +mpe: Rename to allow/prevent rather than unlock/lock, and add +read/write wrappers. Drop the 32-bit code for now until we have an +implementation for it. Add kuap to pt_regs for 64-bit as well as +32-bit. Don't split strings, use pr_crit_ratelimited(). 
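A minimal sketch of the calling pattern these helpers enable (the allow/prevent names are taken from this series; the wrapper function itself is illustrative only, the real conversions are in the uaccess routines changed below):

    static inline unsigned long copy_to_user_sketch(void __user *to,
                                                    const void *from,
                                                    unsigned long n)
    {
            unsigned long ret;

            /* Open the user-access window: expands to
             * allow_user_access(to, NULL, n), a no-op until a subarch
             * provides an implementation. */
            allow_write_to_user(to, n);
            ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
            /* Close the window again: prevent_user_access(to, NULL, n).
             * The last patch in this series makes this the point where
             * the L1D flush happens on Book3S-64. */
            prevent_write_to_user(to, n);

            return ret;
    }

Handing both pointers and the size to the helpers is what lets a subarch decide whether it needs to open the window for reads, writes, or both.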
+ +Signed-off-by: Christophe Leroy +Signed-off-by: Russell Currey +Signed-off-by: Michael Ellerman +Signed-off-by: Daniel Axtens +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/futex.h | 4 +++ + arch/powerpc/include/asm/kup.h | 36 +++++++++++++++++++++++++++++++++ + arch/powerpc/include/asm/uaccess.h | 38 +++++++++++++++++++++++++++-------- + arch/powerpc/lib/checksum_wrappers.c | 4 +++ + 4 files changed, 74 insertions(+), 8 deletions(-) + create mode 100644 arch/powerpc/include/asm/kup.h + +--- a/arch/powerpc/include/asm/futex.h ++++ b/arch/powerpc/include/asm/futex.h +@@ -35,6 +35,7 @@ static inline int arch_futex_atomic_op_i + { + int oldval = 0, ret; + ++ allow_write_to_user(uaddr, sizeof(*uaddr)); + pagefault_disable(); + + switch (op) { +@@ -61,6 +62,7 @@ static inline int arch_futex_atomic_op_i + + *oval = oldval; + ++ prevent_write_to_user(uaddr, sizeof(*uaddr)); + return ret; + } + +@@ -74,6 +76,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + ++ allow_write_to_user(uaddr, sizeof(*uaddr)); + __asm__ __volatile__ ( + PPC_ATOMIC_ENTRY_BARRIER + "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ +@@ -94,6 +97,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, + : "cc", "memory"); + + *uval = prev; ++ prevent_write_to_user(uaddr, sizeof(*uaddr)); + return ret; + } + +--- /dev/null ++++ b/arch/powerpc/include/asm/kup.h +@@ -0,0 +1,36 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_POWERPC_KUP_H_ ++#define _ASM_POWERPC_KUP_H_ ++ ++#ifndef __ASSEMBLY__ ++ ++#include ++ ++static inline void allow_user_access(void __user *to, const void __user *from, ++ unsigned long size) { } ++static inline void prevent_user_access(void __user *to, const void __user *from, ++ unsigned long size) { } ++ ++static inline void allow_read_from_user(const void __user *from, unsigned long size) ++{ ++ allow_user_access(NULL, from, size); ++} ++ ++static inline void allow_write_to_user(void __user *to, unsigned long size) ++{ ++ allow_user_access(to, NULL, size); ++} ++ ++static inline void prevent_read_from_user(const void __user *from, unsigned long size) ++{ ++ prevent_user_access(NULL, from, size); ++} ++ ++static inline void prevent_write_to_user(void __user *to, unsigned long size) ++{ ++ prevent_user_access(to, NULL, size); ++} ++ ++#endif /* !__ASSEMBLY__ */ ++ ++#endif /* _ASM_POWERPC_KUP_H_ */ +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + + /* + * The fs value determines whether argument validity checking should be +@@ -141,6 +142,7 @@ extern long __put_user_bad(void); + #define __put_user_size(x, ptr, size, retval) \ + do { \ + retval = 0; \ ++ allow_write_to_user(ptr, size); \ + switch (size) { \ + case 1: __put_user_asm(x, ptr, retval, "stb"); break; \ + case 2: __put_user_asm(x, ptr, retval, "sth"); break; \ +@@ -148,6 +150,7 @@ do { \ + case 8: __put_user_asm2(x, ptr, retval); break; \ + default: __put_user_bad(); \ + } \ ++ prevent_write_to_user(ptr, size); \ + } while (0) + + #define __put_user_nocheck(x, ptr, size) \ +@@ -240,6 +243,7 @@ do { \ + __chk_user_ptr(ptr); \ + if (size > sizeof(x)) \ + (x) = __get_user_bad(); \ ++ allow_read_from_user(ptr, size); \ + switch (size) { \ + case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \ + case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \ +@@ -247,6 +251,7 @@ do { \ + case 8: __get_user_asm2(x, ptr, retval); break; \ + default: (x) = 
__get_user_bad(); \ + } \ ++ prevent_read_from_user(ptr, size); \ + } while (0) + + /* +@@ -306,16 +311,22 @@ extern unsigned long __copy_tofrom_user( + static inline unsigned long + raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) + { ++ unsigned long ret; ++ + barrier_nospec(); +- return __copy_tofrom_user(to, from, n); ++ allow_user_access(to, from, n); ++ ret = __copy_tofrom_user(to, from, n); ++ prevent_user_access(to, from, n); ++ return ret; + } + #endif /* __powerpc64__ */ + + static inline unsigned long raw_copy_from_user(void *to, + const void __user *from, unsigned long n) + { ++ unsigned long ret; + if (__builtin_constant_p(n) && (n <= 8)) { +- unsigned long ret = 1; ++ ret = 1; + + switch (n) { + case 1: +@@ -340,14 +351,18 @@ static inline unsigned long raw_copy_fro + } + + barrier_nospec(); +- return __copy_tofrom_user((__force void __user *)to, from, n); ++ allow_read_from_user(from, n); ++ ret = __copy_tofrom_user((__force void __user *)to, from, n); ++ prevent_read_from_user(from, n); ++ return ret; + } + + static inline unsigned long raw_copy_to_user(void __user *to, + const void *from, unsigned long n) + { ++ unsigned long ret; + if (__builtin_constant_p(n) && (n <= 8)) { +- unsigned long ret = 1; ++ ret = 1; + + switch (n) { + case 1: +@@ -367,17 +382,24 @@ static inline unsigned long raw_copy_to_ + return 0; + } + +- return __copy_tofrom_user(to, (__force const void __user *)from, n); ++ allow_write_to_user(to, n); ++ ret = __copy_tofrom_user(to, (__force const void __user *)from, n); ++ prevent_write_to_user(to, n); ++ return ret; + } + + extern unsigned long __clear_user(void __user *addr, unsigned long size); + + static inline unsigned long clear_user(void __user *addr, unsigned long size) + { ++ unsigned long ret = size; + might_fault(); +- if (likely(access_ok(VERIFY_WRITE, addr, size))) +- return __clear_user(addr, size); +- return size; ++ if (likely(access_ok(VERIFY_WRITE, addr, size))) { ++ allow_write_to_user(addr, size); ++ ret = __clear_user(addr, size); ++ prevent_write_to_user(addr, size); ++ } ++ return ret; + } + + extern long strncpy_from_user(char *dst, const char __user *src, long count); +--- a/arch/powerpc/lib/checksum_wrappers.c ++++ b/arch/powerpc/lib/checksum_wrappers.c +@@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const voi + unsigned int csum; + + might_sleep(); ++ allow_read_from_user(src, len); + + *err_ptr = 0; + +@@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const voi + } + + out: ++ prevent_read_from_user(src, len); + return (__force __wsum)csum; + } + EXPORT_SYMBOL(csum_and_copy_from_user); +@@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void + unsigned int csum; + + might_sleep(); ++ allow_write_to_user(dst, len); + + *err_ptr = 0; + +@@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void + } + + out: ++ prevent_write_to_user(dst, len); + return (__force __wsum)csum; + } + EXPORT_SYMBOL(csum_and_copy_to_user); diff --git a/queue-4.19/powerpc-fix-__clear_user-with-kuap-enabled.patch b/queue-4.19/powerpc-fix-__clear_user-with-kuap-enabled.patch new file mode 100644 index 00000000000..0caf5496c2a --- /dev/null +++ b/queue-4.19/powerpc-fix-__clear_user-with-kuap-enabled.patch @@ -0,0 +1,113 @@ +From foo@baz Fri Nov 20 08:17:41 AM CET 2020 +From: Daniel Axtens +Date: Fri, 20 Nov 2020 10:42:01 +1100 +Subject: powerpc: Fix __clear_user() with KUAP enabled +To: stable@vger.kernel.org +Cc: dja@axtens.net +Message-ID: <20201119234203.370400-6-dja@axtens.net> + +From: Andrew Donnellan + +commit 
61e3acd8c693a14fc69b824cb5b08d02cb90a6e7 upstream. + +The KUAP implementation adds calls in clear_user() to enable and +disable access to userspace memory. However, it doesn't add these to +__clear_user(), which is used in the ptrace regset code. + +As there's only one direct user of __clear_user() (the regset code), +and the time taken to set the AMR for KUAP purposes is going to +dominate the cost of a quick access_ok(), there's not much point +having a separate path. + +Rename __clear_user() to __arch_clear_user(), and make __clear_user() +just call clear_user(). + +Reported-by: syzbot+f25ecf4b2982d8c7a640@syzkaller-ppc64.appspotmail.com +Reported-by: Daniel Axtens +Suggested-by: Michael Ellerman +Fixes: de78a9c42a79 ("powerpc: Add a framework for Kernel Userspace Access Protection") +Signed-off-by: Andrew Donnellan +[mpe: Use __arch_clear_user() for the asm version like arm64 & nds32] +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20191209132221.15328-1-ajd@linux.ibm.com +Signed-off-by: Daniel Axtens +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/uaccess.h | 9 +++++++-- + arch/powerpc/lib/string_32.S | 4 ++-- + arch/powerpc/lib/string_64.S | 6 +++--- + 3 files changed, 12 insertions(+), 7 deletions(-) + +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -416,7 +416,7 @@ raw_copy_to_user(void __user *to, const + return ret; + } + +-extern unsigned long __clear_user(void __user *addr, unsigned long size); ++unsigned long __arch_clear_user(void __user *addr, unsigned long size); + + static inline unsigned long clear_user(void __user *addr, unsigned long size) + { +@@ -424,12 +424,17 @@ static inline unsigned long clear_user(v + might_fault(); + if (likely(access_ok(VERIFY_WRITE, addr, size))) { + allow_write_to_user(addr, size); +- ret = __clear_user(addr, size); ++ ret = __arch_clear_user(addr, size); + prevent_write_to_user(addr, size); + } + return ret; + } + ++static inline unsigned long __clear_user(void __user *addr, unsigned long size) ++{ ++ return clear_user(addr, size); ++} ++ + extern long strncpy_from_user(char *dst, const char __user *src, long count); + extern __must_check long strnlen_user(const char __user *str, long n); + +--- a/arch/powerpc/lib/string_32.S ++++ b/arch/powerpc/lib/string_32.S +@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES + LG_CACHELINE_BYTES = L1_CACHE_SHIFT + CACHELINE_MASK = (L1_CACHE_BYTES-1) + +-_GLOBAL(__clear_user) ++_GLOBAL(__arch_clear_user) + /* + * Use dcbz on the complete cache lines in the destination + * to set them to zero. This requires that the destination +@@ -87,4 +87,4 @@ _GLOBAL(__clear_user) + EX_TABLE(8b, 91b) + EX_TABLE(9b, 91b) + +-EXPORT_SYMBOL(__clear_user) ++EXPORT_SYMBOL(__arch_clear_user) +--- a/arch/powerpc/lib/string_64.S ++++ b/arch/powerpc/lib/string_64.S +@@ -29,7 +29,7 @@ PPC64_CACHES: + .section ".text" + + /** +- * __clear_user: - Zero a block of memory in user space, with less checking. ++ * __arch_clear_user: - Zero a block of memory in user space, with less checking. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. 
+ * +@@ -70,7 +70,7 @@ err3; stb r0,0(r3) + mr r3,r4 + blr + +-_GLOBAL_TOC(__clear_user) ++_GLOBAL_TOC(__arch_clear_user) + cmpdi r4,32 + neg r6,r3 + li r0,0 +@@ -193,4 +193,4 @@ err1; dcbz 0,r3 + cmpdi r4,32 + blt .Lshort_clear + b .Lmedium_clear +-EXPORT_SYMBOL(__clear_user) ++EXPORT_SYMBOL(__arch_clear_user) diff --git a/queue-4.19/powerpc-implement-user_access_begin-and-friends.patch b/queue-4.19/powerpc-implement-user_access_begin-and-friends.patch new file mode 100644 index 00000000000..d3960152812 --- /dev/null +++ b/queue-4.19/powerpc-implement-user_access_begin-and-friends.patch @@ -0,0 +1,208 @@ +From foo@baz Fri Nov 20 08:17:41 AM CET 2020 +From: Daniel Axtens +Date: Fri, 20 Nov 2020 10:42:00 +1100 +Subject: powerpc: Implement user_access_begin and friends +To: stable@vger.kernel.org +Cc: dja@axtens.net +Message-ID: <20201119234203.370400-5-dja@axtens.net> + +From: Christophe Leroy + +commit 5cd623333e7cf4e3a334c70529268b65f2a6c2c7 upstream. + +Today, when a function like strncpy_from_user() is called, +the userspace access protection is de-activated and re-activated +for every word read. + +By implementing user_access_begin and friends, the protection +is de-activated at the beginning of the copy and re-activated at the +end. + +Implement user_access_begin(), user_access_end() and +unsafe_get_user(), unsafe_put_user() and unsafe_copy_to_user() + +For the time being, we keep user_access_save() and +user_access_restore() as nops. + +Signed-off-by: Christophe Leroy +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/36d4fbf9e56a75994aca4ee2214c77b26a5a8d35.1579866752.git.christophe.leroy@c-s.fr +Signed-off-by: Daniel Axtens +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/uaccess.h | 75 +++++++++++++++++++++++++++---------- + 1 file changed, 56 insertions(+), 19 deletions(-) + +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -92,9 +92,14 @@ static inline int __access_ok(unsigned l + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) + + #define __get_user(x, ptr) \ +- __get_user_nocheck((x), (ptr), sizeof(*(ptr))) ++ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true) + #define __put_user(x, ptr) \ +- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) ++ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true) ++ ++#define __get_user_allowed(x, ptr) \ ++ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false) ++#define __put_user_allowed(x, ptr) \ ++ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false) + + #define __get_user_inatomic(x, ptr) \ + __get_user_nosleep((x), (ptr), sizeof(*(ptr))) +@@ -139,10 +144,9 @@ extern long __put_user_bad(void); + : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) + #endif /* __powerpc64__ */ + +-#define __put_user_size(x, ptr, size, retval) \ ++#define __put_user_size_allowed(x, ptr, size, retval) \ + do { \ + retval = 0; \ +- allow_write_to_user(ptr, size); \ + switch (size) { \ + case 1: __put_user_asm(x, ptr, retval, "stb"); break; \ + case 2: __put_user_asm(x, ptr, retval, "sth"); break; \ +@@ -150,17 +154,26 @@ do { \ + case 8: __put_user_asm2(x, ptr, retval); break; \ + default: __put_user_bad(); \ + } \ ++} while (0) ++ ++#define __put_user_size(x, ptr, size, retval) \ ++do { \ ++ allow_write_to_user(ptr, size); \ ++ __put_user_size_allowed(x, ptr, size, retval); \ + prevent_write_to_user(ptr, size); \ + } while (0) + +-#define __put_user_nocheck(x, ptr, size) \ ++#define __put_user_nocheck(x, ptr, size, 
do_allow) \ + ({ \ + long __pu_err; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + if (!is_kernel_addr((unsigned long)__pu_addr)) \ + might_fault(); \ + __chk_user_ptr(ptr); \ +- __put_user_size((x), __pu_addr, (size), __pu_err); \ ++ if (do_allow) \ ++ __put_user_size((x), __pu_addr, (size), __pu_err); \ ++ else \ ++ __put_user_size_allowed((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ + }) + +@@ -237,13 +250,12 @@ extern long __get_user_bad(void); + : "b" (addr), "i" (-EFAULT), "0" (err)) + #endif /* __powerpc64__ */ + +-#define __get_user_size(x, ptr, size, retval) \ ++#define __get_user_size_allowed(x, ptr, size, retval) \ + do { \ + retval = 0; \ + __chk_user_ptr(ptr); \ + if (size > sizeof(x)) \ + (x) = __get_user_bad(); \ +- allow_read_from_user(ptr, size); \ + switch (size) { \ + case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \ + case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \ +@@ -251,6 +263,12 @@ do { \ + case 8: __get_user_asm2(x, ptr, retval); break; \ + default: (x) = __get_user_bad(); \ + } \ ++} while (0) ++ ++#define __get_user_size(x, ptr, size, retval) \ ++do { \ ++ allow_read_from_user(ptr, size); \ ++ __get_user_size_allowed(x, ptr, size, retval); \ + prevent_read_from_user(ptr, size); \ + } while (0) + +@@ -261,7 +279,7 @@ do { \ + #define __long_type(x) \ + __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) + +-#define __get_user_nocheck(x, ptr, size) \ ++#define __get_user_nocheck(x, ptr, size, do_allow) \ + ({ \ + long __gu_err; \ + __long_type(*(ptr)) __gu_val; \ +@@ -270,7 +288,10 @@ do { \ + if (!is_kernel_addr((unsigned long)__gu_addr)) \ + might_fault(); \ + barrier_nospec(); \ +- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ if (do_allow) \ ++ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ else \ ++ __get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ + }) +@@ -357,33 +378,40 @@ static inline unsigned long raw_copy_fro + return ret; + } + +-static inline unsigned long raw_copy_to_user(void __user *to, +- const void *from, unsigned long n) ++static inline unsigned long ++raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n) + { +- unsigned long ret; + if (__builtin_constant_p(n) && (n <= 8)) { +- ret = 1; ++ unsigned long ret = 1; + + switch (n) { + case 1: +- __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret); ++ __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret); + break; + case 2: +- __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret); ++ __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret); + break; + case 4: +- __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret); ++ __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret); + break; + case 8: +- __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret); ++ __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret); + break; + } + if (ret == 0) + return 0; + } + ++ return __copy_tofrom_user(to, (__force const void __user *)from, n); ++} ++ ++static inline unsigned long ++raw_copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ unsigned long ret; ++ + allow_write_to_user(to, n); +- ret = __copy_tofrom_user(to, (__force const void __user *)from, n); ++ ret = raw_copy_to_user_allowed(to, from, n); + prevent_write_to_user(to, n); + return ret; + } +@@ -410,4 +438,13 @@ extern long __copy_from_user_flushcache( + extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset, 
+ size_t len); + ++#define user_access_begin(type, ptr, len) access_ok(type, ptr, len) ++#define user_access_end() prevent_user_access(NULL, NULL, ~0ul) ++ ++#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) ++#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) ++#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e) ++#define unsafe_copy_to_user(d, s, l, e) \ ++ unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e) ++ + #endif /* _ARCH_POWERPC_UACCESS_H */ diff --git a/queue-4.19/powerpc-uaccess-evaluate-macro-arguments-once-before-user-access-is-allowed.patch b/queue-4.19/powerpc-uaccess-evaluate-macro-arguments-once-before-user-access-is-allowed.patch new file mode 100644 index 00000000000..4ca170392b1 --- /dev/null +++ b/queue-4.19/powerpc-uaccess-evaluate-macro-arguments-once-before-user-access-is-allowed.patch @@ -0,0 +1,153 @@ +From foo@baz Fri Nov 20 08:17:41 AM CET 2020 +From: Daniel Axtens +Date: Fri, 20 Nov 2020 10:42:02 +1100 +Subject: powerpc/uaccess: Evaluate macro arguments once, before user access is allowed +To: stable@vger.kernel.org +Cc: dja@axtens.net +Message-ID: <20201119234203.370400-7-dja@axtens.net> + +From: Nicholas Piggin + +commit d02f6b7dab8228487268298ea1f21081c0b4b3eb upstream. + +get/put_user() can be called with nontrivial arguments. fs/proc/page.c +has a good example: + + if (put_user(stable_page_flags(ppage), out)) { + +stable_page_flags() is quite a lot of code, including spin locks in +the page allocator. + +Ensure these arguments are evaluated before user access is allowed. + +This improves security by reducing code with access to userspace, but +it also fixes a PREEMPT bug with KUAP on powerpc/64s: +stable_page_flags() is currently called with AMR set to allow writes, +it ends up calling spin_unlock(), which can call preempt_schedule. But +the task switch code can not be called with AMR set (it relies on +interrupts saving the register), so this blows up. + +It's fine if the code inside allow_user_access() is preemptible, +because a timer or IPI will save the AMR, but it's not okay to +explicitly cause a reschedule. 
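In a simplified sketch (the macro name is illustrative; the real change below applies the same idea to __put_user_nocheck(), __get_user_nocheck() and friends), the fix is to capture the value and address into temporaries before the access window is opened:

    #define put_user_sketch(x, ptr)                                     \
    ({                                                                  \
            long __pu_err;                                              \
            __typeof__(*(ptr)) __user *__pu_addr = (ptr);               \
            /* (x) is evaluated here, with user access still blocked */ \
            __typeof__(*(ptr)) __pu_val = (x);                          \
                                                                        \
            allow_write_to_user(__pu_addr, sizeof(*(ptr)));             \
            __put_user_size_allowed(__pu_val, __pu_addr,                \
                                    sizeof(*(ptr)), __pu_err);          \
            prevent_write_to_user(__pu_addr, sizeof(*(ptr)));           \
            __pu_err;                                                   \
    })

With the old ordering, the (x) expression (stable_page_flags() in the example above) only ran after allow_write_to_user(), i.e. with the AMR still set to permit user accesses.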
+ +Fixes: de78a9c42a79 ("powerpc: Add a framework for Kernel Userspace Access Protection") +Signed-off-by: Nicholas Piggin +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20200407041245.600651-1-npiggin@gmail.com +Signed-off-by: Daniel Axtens +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/uaccess.h | 49 ++++++++++++++++++++++++++----------- + 1 file changed, 35 insertions(+), 14 deletions(-) + +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -167,13 +167,17 @@ do { \ + ({ \ + long __pu_err; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ ++ __typeof__(*(ptr)) __pu_val = (x); \ ++ __typeof__(size) __pu_size = (size); \ ++ \ + if (!is_kernel_addr((unsigned long)__pu_addr)) \ + might_fault(); \ +- __chk_user_ptr(ptr); \ ++ __chk_user_ptr(__pu_addr); \ + if (do_allow) \ +- __put_user_size((x), __pu_addr, (size), __pu_err); \ ++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ + else \ +- __put_user_size_allowed((x), __pu_addr, (size), __pu_err); \ ++ __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \ ++ \ + __pu_err; \ + }) + +@@ -181,9 +185,13 @@ do { \ + ({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ ++ __typeof__(*(ptr)) __pu_val = (x); \ ++ __typeof__(size) __pu_size = (size); \ ++ \ + might_fault(); \ +- if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ +- __put_user_size((x), __pu_addr, (size), __pu_err); \ ++ if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size)) \ ++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ ++ \ + __pu_err; \ + }) + +@@ -191,8 +199,12 @@ do { \ + ({ \ + long __pu_err; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ +- __chk_user_ptr(ptr); \ +- __put_user_size((x), __pu_addr, (size), __pu_err); \ ++ __typeof__(*(ptr)) __pu_val = (x); \ ++ __typeof__(size) __pu_size = (size); \ ++ \ ++ __chk_user_ptr(__pu_addr); \ ++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ ++ \ + __pu_err; \ + }) + +@@ -284,15 +296,18 @@ do { \ + long __gu_err; \ + __long_type(*(ptr)) __gu_val; \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ +- __chk_user_ptr(ptr); \ ++ __typeof__(size) __gu_size = (size); \ ++ \ ++ __chk_user_ptr(__gu_addr); \ + if (!is_kernel_addr((unsigned long)__gu_addr)) \ + might_fault(); \ + barrier_nospec(); \ + if (do_allow) \ +- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ + else \ +- __get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \ ++ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ ++ \ + __gu_err; \ + }) + +@@ -301,12 +316,15 @@ do { \ + long __gu_err = -EFAULT; \ + __long_type(*(ptr)) __gu_val = 0; \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ ++ __typeof__(size) __gu_size = (size); \ ++ \ + might_fault(); \ +- if (access_ok(VERIFY_READ, __gu_addr, (size))) { \ ++ if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) { \ + barrier_nospec(); \ +- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ + } \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ ++ \ + __gu_err; \ + }) + +@@ -315,10 +333,13 @@ do { \ + long __gu_err; \ + __long_type(*(ptr)) __gu_val; \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ +- __chk_user_ptr(ptr); \ ++ __typeof__(size) __gu_size = (size); \ ++ \ ++ __chk_user_ptr(__gu_addr); \ + barrier_nospec(); \ +- __get_user_size(__gu_val, 
__gu_addr, (size), __gu_err); \ ++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ ++ \ + __gu_err; \ + }) + diff --git a/queue-4.19/series b/queue-4.19/series new file mode 100644 index 00000000000..d46578172c3 --- /dev/null +++ b/queue-4.19/series @@ -0,0 +1,7 @@ +powerpc-64s-move-some-exception-handlers-out-of-line.patch +powerpc-64s-flush-l1d-on-kernel-entry.patch +powerpc-add-a-framework-for-user-access-tracking.patch +powerpc-implement-user_access_begin-and-friends.patch +powerpc-fix-__clear_user-with-kuap-enabled.patch +powerpc-uaccess-evaluate-macro-arguments-once-before-user-access-is-allowed.patch +powerpc-64s-flush-l1d-after-user-accesses.patch