git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 20 Nov 2020 06:58:09 +0000 (07:58 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 20 Nov 2020 06:58:09 +0000 (07:58 +0100)
added patches:
powerpc-64s-flush-l1d-after-user-accesses.patch
powerpc-64s-flush-l1d-on-kernel-entry.patch
powerpc-only-include-kup-radix.h-for-64-bit-book3s.patch
selftests-powerpc-entry-flush-test.patch
selftests-powerpc-rfi_flush-disable-entry-flush-if-present.patch

queue-5.9/powerpc-64s-flush-l1d-after-user-accesses.patch [new file with mode: 0644]
queue-5.9/powerpc-64s-flush-l1d-on-kernel-entry.patch [new file with mode: 0644]
queue-5.9/powerpc-only-include-kup-radix.h-for-64-bit-book3s.patch [new file with mode: 0644]
queue-5.9/selftests-powerpc-entry-flush-test.patch [new file with mode: 0644]
queue-5.9/selftests-powerpc-rfi_flush-disable-entry-flush-if-present.patch [new file with mode: 0644]
queue-5.9/series [new file with mode: 0644]

diff --git a/queue-5.9/powerpc-64s-flush-l1d-after-user-accesses.patch b/queue-5.9/powerpc-64s-flush-l1d-after-user-accesses.patch
new file mode 100644 (file)
index 0000000..ff02c9f
--- /dev/null
@@ -0,0 +1,628 @@
+From foo@baz Fri Nov 20 07:57:18 AM CET 2020
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 20 Nov 2020 10:22:48 +1100
+Subject: powerpc/64s: flush L1D after user accesses
+To: stable@vger.kernel.org
+Cc: dja@axtens.net
+Message-ID: <20201119232250.365304-4-dja@axtens.net>
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 9a32a7e78bd0cd9a9b6332cbdc345ee5ffd0c5de upstream.
+
+IBM Power9 processors can speculatively operate on data in the L1 cache
+before it has been completely validated, via a way-prediction mechanism. It
+is not possible for an attacker to determine the contents of impermissible
+memory using this method, since these systems implement a combination of
+hardware and software security measures to prevent scenarios where
+protected data could be leaked.
+
+However these measures don't address the scenario where an attacker induces
+the operating system to speculatively execute instructions using data that
+the attacker controls. This can be used for example to speculatively bypass
+"kernel user access prevention" techniques, as discovered by Anthony
+Steinhauser of Google's Safeside Project. This is not an attack by itself,
+but there is a possibility it could be used in conjunction with
+side-channels or other weaknesses in the privileged code to construct an
+attack.
+
+This issue can be mitigated by flushing the L1 cache between privilege
+boundaries of concern. This patch flushes the L1 cache after user accesses.
+
+This is part of the fix for CVE-2020-4788.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt |    4 +
+ arch/powerpc/include/asm/book3s/64/kup-radix.h  |   66 +++++++++++-------
+ arch/powerpc/include/asm/exception-64s.h        |    3 
+ arch/powerpc/include/asm/feature-fixups.h       |    9 ++
+ arch/powerpc/include/asm/kup.h                  |   19 +++--
+ arch/powerpc/include/asm/security_features.h    |    3 
+ arch/powerpc/include/asm/setup.h                |    1 
+ arch/powerpc/kernel/exceptions-64s.S            |   85 +++++++-----------------
+ arch/powerpc/kernel/setup_64.c                  |   62 +++++++++++++++++
+ arch/powerpc/kernel/vmlinux.lds.S               |    7 +
+ arch/powerpc/lib/feature-fixups.c               |   50 ++++++++++++++
+ arch/powerpc/platforms/powernv/setup.c          |   10 ++
+ arch/powerpc/platforms/pseries/setup.c          |    4 +
+ 13 files changed, 233 insertions(+), 90 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2834,6 +2834,7 @@
+                                              tsx_async_abort=off [X86]
+                                              kvm.nx_huge_pages=off [X86]
+                                              no_entry_flush [PPC]
++                                             no_uaccess_flush [PPC]
+                               Exceptions:
+                                              This does not have any effect on
+@@ -3209,6 +3210,9 @@
+       nospec_store_bypass_disable
+                       [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
++      no_uaccess_flush
++                      [PPC] Don't flush the L1-D cache after accessing user data.
++
+       noxsave         [BUGS=X86] Disables x86 extended register state save
+                       and restore using xsave. The kernel will fallback to
+                       enabling legacy floating-point and sse state.
+--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+@@ -61,6 +61,8 @@
+ #else /* !__ASSEMBLY__ */
++DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
++
+ #ifdef CONFIG_PPC_KUAP
+ #include <asm/mmu.h>
+@@ -103,8 +105,16 @@ static inline void kuap_check_amr(void)
+ static inline unsigned long get_kuap(void)
+ {
++      /*
++       * We return AMR_KUAP_BLOCKED when we don't support KUAP because
++       * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
++       * cause restore_user_access to do a flush.
++       *
++       * This has no effect in terms of actually blocking things on hash,
++       * so it doesn't break anything.
++       */
+       if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+-              return 0;
++              return AMR_KUAP_BLOCKED;
+       return mfspr(SPRN_AMR);
+ }
+@@ -123,6 +133,31 @@ static inline void set_kuap(unsigned lon
+       isync();
+ }
++static inline bool
++bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
++{
++      return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
++                  (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
++                  "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
++}
++#else /* CONFIG_PPC_KUAP */
++static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }
++
++static inline unsigned long kuap_get_and_check_amr(void)
++{
++      return 0UL;
++}
++
++static inline void kuap_check_amr(void) { }
++
++static inline unsigned long get_kuap(void)
++{
++      return AMR_KUAP_BLOCKED;
++}
++
++static inline void set_kuap(unsigned long value) { }
++#endif /* !CONFIG_PPC_KUAP */
++
+ static __always_inline void allow_user_access(void __user *to, const void __user *from,
+                                             unsigned long size, unsigned long dir)
+ {
+@@ -142,6 +177,8 @@ static inline void prevent_user_access(v
+                                      unsigned long size, unsigned long dir)
+ {
+       set_kuap(AMR_KUAP_BLOCKED);
++      if (static_branch_unlikely(&uaccess_flush_key))
++              do_uaccess_flush();
+ }
+ static inline unsigned long prevent_user_access_return(void)
+@@ -149,6 +186,8 @@ static inline unsigned long prevent_user
+       unsigned long flags = get_kuap();
+       set_kuap(AMR_KUAP_BLOCKED);
++      if (static_branch_unlikely(&uaccess_flush_key))
++              do_uaccess_flush();
+       return flags;
+ }
+@@ -156,30 +195,9 @@ static inline unsigned long prevent_user
+ static inline void restore_user_access(unsigned long flags)
+ {
+       set_kuap(flags);
++      if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
++              do_uaccess_flush();
+ }
+-
+-static inline bool
+-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+-{
+-      return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
+-                  (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
+-                  "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
+-}
+-#else /* CONFIG_PPC_KUAP */
+-static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
+-{
+-}
+-
+-static inline void kuap_check_amr(void)
+-{
+-}
+-
+-static inline unsigned long kuap_get_and_check_amr(void)
+-{
+-      return 0;
+-}
+-#endif /* CONFIG_PPC_KUAP */
+-
+ #endif /* __ASSEMBLY__ */
+ #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -144,6 +144,9 @@
+       RFSCV;                                                          \
+       b       rfscv_flush_fallback
++#else /* __ASSEMBLY__ */
++/* Prototype for function defined in exceptions-64s.S */
++void do_uaccess_flush(void);
+ #endif /* __ASSEMBLY__ */
+ #endif        /* _ASM_POWERPC_EXCEPTION_H */
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -205,6 +205,14 @@ label##3:                                         \
+       FTR_ENTRY_OFFSET 955b-956b;                     \
+       .popsection;
++#define UACCESS_FLUSH_FIXUP_SECTION                   \
++959:                                                  \
++      .pushsection __uaccess_flush_fixup,"a";         \
++      .align 2;                                       \
++960:                                                  \
++      FTR_ENTRY_OFFSET 959b-960b;                     \
++      .popsection;
++
+ #define ENTRY_FLUSH_FIXUP_SECTION                     \
+ 957:                                                  \
+       .pushsection __entry_flush_fixup,"a";           \
+@@ -248,6 +256,7 @@ extern long stf_barrier_fallback;
+ extern long entry_flush_fallback;
+ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+ extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
++extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
+ extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+--- a/arch/powerpc/include/asm/kup.h
++++ b/arch/powerpc/include/asm/kup.h
+@@ -53,17 +53,26 @@ static inline void setup_kuep(bool disab
+ void setup_kuap(bool disabled);
+ #else
+ static inline void setup_kuap(bool disabled) { }
++
++static inline bool
++bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
++{
++      return false;
++}
++
++/*
++ * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
++ * the L1D cache after user accesses. Only include the empty stubs for other
++ * platforms.
++ */
++#ifndef CONFIG_PPC64
+ static inline void allow_user_access(void __user *to, const void __user *from,
+                                    unsigned long size, unsigned long dir) { }
+ static inline void prevent_user_access(void __user *to, const void __user *from,
+                                      unsigned long size, unsigned long dir) { }
+ static inline unsigned long prevent_user_access_return(void) { return 0UL; }
+ static inline void restore_user_access(unsigned long flags) { }
+-static inline bool
+-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+-{
+-      return false;
+-}
++#endif /* CONFIG_PPC64 */
+ #endif /* CONFIG_PPC_KUAP */
+ static inline void allow_read_from_user(const void __user *from, unsigned long size)
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -89,6 +89,8 @@ static inline bool security_ftr_enabled(
+ // The L1-D cache should be flushed when entering the kernel
+ #define SEC_FTR_L1D_FLUSH_ENTRY               0x0000000000004000ull
++// The L1-D cache should be flushed after user accesses from the kernel
++#define SEC_FTR_L1D_FLUSH_UACCESS     0x0000000000008000ull
+ // Features enabled by default
+ #define SEC_FTR_DEFAULT \
+@@ -96,6 +98,7 @@ static inline bool security_ftr_enabled(
+        SEC_FTR_L1D_FLUSH_PR | \
+        SEC_FTR_BNDS_CHK_SPEC_BAR | \
+        SEC_FTR_L1D_FLUSH_ENTRY | \
++       SEC_FTR_L1D_FLUSH_UACCESS | \
+        SEC_FTR_FAVOUR_SECURITY)
+ #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -60,6 +60,7 @@ void setup_barrier_nospec(void);
+ #else
+ static inline void setup_barrier_nospec(void) { };
+ #endif
++void do_uaccess_flush_fixups(enum l1d_flush_type types);
+ void do_entry_flush_fixups(enum l1d_flush_type types);
+ void do_barrier_nospec_fixups(bool enable);
+ extern bool barrier_nospec_enabled;
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -2951,11 +2951,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
+       .endr
+       blr
+-TRAMP_REAL_BEGIN(entry_flush_fallback)
+-      std     r9,PACA_EXRFI+EX_R9(r13)
+-      std     r10,PACA_EXRFI+EX_R10(r13)
+-      std     r11,PACA_EXRFI+EX_R11(r13)
+-      mfctr   r9
++/* Clobbers r10, r11, ctr */
++.macro L1D_DISPLACEMENT_FLUSH
+       ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+       ld      r11,PACA_L1D_FLUSH_SIZE(r13)
+       srdi    r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+@@ -2981,7 +2978,14 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
+       ld      r11,(0x80 + 8)*7(r10)
+       addi    r10,r10,0x80*8
+       bdnz    1b
++.endm
++TRAMP_REAL_BEGIN(entry_flush_fallback)
++      std     r9,PACA_EXRFI+EX_R9(r13)
++      std     r10,PACA_EXRFI+EX_R10(r13)
++      std     r11,PACA_EXRFI+EX_R11(r13)
++      mfctr   r9
++      L1D_DISPLACEMENT_FLUSH
+       mtctr   r9
+       ld      r9,PACA_EXRFI+EX_R9(r13)
+       ld      r10,PACA_EXRFI+EX_R10(r13)
+@@ -2997,32 +3001,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+       std     r10,PACA_EXRFI+EX_R10(r13)
+       std     r11,PACA_EXRFI+EX_R11(r13)
+       mfctr   r9
+-      ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+-      ld      r11,PACA_L1D_FLUSH_SIZE(r13)
+-      srdi    r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+-      mtctr   r11
+-      DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+-
+-      /* order ld/st prior to dcbt stop all streams with flushing */
+-      sync
+-
+-      /*
+-       * The load adresses are at staggered offsets within cachelines,
+-       * which suits some pipelines better (on others it should not
+-       * hurt).
+-       */
+-1:
+-      ld      r11,(0x80 + 8)*0(r10)
+-      ld      r11,(0x80 + 8)*1(r10)
+-      ld      r11,(0x80 + 8)*2(r10)
+-      ld      r11,(0x80 + 8)*3(r10)
+-      ld      r11,(0x80 + 8)*4(r10)
+-      ld      r11,(0x80 + 8)*5(r10)
+-      ld      r11,(0x80 + 8)*6(r10)
+-      ld      r11,(0x80 + 8)*7(r10)
+-      addi    r10,r10,0x80*8
+-      bdnz    1b
+-
++      L1D_DISPLACEMENT_FLUSH
+       mtctr   r9
+       ld      r9,PACA_EXRFI+EX_R9(r13)
+       ld      r10,PACA_EXRFI+EX_R10(r13)
+@@ -3040,32 +3019,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+       std     r10,PACA_EXRFI+EX_R10(r13)
+       std     r11,PACA_EXRFI+EX_R11(r13)
+       mfctr   r9
+-      ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+-      ld      r11,PACA_L1D_FLUSH_SIZE(r13)
+-      srdi    r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+-      mtctr   r11
+-      DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+-
+-      /* order ld/st prior to dcbt stop all streams with flushing */
+-      sync
+-
+-      /*
+-       * The load adresses are at staggered offsets within cachelines,
+-       * which suits some pipelines better (on others it should not
+-       * hurt).
+-       */
+-1:
+-      ld      r11,(0x80 + 8)*0(r10)
+-      ld      r11,(0x80 + 8)*1(r10)
+-      ld      r11,(0x80 + 8)*2(r10)
+-      ld      r11,(0x80 + 8)*3(r10)
+-      ld      r11,(0x80 + 8)*4(r10)
+-      ld      r11,(0x80 + 8)*5(r10)
+-      ld      r11,(0x80 + 8)*6(r10)
+-      ld      r11,(0x80 + 8)*7(r10)
+-      addi    r10,r10,0x80*8
+-      bdnz    1b
+-
++      L1D_DISPLACEMENT_FLUSH
+       mtctr   r9
+       ld      r9,PACA_EXRFI+EX_R9(r13)
+       ld      r10,PACA_EXRFI+EX_R10(r13)
+@@ -3116,8 +3070,21 @@ TRAMP_REAL_BEGIN(rfscv_flush_fallback)
+       RFSCV
+ USE_TEXT_SECTION()
+-      MASKED_INTERRUPT
+-      MASKED_INTERRUPT hsrr=1
++
++_GLOBAL(do_uaccess_flush)
++      UACCESS_FLUSH_FIXUP_SECTION
++      nop
++      nop
++      nop
++      blr
++      L1D_DISPLACEMENT_FLUSH
++      blr
++_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
++EXPORT_SYMBOL(do_uaccess_flush)
++
++
++MASKED_INTERRUPT
++MASKED_INTERRUPT hsrr=1
+ #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ kvmppc_skip_interrupt:
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -861,8 +861,12 @@ static enum l1d_flush_type enabled_flush
+ static void *l1d_flush_fallback_area;
+ static bool no_rfi_flush;
+ static bool no_entry_flush;
++static bool no_uaccess_flush;
+ bool rfi_flush;
+ bool entry_flush;
++bool uaccess_flush;
++DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
++EXPORT_SYMBOL(uaccess_flush_key);
+ static int __init handle_no_rfi_flush(char *p)
+ {
+@@ -880,6 +884,14 @@ static int __init handle_no_entry_flush(
+ }
+ early_param("no_entry_flush", handle_no_entry_flush);
++static int __init handle_no_uaccess_flush(char *p)
++{
++      pr_info("uaccess-flush: disabled on command line.");
++      no_uaccess_flush = true;
++      return 0;
++}
++early_param("no_uaccess_flush", handle_no_uaccess_flush);
++
+ /*
+  * The RFI flush is not KPTI, but because users will see doco that says to use
+  * nopti we hijack that option here to also disable the RFI flush.
+@@ -923,6 +935,20 @@ void entry_flush_enable(bool enable)
+       entry_flush = enable;
+ }
++void uaccess_flush_enable(bool enable)
++{
++      if (enable) {
++              do_uaccess_flush_fixups(enabled_flush_types);
++              static_branch_enable(&uaccess_flush_key);
++              on_each_cpu(do_nothing, NULL, 1);
++      } else {
++              static_branch_disable(&uaccess_flush_key);
++              do_uaccess_flush_fixups(L1D_FLUSH_NONE);
++      }
++
++      uaccess_flush = enable;
++}
++
+ static void __ref init_fallback_flush(void)
+ {
+       u64 l1d_size, limit;
+@@ -994,6 +1020,15 @@ void setup_entry_flush(bool enable)
+               entry_flush_enable(enable);
+ }
++void setup_uaccess_flush(bool enable)
++{
++      if (cpu_mitigations_off())
++              return;
++
++      if (!no_uaccess_flush)
++              uaccess_flush_enable(enable);
++}
++
+ #ifdef CONFIG_DEBUG_FS
+ static int rfi_flush_set(void *data, u64 val)
+ {
+@@ -1047,10 +1082,37 @@ static int entry_flush_get(void *data, u
+ DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
++static int uaccess_flush_set(void *data, u64 val)
++{
++      bool enable;
++
++      if (val == 1)
++              enable = true;
++      else if (val == 0)
++              enable = false;
++      else
++              return -EINVAL;
++
++      /* Only do anything if we're changing state */
++      if (enable != uaccess_flush)
++              uaccess_flush_enable(enable);
++
++      return 0;
++}
++
++static int uaccess_flush_get(void *data, u64 *val)
++{
++      *val = uaccess_flush ? 1 : 0;
++      return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
++
+ static __init int rfi_flush_debugfs_init(void)
+ {
+       debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+       debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
++      debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
+       return 0;
+ }
+ device_initcall(rfi_flush_debugfs_init);
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -132,6 +132,13 @@ SECTIONS
+       }
+       . = ALIGN(8);
++      __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
++              __start___uaccess_flush_fixup = .;
++              *(__uaccess_flush_fixup)
++              __stop___uaccess_flush_fixup = .;
++      }
++
++      . = ALIGN(8);
+       __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
+               __start___entry_flush_fixup = .;
+               *(__entry_flush_fixup)
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -234,6 +234,56 @@ void do_stf_barrier_fixups(enum stf_barr
+       do_stf_exit_barrier_fixups(types);
+ }
++void do_uaccess_flush_fixups(enum l1d_flush_type types)
++{
++      unsigned int instrs[4], *dest;
++      long *start, *end;
++      int i;
++
++      start = PTRRELOC(&__start___uaccess_flush_fixup);
++      end = PTRRELOC(&__stop___uaccess_flush_fixup);
++
++      instrs[0] = 0x60000000; /* nop */
++      instrs[1] = 0x60000000; /* nop */
++      instrs[2] = 0x60000000; /* nop */
++      instrs[3] = 0x4e800020; /* blr */
++
++      i = 0;
++      if (types == L1D_FLUSH_FALLBACK) {
++              instrs[3] = 0x60000000; /* nop */
++              /* fallthrough to fallback flush */
++      }
++
++      if (types & L1D_FLUSH_ORI) {
++              instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++              instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
++      }
++
++      if (types & L1D_FLUSH_MTTRIG)
++              instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++      for (i = 0; start < end; start++, i++) {
++              dest = (void *)start + *start;
++
++              pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++              patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
++
++              patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
++              patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
++              patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
++      }
++
++      printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
++              (types == L1D_FLUSH_NONE)       ? "no" :
++              (types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
++              (types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
++                                                      ? "ori+mttrig type"
++                                                      : "ori type" :
++              (types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
++                                              : "unknown");
++}
++
+ void do_entry_flush_fixups(enum l1d_flush_type types)
+ {
+       unsigned int instrs[3], *dest;
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -124,10 +124,12 @@ static void pnv_setup_rfi_flush(void)
+       /*
+        * If we are non-Power9 bare metal, we don't need to flush on kernel
+-       * entry: it fixes a P9 specific vulnerability.
++       * entry or after user access: they fix a P9 specific vulnerability.
+        */
+-      if (!pvr_version_is(PVR_POWER9))
++      if (!pvr_version_is(PVR_POWER9)) {
+               security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
++              security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
++      }
+       enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
+                (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)   || \
+@@ -139,6 +141,10 @@ static void pnv_setup_rfi_flush(void)
+       enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+                security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+       setup_entry_flush(enable);
++
++      enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++               security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
++      setup_uaccess_flush(enable);
+ }
+ static void __init pnv_setup_arch(void)
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -577,6 +577,10 @@ void pseries_setup_rfi_flush(void)
+       enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+                security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+       setup_entry_flush(enable);
++
++      enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++               security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
++      setup_uaccess_flush(enable);
+ }
+ #ifdef CONFIG_PCI_IOV
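
A note on the L1D_DISPLACEMENT_FLUSH macro factored out above: it implements a displacement flush, reading one word per 128-byte cache line across a kernel-owned fallback buffer the size of the L1D so that every line is refilled with known-safe data. The following userspace C model is a sketch only (the function name and parameters are illustrative, not kernel API); the srdi r11,r11,(7 + 3) in the macro is the size / (128 * 8) computation below.

/*
 * Userspace model of L1D_DISPLACEMENT_FLUSH (illustrative, not
 * kernel code): one load per 128-byte line, unrolled 8x, walking
 * an L1D-sized fallback buffer.
 */
static void l1d_displacement_flush(const char *fallback_area,
				   unsigned long l1d_size)
{
	/* 128-byte lines, unrolled 8x: size >> (7 + 3) iterations */
	unsigned long iters = l1d_size >> (7 + 3);
	unsigned long i;
	int j;

	for (i = 0; i < iters; i++) {
		/* staggered offsets within cachelines, as in the asm */
		for (j = 0; j < 8; j++)
			(void)*(volatile const unsigned long *)
				(fallback_area + (0x80 + 8) * j);
		fallback_area += 0x80 * 8;	/* addi r10,r10,0x80*8 */
	}
}

The sync and DCBT_BOOK3S_STOP_ALL_STREAM_IDS steps that precede the loop in the real macro (ordering earlier accesses and stopping prefetch streams) have no C equivalent and are omitted here.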
diff --git a/queue-5.9/powerpc-64s-flush-l1d-on-kernel-entry.patch b/queue-5.9/powerpc-64s-flush-l1d-on-kernel-entry.patch
new file mode 100644 (file)
index 0000000..7e9fb24
--- /dev/null
@@ -0,0 +1,424 @@
+From foo@baz Fri Nov 20 07:57:18 AM CET 2020
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 20 Nov 2020 10:22:47 +1100
+Subject: powerpc/64s: flush L1D on kernel entry
+To: stable@vger.kernel.org
+Cc: dja@axtens.net
+Message-ID: <20201119232250.365304-3-dja@axtens.net>
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit f79643787e0a0762d2409b7b8334e83f22d85695 upstream.
+
+IBM Power9 processors can speculatively operate on data in the L1 cache
+before it has been completely validated, via a way-prediction mechanism. It
+is not possible for an attacker to determine the contents of impermissible
+memory using this method, since these systems implement a combination of
+hardware and software security measures to prevent scenarios where
+protected data could be leaked.
+
+However these measures don't address the scenario where an attacker induces
+the operating system to speculatively execute instructions using data that
+the attacker controls. This can be used for example to speculatively bypass
+"kernel user access prevention" techniques, as discovered by Anthony
+Steinhauser of Google's Safeside Project. This is not an attack by itself,
+but there is a possibility it could be used in conjunction with
+side-channels or other weaknesses in the privileged code to construct an
+attack.
+
+This issue can be mitigated by flushing the L1 cache between privilege
+boundaries of concern. This patch flushes the L1 cache on kernel entry.
+
+This is part of the fix for CVE-2020-4788.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt |    3 +
+ arch/powerpc/include/asm/exception-64s.h        |    9 +++
+ arch/powerpc/include/asm/feature-fixups.h       |   10 ++++
+ arch/powerpc/include/asm/security_features.h    |    4 +
+ arch/powerpc/include/asm/setup.h                |    3 +
+ arch/powerpc/kernel/exceptions-64s.S            |   37 ++++++++++++++
+ arch/powerpc/kernel/setup_64.c                  |   60 +++++++++++++++++++++++-
+ arch/powerpc/kernel/vmlinux.lds.S               |    7 ++
+ arch/powerpc/lib/feature-fixups.c               |   54 +++++++++++++++++++++
+ arch/powerpc/platforms/powernv/setup.c          |   11 ++++
+ arch/powerpc/platforms/pseries/setup.c          |    4 +
+ 11 files changed, 200 insertions(+), 2 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2833,6 +2833,7 @@
+                                              mds=off [X86]
+                                              tsx_async_abort=off [X86]
+                                              kvm.nx_huge_pages=off [X86]
++                                             no_entry_flush [PPC]
+                               Exceptions:
+                                              This does not have any effect on
+@@ -3157,6 +3158,8 @@
+       noefi           Disable EFI runtime services support.
++      no_entry_flush  [PPC] Don't flush the L1-D cache when entering the kernel.
++
+       noexec          [IA-64]
+       noexec          [X86]
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -57,11 +57,18 @@
+       nop;                                                            \
+       nop
++#define ENTRY_FLUSH_SLOT                                              \
++      ENTRY_FLUSH_FIXUP_SECTION;                                      \
++      nop;                                                            \
++      nop;                                                            \
++      nop;
++
+ /*
+  * r10 must be free to use, r13 must be paca
+  */
+ #define INTERRUPT_TO_KERNEL                                           \
+-      STF_ENTRY_BARRIER_SLOT
++      STF_ENTRY_BARRIER_SLOT;                                         \
++      ENTRY_FLUSH_SLOT
+ /*
+  * Macros for annotating the expected destination of (h)rfid
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -205,6 +205,14 @@ label##3:                                         \
+       FTR_ENTRY_OFFSET 955b-956b;                     \
+       .popsection;
++#define ENTRY_FLUSH_FIXUP_SECTION                     \
++957:                                                  \
++      .pushsection __entry_flush_fixup,"a";           \
++      .align 2;                                       \
++958:                                                  \
++      FTR_ENTRY_OFFSET 957b-958b;                     \
++      .popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION                               \
+ 951:                                                  \
+       .pushsection __rfi_flush_fixup,"a";             \
+@@ -237,8 +245,10 @@ label##3:                                         \
+ #include <linux/types.h>
+ extern long stf_barrier_fallback;
++extern long entry_flush_fallback;
+ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+ extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
++extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+ extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -86,12 +86,16 @@ static inline bool security_ftr_enabled(
+ // Software required to flush link stack on context switch
+ #define SEC_FTR_FLUSH_LINK_STACK      0x0000000000001000ull
++// The L1-D cache should be flushed when entering the kernel
++#define SEC_FTR_L1D_FLUSH_ENTRY               0x0000000000004000ull
++
+ // Features enabled by default
+ #define SEC_FTR_DEFAULT \
+       (SEC_FTR_L1D_FLUSH_HV | \
+        SEC_FTR_L1D_FLUSH_PR | \
+        SEC_FTR_BNDS_CHK_SPEC_BAR | \
++       SEC_FTR_L1D_FLUSH_ENTRY | \
+        SEC_FTR_FAVOUR_SECURITY)
+ #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -52,12 +52,15 @@ enum l1d_flush_type {
+ };
+ void setup_rfi_flush(enum l1d_flush_type, bool enable);
++void setup_entry_flush(bool enable);
++void setup_uaccess_flush(bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
+ #ifdef CONFIG_PPC_BARRIER_NOSPEC
+ void setup_barrier_nospec(void);
+ #else
+ static inline void setup_barrier_nospec(void) { };
+ #endif
++void do_entry_flush_fixups(enum l1d_flush_type types);
+ void do_barrier_nospec_fixups(bool enable);
+ extern bool barrier_nospec_enabled;
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -2951,6 +2951,43 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
+       .endr
+       blr
++TRAMP_REAL_BEGIN(entry_flush_fallback)
++      std     r9,PACA_EXRFI+EX_R9(r13)
++      std     r10,PACA_EXRFI+EX_R10(r13)
++      std     r11,PACA_EXRFI+EX_R11(r13)
++      mfctr   r9
++      ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
++      ld      r11,PACA_L1D_FLUSH_SIZE(r13)
++      srdi    r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
++      mtctr   r11
++      DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
++
++      /* order ld/st prior to dcbt stop all streams with flushing */
++      sync
++
++      /*
++       * The load addresses are at staggered offsets within cachelines,
++       * which suits some pipelines better (on others it should not
++       * hurt).
++       */
++1:
++      ld      r11,(0x80 + 8)*0(r10)
++      ld      r11,(0x80 + 8)*1(r10)
++      ld      r11,(0x80 + 8)*2(r10)
++      ld      r11,(0x80 + 8)*3(r10)
++      ld      r11,(0x80 + 8)*4(r10)
++      ld      r11,(0x80 + 8)*5(r10)
++      ld      r11,(0x80 + 8)*6(r10)
++      ld      r11,(0x80 + 8)*7(r10)
++      addi    r10,r10,0x80*8
++      bdnz    1b
++
++      mtctr   r9
++      ld      r9,PACA_EXRFI+EX_R9(r13)
++      ld      r10,PACA_EXRFI+EX_R10(r13)
++      ld      r11,PACA_EXRFI+EX_R11(r13)
++      blr
++
+ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+       SET_SCRATCH0(r13);
+       GET_PACA(r13);
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -860,7 +860,9 @@ early_initcall(disable_hardlockup_detect
+ static enum l1d_flush_type enabled_flush_types;
+ static void *l1d_flush_fallback_area;
+ static bool no_rfi_flush;
++static bool no_entry_flush;
+ bool rfi_flush;
++bool entry_flush;
+ static int __init handle_no_rfi_flush(char *p)
+ {
+@@ -870,6 +872,14 @@ static int __init handle_no_rfi_flush(ch
+ }
+ early_param("no_rfi_flush", handle_no_rfi_flush);
++static int __init handle_no_entry_flush(char *p)
++{
++      pr_info("entry-flush: disabled on command line.");
++      no_entry_flush = true;
++      return 0;
++}
++early_param("no_entry_flush", handle_no_entry_flush);
++
+ /*
+  * The RFI flush is not KPTI, but because users will see doco that says to use
+  * nopti we hijack that option here to also disable the RFI flush.
+@@ -901,6 +911,18 @@ void rfi_flush_enable(bool enable)
+       rfi_flush = enable;
+ }
++void entry_flush_enable(bool enable)
++{
++      if (enable) {
++              do_entry_flush_fixups(enabled_flush_types);
++              on_each_cpu(do_nothing, NULL, 1);
++      } else {
++              do_entry_flush_fixups(L1D_FLUSH_NONE);
++      }
++
++      entry_flush = enable;
++}
++
+ static void __ref init_fallback_flush(void)
+ {
+       u64 l1d_size, limit;
+@@ -959,10 +981,19 @@ void setup_rfi_flush(enum l1d_flush_type
+       enabled_flush_types = types;
+-      if (!no_rfi_flush && !cpu_mitigations_off())
++      if (!cpu_mitigations_off() && !no_rfi_flush)
+               rfi_flush_enable(enable);
+ }
++void setup_entry_flush(bool enable)
++{
++      if (cpu_mitigations_off())
++              return;
++
++      if (!no_entry_flush)
++              entry_flush_enable(enable);
++}
++
+ #ifdef CONFIG_DEBUG_FS
+ static int rfi_flush_set(void *data, u64 val)
+ {
+@@ -990,9 +1021,36 @@ static int rfi_flush_get(void *data, u64
+ DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
++static int entry_flush_set(void *data, u64 val)
++{
++      bool enable;
++
++      if (val == 1)
++              enable = true;
++      else if (val == 0)
++              enable = false;
++      else
++              return -EINVAL;
++
++      /* Only do anything if we're changing state */
++      if (enable != entry_flush)
++              entry_flush_enable(enable);
++
++      return 0;
++}
++
++static int entry_flush_get(void *data, u64 *val)
++{
++      *val = entry_flush ? 1 : 0;
++      return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
++
+ static __init int rfi_flush_debugfs_init(void)
+ {
+       debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
++      debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
+       return 0;
+ }
+ device_initcall(rfi_flush_debugfs_init);
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -132,6 +132,13 @@ SECTIONS
+       }
+       . = ALIGN(8);
++      __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
++              __start___entry_flush_fixup = .;
++              *(__entry_flush_fixup)
++              __stop___entry_flush_fixup = .;
++      }
++
++      . = ALIGN(8);
+       __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
+               __start___stf_exit_barrier_fixup = .;
+               *(__stf_exit_barrier_fixup)
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -234,6 +234,60 @@ void do_stf_barrier_fixups(enum stf_barr
+       do_stf_exit_barrier_fixups(types);
+ }
++void do_entry_flush_fixups(enum l1d_flush_type types)
++{
++      unsigned int instrs[3], *dest;
++      long *start, *end;
++      int i;
++
++      start = PTRRELOC(&__start___entry_flush_fixup);
++      end = PTRRELOC(&__stop___entry_flush_fixup);
++
++      instrs[0] = 0x60000000; /* nop */
++      instrs[1] = 0x60000000; /* nop */
++      instrs[2] = 0x60000000; /* nop */
++
++      i = 0;
++      if (types == L1D_FLUSH_FALLBACK) {
++              instrs[i++] = 0x7d4802a6; /* mflr r10           */
++              instrs[i++] = 0x60000000; /* branch patched below */
++              instrs[i++] = 0x7d4803a6; /* mtlr r10           */
++      }
++
++      if (types & L1D_FLUSH_ORI) {
++              instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++              instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
++      }
++
++      if (types & L1D_FLUSH_MTTRIG)
++              instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++      for (i = 0; start < end; start++, i++) {
++              dest = (void *)start + *start;
++
++              pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++              patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
++
++              if (types == L1D_FLUSH_FALLBACK)
++                      patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
++                                   BRANCH_SET_LINK);
++              else
++                      patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
++
++              patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
++      }
++
++      printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
++              (types == L1D_FLUSH_NONE)       ? "no" :
++              (types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
++              (types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
++                                                      ? "ori+mttrig type"
++                                                      : "ori type" :
++              (types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
++                                              : "unknown");
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+       unsigned int instrs[3], *dest;
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -122,12 +122,23 @@ static void pnv_setup_rfi_flush(void)
+                       type = L1D_FLUSH_ORI;
+       }
++      /*
++       * If we are non-Power9 bare metal, we don't need to flush on kernel
++       * entry: it fixes a P9 specific vulnerability.
++       */
++      if (!pvr_version_is(PVR_POWER9))
++              security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
++
+       enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
+                (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)   || \
+                 security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
+       setup_rfi_flush(type, enable);
+       setup_count_cache_flush();
++
++      enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++               security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
++      setup_entry_flush(enable);
+ }
+ static void __init pnv_setup_arch(void)
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -573,6 +573,10 @@ void pseries_setup_rfi_flush(void)
+       setup_rfi_flush(types, enable);
+       setup_count_cache_flush();
++
++      enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++               security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
++      setup_entry_flush(enable);
+ }
+ #ifdef CONFIG_PCI_IOV
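
The fixup machinery added above is worth unpacking: ENTRY_FLUSH_FIXUP_SECTION emits three nops at the call site and, via FTR_ENTRY_OFFSET 957b-958b, stores a self-relative offset in the __entry_flush_fixup section; do_entry_flush_fixups() then recovers each patch site with dest = (void *)start + *start. Below is a minimal userspace model of that resolution (variable names are illustrative, not kernel identifiers).

/*
 * Model of fixup-section offset resolution (illustrative, not
 * kernel code). At build time each table entry holds
 * (code_site - entry_address); at boot, adding the entry's own
 * address back yields the absolute address of the slot to patch.
 */
#include <stdio.h>

static unsigned int code_site[3];	/* stand-in for the nop slot in .text */
static long fixup_table[1];		/* stand-in for __entry_flush_fixup */

int main(void)
{
	/* what FTR_ENTRY_OFFSET 957b-958b records at assembly time */
	fixup_table[0] = (char *)code_site - (char *)fixup_table;

	/* what the loop in do_entry_flush_fixups() computes at boot */
	long *start = fixup_table;
	unsigned int *dest = (void *)((char *)start + *start);

	printf("resolved %p, expected %p\n", (void *)dest, (void *)code_site);
	return 0;
}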
diff --git a/queue-5.9/powerpc-only-include-kup-radix.h-for-64-bit-book3s.patch b/queue-5.9/powerpc-only-include-kup-radix.h-for-64-bit-book3s.patch
new file mode 100644 (file)
index 0000000..88f445a
--- /dev/null
@@ -0,0 +1,115 @@
+From foo@baz Fri Nov 20 07:57:18 AM CET 2020
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 20 Nov 2020 10:22:49 +1100
+Subject: powerpc: Only include kup-radix.h for 64-bit Book3S
+To: stable@vger.kernel.org
+Cc: dja@axtens.net
+Message-ID: <20201119232250.365304-5-dja@axtens.net>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 178d52c6e89c38d0553b0ac8b99927b11eb995b0 upstream.
+
+In kup.h we currently include kup-radix.h for all 64-bit builds, which
+includes Book3S and Book3E. The latter doesn't make sense, Book3E
+never uses the Radix MMU.
+
+This has worked up until now, but almost by accident, and the recent
+uaccess flush changes introduced a build breakage on Book3E because of
+the bad structure of the code.
+
+So disentangle things so that we only use kup-radix.h for Book3S. This
+requires some more stubs in kup.h and fixing an include in
+syscall_64.c.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/book3s/64/kup-radix.h |    4 ++--
+ arch/powerpc/include/asm/kup.h                 |   11 ++++++++---
+ arch/powerpc/kernel/syscall_64.c               |    2 +-
+ 3 files changed, 11 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+@@ -27,6 +27,7 @@
+ #endif
+ .endm
++#ifdef CONFIG_PPC_KUAP
+ .macro kuap_check_amr gpr1, gpr2
+ #ifdef CONFIG_PPC_KUAP_DEBUG
+       BEGIN_MMU_FTR_SECTION_NESTED(67)
+@@ -38,6 +39,7 @@
+       END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+ #endif
+ .endm
++#endif
+ .macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
+ #ifdef CONFIG_PPC_KUAP
+@@ -148,8 +150,6 @@ static inline unsigned long kuap_get_and
+       return 0UL;
+ }
+-static inline void kuap_check_amr(void) { }
+-
+ static inline unsigned long get_kuap(void)
+ {
+       return AMR_KUAP_BLOCKED;
+--- a/arch/powerpc/include/asm/kup.h
++++ b/arch/powerpc/include/asm/kup.h
+@@ -14,7 +14,7 @@
+ #define KUAP_CURRENT_WRITE    8
+ #define KUAP_CURRENT          (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
+-#ifdef CONFIG_PPC64
++#ifdef CONFIG_PPC_BOOK3S_64
+ #include <asm/book3s/64/kup-radix.h>
+ #endif
+ #ifdef CONFIG_PPC_8xx
+@@ -35,6 +35,9 @@
+ .macro kuap_check     current, gpr
+ .endm
++.macro kuap_check_amr gpr1, gpr2
++.endm
++
+ #endif
+ #else /* !__ASSEMBLY__ */
+@@ -60,19 +63,21 @@ bad_kuap_fault(struct pt_regs *regs, uns
+       return false;
+ }
++static inline void kuap_check_amr(void) { }
++
+ /*
+  * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
+  * the L1D cache after user accesses. Only include the empty stubs for other
+  * platforms.
+  */
+-#ifndef CONFIG_PPC64
++#ifndef CONFIG_PPC_BOOK3S_64
+ static inline void allow_user_access(void __user *to, const void __user *from,
+                                    unsigned long size, unsigned long dir) { }
+ static inline void prevent_user_access(void __user *to, const void __user *from,
+                                      unsigned long size, unsigned long dir) { }
+ static inline unsigned long prevent_user_access_return(void) { return 0UL; }
+ static inline void restore_user_access(unsigned long flags) { }
+-#endif /* CONFIG_PPC64 */
++#endif /* CONFIG_PPC_BOOK3S_64 */
+ #endif /* CONFIG_PPC_KUAP */
+ static inline void allow_read_from_user(const void __user *from, unsigned long size)
+--- a/arch/powerpc/kernel/syscall_64.c
++++ b/arch/powerpc/kernel/syscall_64.c
+@@ -2,7 +2,7 @@
+ #include <linux/err.h>
+ #include <asm/asm-prototypes.h>
+-#include <asm/book3s/64/kup-radix.h>
++#include <asm/kup.h>
+ #include <asm/cputime.h>
+ #include <asm/hw_irq.h>
+ #include <asm/kprobes.h>
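
The !KUAP stubs this patch shuffles between headers are load-bearing for the uaccess flush: as the comment added to get_kuap() in the first patch explains, the stub must return AMR_KUAP_BLOCKED rather than 0 so that restore_user_access() still triggers a flush when it closes a user-access window, even on configurations where KUAP itself does nothing. A userspace model of that pairing (a sketch only; the bool stands in for the static key, and set_kuap() is omitted):

/*
 * Model of the prevent/restore flush pairing (illustrative, not
 * kernel code). restore_user_access() flushes only when the saved
 * flags show access was blocked, so get_kuap() returning
 * AMR_KUAP_BLOCKED on !KUAP configs is what keeps the flush armed.
 */
#include <stdbool.h>
#include <stdio.h>

#define AMR_KUAP_BLOCKED 1UL

static bool uaccess_flush_key = true;	/* static key, modelled as a bool */

static unsigned long get_kuap(void) { return AMR_KUAP_BLOCKED; } /* !KUAP stub */

static void do_uaccess_flush(void) { puts("L1D flushed"); }

static unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	if (uaccess_flush_key)
		do_uaccess_flush();
	return flags;
}

static void restore_user_access(unsigned long flags)
{
	if (uaccess_flush_key && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();	/* skipped if flags said "not blocked" */
}

int main(void)
{
	unsigned long flags = prevent_user_access_return();
	/* ... interrupt code runs with user access blocked ... */
	restore_user_access(flags);	/* flushes: flags == AMR_KUAP_BLOCKED */
	return 0;
}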
diff --git a/queue-5.9/selftests-powerpc-entry-flush-test.patch b/queue-5.9/selftests-powerpc-entry-flush-test.patch
new file mode 100644 (file)
index 0000000..fe053a1
--- /dev/null
@@ -0,0 +1,244 @@
+From foo@baz Fri Nov 20 07:57:18 AM CET 2020
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 20 Nov 2020 10:22:50 +1100
+Subject: selftests/powerpc: entry flush test
+To: stable@vger.kernel.org
+Cc: dja@axtens.net
+Message-ID: <20201119232250.365304-6-dja@axtens.net>
+
+From: Daniel Axtens <dja@axtens.net>
+
+commit 89a83a0c69c81a25ce91002b90ca27ed86132a0a upstream.
+
+Add a test modelled on the RFI flush test which counts the number
+of L1D misses doing a simple syscall with the entry flush on and off.
+
+For simplicity of backporting, this test duplicates a lot of code from
+rfi_flush. We clean that up in the next patch.
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/powerpc/security/.gitignore    |    1 
+ tools/testing/selftests/powerpc/security/Makefile      |    2 
+ tools/testing/selftests/powerpc/security/entry_flush.c |  198 +++++++++++++++++
+ 3 files changed, 200 insertions(+), 1 deletion(-)
+ create mode 100644 tools/testing/selftests/powerpc/security/entry_flush.c
+
+--- a/tools/testing/selftests/powerpc/security/.gitignore
++++ b/tools/testing/selftests/powerpc/security/.gitignore
+@@ -1,2 +1,3 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ rfi_flush
++entry_flush
+--- a/tools/testing/selftests/powerpc/security/Makefile
++++ b/tools/testing/selftests/powerpc/security/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0+
+-TEST_GEN_PROGS := rfi_flush spectre_v2
++TEST_GEN_PROGS := rfi_flush entry_flush spectre_v2
+ top_srcdir = ../../../../..
+ CFLAGS += -I../../../../../usr/include
+--- /dev/null
++++ b/tools/testing/selftests/powerpc/security/entry_flush.c
+@@ -0,0 +1,198 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++/*
++ * Copyright 2018 IBM Corporation.
++ */
++
++#define __SANE_USERSPACE_TYPES__
++
++#include <sys/types.h>
++#include <stdint.h>
++#include <malloc.h>
++#include <unistd.h>
++#include <signal.h>
++#include <stdlib.h>
++#include <string.h>
++#include <stdio.h>
++#include "utils.h"
++
++#define CACHELINE_SIZE 128
++
++struct perf_event_read {
++      __u64 nr;
++      __u64 l1d_misses;
++};
++
++static inline __u64 load(void *addr)
++{
++      __u64 tmp;
++
++      asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr));
++
++      return tmp;
++}
++
++static void syscall_loop(char *p, unsigned long iterations,
++                       unsigned long zero_size)
++{
++      for (unsigned long i = 0; i < iterations; i++) {
++              for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE)
++                      load(p + j);
++              getppid();
++      }
++}
++
++static void sigill_handler(int signr, siginfo_t *info, void *unused)
++{
++      static int warned;
++      ucontext_t *ctx = (ucontext_t *)unused;
++      unsigned long *pc = &UCONTEXT_NIA(ctx);
++
++      /* mtspr 3,RS to check for move to DSCR below */
++      if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) {
++              if (!warned++)
++                      printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
++              *pc += 4;
++      } else {
++              printf("SIGILL at %p\n", pc);
++              abort();
++      }
++}
++
++static void set_dscr(unsigned long val)
++{
++      static int init;
++      struct sigaction sa;
++
++      if (!init) {
++              memset(&sa, 0, sizeof(sa));
++              sa.sa_sigaction = sigill_handler;
++              sa.sa_flags = SA_SIGINFO;
++              if (sigaction(SIGILL, &sa, NULL))
++                      perror("sigill_handler");
++              init = 1;
++      }
++
++      asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
++}
++
++int entry_flush_test(void)
++{
++      char *p;
++      int repetitions = 10;
++      int fd, passes = 0, iter, rc = 0;
++      struct perf_event_read v;
++      __u64 l1d_misses_total = 0;
++      unsigned long iterations = 100000, zero_size = 24 * 1024;
++      unsigned long l1d_misses_expected;
++      int rfi_flush_orig;
++      int entry_flush, entry_flush_orig;
++
++      SKIP_IF(geteuid() != 0);
++
++      // The PMU event we use only works on Power7 or later
++      SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
++
++      if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
++              perror("Unable to read powerpc/rfi_flush debugfs file");
++              SKIP_IF(1);
++      }
++
++      if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) {
++              perror("Unable to read powerpc/entry_flush debugfs file");
++              SKIP_IF(1);
++      }
++
++      if (rfi_flush_orig != 0) {
++              if (write_debugfs_file("powerpc/rfi_flush", 0) < 0) {
++                      perror("error writing to powerpc/rfi_flush debugfs file");
++                      FAIL_IF(1);
++              }
++      }
++
++      entry_flush = entry_flush_orig;
++
++      fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
++      FAIL_IF(fd < 0);
++
++      p = (char *)memalign(zero_size, CACHELINE_SIZE);
++
++      FAIL_IF(perf_event_enable(fd));
++
++      // disable L1 prefetching
++      set_dscr(1);
++
++      iter = repetitions;
++
++      /*
++       * We expect to see l1d miss for each cacheline access when entry_flush
++       * is set. Allow a small variation on this.
++       */
++      l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);
++
++again:
++      FAIL_IF(perf_event_reset(fd));
++
++      syscall_loop(p, iterations, zero_size);
++
++      FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));
++
++      if (entry_flush && v.l1d_misses >= l1d_misses_expected)
++              passes++;
++      else if (!entry_flush && v.l1d_misses < (l1d_misses_expected / 2))
++              passes++;
++
++      l1d_misses_total += v.l1d_misses;
++
++      while (--iter)
++              goto again;
++
++      if (passes < repetitions) {
++              printf("FAIL (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d failures]\n",
++                     entry_flush, l1d_misses_total, entry_flush ? '<' : '>',
++                     entry_flush ? repetitions * l1d_misses_expected :
++                     repetitions * l1d_misses_expected / 2,
++                     repetitions - passes, repetitions);
++              rc = 1;
++      } else {
++              printf("PASS (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d pass]\n",
++                     entry_flush, l1d_misses_total, entry_flush ? '>' : '<',
++                     entry_flush ? repetitions * l1d_misses_expected :
++                     repetitions * l1d_misses_expected / 2,
++                     passes, repetitions);
++      }
++
++      if (entry_flush == entry_flush_orig) {
++              entry_flush = !entry_flush_orig;
++              if (write_debugfs_file("powerpc/entry_flush", entry_flush) < 0) {
++                      perror("error writing to powerpc/entry_flush debugfs file");
++                      return 1;
++              }
++              iter = repetitions;
++              l1d_misses_total = 0;
++              passes = 0;
++              goto again;
++      }
++
++      perf_event_disable(fd);
++      close(fd);
++
++      set_dscr(0);
++
++      if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) {
++              perror("unable to restore original value of powerpc/rfi_flush debugfs file");
++              return 1;
++      }
++
++      if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) {
++              perror("unable to restore original value of powerpc/entry_flush debugfs file");
++              return 1;
++      }
++
++      return rc;
++}
++
++int main(int argc, char *argv[])
++{
++      return test_harness(entry_flush_test, "entry_flush_test");
++}
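
On the test's miss budget: with the constants in entry_flush.c above, l1d_misses_expected works out to 100000 * (24 * 1024 / 128 - 2) = 100000 * 190 = 19,000,000, i.e. roughly one L1D miss per cache line of the 24 KB buffer per syscall, minus a two-line allowance. A sketch of the same arithmetic (values copied from the test; not part of the kernel tree):

/* Worked example of the l1d_misses_expected computation. */
#include <stdio.h>

int main(void)
{
	unsigned long iterations = 100000, zero_size = 24 * 1024;
	unsigned long cacheline_size = 128;
	unsigned long expected = iterations * (zero_size / cacheline_size - 2);

	/* prints 19000000: with entry_flush on, the test wants at least
	 * this many misses per round; with it off, fewer than half */
	printf("%lu\n", expected);
	return 0;
}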
diff --git a/queue-5.9/selftests-powerpc-rfi_flush-disable-entry-flush-if-present.patch b/queue-5.9/selftests-powerpc-rfi_flush-disable-entry-flush-if-present.patch
new file mode 100644 (file)
index 0000000..64e4031
--- /dev/null
@@ -0,0 +1,104 @@
+From foo@baz Fri Nov 20 07:57:18 AM CET 2020
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 20 Nov 2020 10:22:46 +1100
+Subject: selftests/powerpc: rfi_flush: disable entry flush if present
+To: stable@vger.kernel.org
+Cc: dja@axtens.net
+Message-ID: <20201119232250.365304-2-dja@axtens.net>
+
+From: Russell Currey <ruscur@russell.cc>
+
+commit fcb48454c23c5679d1a2e252f127642e91b05cbe upstream.
+
+We are about to add an entry flush. The rfi (exit) flush test measures
+the number of L1D flushes over a syscall with the RFI flush enabled and
+disabled. But if the entry flush is also enabled, the effect of enabling
+and disabling the RFI flush is masked.
+
+If there is a debugfs entry for the entry flush, disable it during the RFI
+flush and restore it later.
+
+Reported-by: Spoorthy S <spoorts2@in.ibm.com>
+Signed-off-by: Russell Currey <ruscur@russell.cc>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/powerpc/security/rfi_flush.c |   35 +++++++++++++++----
+ 1 file changed, 29 insertions(+), 6 deletions(-)
+
+--- a/tools/testing/selftests/powerpc/security/rfi_flush.c
++++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
+@@ -50,16 +50,30 @@ int rfi_flush_test(void)
+       __u64 l1d_misses_total = 0;
+       unsigned long iterations = 100000, zero_size = 24 * 1024;
+       unsigned long l1d_misses_expected;
+-      int rfi_flush_org, rfi_flush;
++      int rfi_flush_orig, rfi_flush;
++      int have_entry_flush, entry_flush_orig;
+       SKIP_IF(geteuid() != 0);
+-      if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_org)) {
++      if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
+               perror("Unable to read powerpc/rfi_flush debugfs file");
+               SKIP_IF(1);
+       }
+-      rfi_flush = rfi_flush_org;
++      if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) {
++              have_entry_flush = 0;
++      } else {
++              have_entry_flush = 1;
++
++              if (entry_flush_orig != 0) {
++                      if (write_debugfs_file("powerpc/entry_flush", 0) < 0) {
++                              perror("error writing to powerpc/entry_flush debugfs file");
++                              return 1;
++                      }
++              }
++      }
++
++      rfi_flush = rfi_flush_orig;
+       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
+       FAIL_IF(fd < 0);
+@@ -68,6 +82,7 @@ int rfi_flush_test(void)
+       FAIL_IF(perf_event_enable(fd));
++      // disable L1 prefetching
+       set_dscr(1);
+       iter = repetitions;
+@@ -109,8 +124,8 @@ again:
+                      repetitions * l1d_misses_expected / 2,
+                      passes, repetitions);
+-      if (rfi_flush == rfi_flush_org) {
+-              rfi_flush = !rfi_flush_org;
++      if (rfi_flush == rfi_flush_orig) {
++              rfi_flush = !rfi_flush_orig;
+               if (write_debugfs_file("powerpc/rfi_flush", rfi_flush) < 0) {
+                       perror("error writing to powerpc/rfi_flush debugfs file");
+                       return 1;
+@@ -126,11 +141,19 @@ again:
+       set_dscr(0);
+-      if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_org) < 0) {
++      if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) {
+               perror("unable to restore original value of powerpc/rfi_flush debugfs file");
+               return 1;
+       }
++      if (have_entry_flush) {
++              if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) {
++                      perror("unable to restore original value of powerpc/entry_flush "
++                             "debugfs file");
++                      return 1;
++              }
++      }
++
+       return rc;
+ }
diff --git a/queue-5.9/series b/queue-5.9/series
new file mode 100644 (file)
index 0000000..66fc4ad
--- /dev/null
@@ -0,0 +1,5 @@
+selftests-powerpc-rfi_flush-disable-entry-flush-if-present.patch
+powerpc-64s-flush-l1d-on-kernel-entry.patch
+powerpc-64s-flush-l1d-after-user-accesses.patch
+powerpc-only-include-kup-radix.h-for-64-bit-book3s.patch
+selftests-powerpc-entry-flush-test.patch