From: Greg Kroah-Hartman Date: Fri, 29 Mar 2019 14:54:45 +0000 (+0100) Subject: 4.14-stable patches X-Git-Tag: v3.18.138~64 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=261fc112bc7260d9c2ea4e792b00283c06740d2b;p=thirdparty%2Fkernel%2Fstable-queue.git 4.14-stable patches added patches: powerpc-64-add-config_ppc_barrier_nospec.patch powerpc-64-call-setup_barrier_nospec-from-setup_arch.patch powerpc-64-disable-the-speculation-barrier-from-the-command-line.patch powerpc-64-make-meltdown-reporting-book3s-64-specific.patch powerpc-64-make-stf-barrier-ppc_book3s_64-specific.patch powerpc-64-use-barrier_nospec-in-syscall-entry.patch powerpc-64s-add-new-security-feature-flags-for-count-cache-flush.patch powerpc-64s-add-support-for-ori-barrier_nospec-patching.patch powerpc-64s-add-support-for-software-count-cache-flush.patch powerpc-64s-enable-barrier_nospec-based-on-firmware-settings.patch powerpc-64s-enhance-the-information-in-cpu_show_spectre_v1.patch powerpc-64s-patch-barrier_nospec-in-modules.patch powerpc-asm-add-a-patch_site-macro-helpers-for-patching-instructions.patch powerpc-fsl-add-barrier_nospec-implementation-for-nxp-powerpc-book3e.patch powerpc-fsl-add-infrastructure-to-fixup-branch-predictor-flush.patch powerpc-fsl-add-macro-to-flush-the-branch-predictor.patch powerpc-fsl-add-nospectre_v2-command-line-argument.patch powerpc-fsl-emulate-sprn_bucsr-register.patch powerpc-fsl-enable-runtime-patching-if-nospectre_v2-boot-arg-is-used.patch powerpc-fsl-fix-spectre_v2-mitigations-reporting.patch powerpc-fsl-fix-the-flush-of-branch-predictor.patch powerpc-fsl-fixed-warning-orphan-section-__btb_flush_fixup.patch powerpc-fsl-flush-branch-predictor-when-entering-kvm.patch powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-32-bit.patch powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-64bit.patch powerpc-fsl-sanitize-the-syscall-table-for-nxp-powerpc-32-bit-platforms.patch powerpc-fsl-update-spectre-v2-reporting.patch powerpc-powernv-query-firmware-for-count-cache-flush-settings.patch powerpc-pseries-query-hypervisor-for-count-cache-flush-settings.patch powerpc-security-fix-spectre_v2-reporting.patch powerpc-use-barrier_nospec-in-copy_from_user.patch powerpc64s-show-ori31-availability-in-spectre_v1-sysfs-file-not-v2.patch --- diff --git a/queue-4.14/powerpc-64-add-config_ppc_barrier_nospec.patch b/queue-4.14/powerpc-64-add-config_ppc_barrier_nospec.patch new file mode 100644 index 00000000000..aac4d98dd0f --- /dev/null +++ b/queue-4.14/powerpc-64-add-config_ppc_barrier_nospec.patch @@ -0,0 +1,162 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:58 +1100 +Subject: [PATCH stable v4.14 10/32] powerpc/64: Add CONFIG_PPC_BARRIER_NOSPEC +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-11-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit 179ab1cbf883575c3a585bcfc0f2160f1d22a149 upstream. + +Add a config symbol to encode which platforms support the +barrier_nospec speculation barrier. Currently this is just Book3S 64 +but we will add Book3E in a future patch. 
+ +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/Kconfig | 7 ++++++- + arch/powerpc/include/asm/barrier.h | 6 +++--- + arch/powerpc/include/asm/setup.h | 2 +- + arch/powerpc/kernel/Makefile | 3 ++- + arch/powerpc/kernel/module.c | 4 +++- + arch/powerpc/kernel/vmlinux.lds.S | 4 +++- + arch/powerpc/lib/feature-fixups.c | 6 ++++-- + 7 files changed, 22 insertions(+), 10 deletions(-) + +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -164,7 +164,7 @@ config PPC + select GENERIC_CLOCKEVENTS_BROADCAST if SMP + select GENERIC_CMOS_UPDATE + select GENERIC_CPU_AUTOPROBE +- select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64 ++ select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC + select GENERIC_IRQ_SHOW + select GENERIC_IRQ_SHOW_LEVEL + select GENERIC_SMP_IDLE_THREAD +@@ -236,6 +236,11 @@ config PPC + # Please keep this list sorted alphabetically. + # + ++config PPC_BARRIER_NOSPEC ++ bool ++ default y ++ depends on PPC_BOOK3S_64 ++ + config GENERIC_CSUM + def_bool n + +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -76,7 +76,7 @@ do { \ + ___p1; \ + }) + +-#ifdef CONFIG_PPC_BOOK3S_64 ++#ifdef CONFIG_PPC_BARRIER_NOSPEC + /* + * Prevent execution of subsequent instructions until preceding branches have + * been fully resolved and are no longer executing speculatively. +@@ -86,10 +86,10 @@ do { \ + // This also acts as a compiler barrier due to the memory clobber. + #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") + +-#else /* !CONFIG_PPC_BOOK3S_64 */ ++#else /* !CONFIG_PPC_BARRIER_NOSPEC */ + #define barrier_nospec_asm + #define barrier_nospec() +-#endif ++#endif /* CONFIG_PPC_BARRIER_NOSPEC */ + + #include + +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -56,7 +56,7 @@ void setup_barrier_nospec(void); + void do_barrier_nospec_fixups(bool enable); + extern bool barrier_nospec_enabled; + +-#ifdef CONFIG_PPC_BOOK3S_64 ++#ifdef CONFIG_PPC_BARRIER_NOSPEC + void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); + #else + static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; +--- a/arch/powerpc/kernel/Makefile ++++ b/arch/powerpc/kernel/Makefile +@@ -45,9 +45,10 @@ obj-$(CONFIG_VDSO32) += vdso32/ + obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o + obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o + obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o +-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o ++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o + obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o + obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o ++obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o + obj-$(CONFIG_PPC64) += vdso64/ + obj-$(CONFIG_ALTIVEC) += vecemu.o + obj-$(CONFIG_PPC_970_NAP) += idle_power4.o +--- a/arch/powerpc/kernel/module.c ++++ b/arch/powerpc/kernel/module.c +@@ -72,13 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr, + do_feature_fixups(powerpc_firmware_features, + (void *)sect->sh_addr, + (void *)sect->sh_addr + sect->sh_size); ++#endif /* CONFIG_PPC64 */ + ++#ifdef CONFIG_PPC_BARRIER_NOSPEC + sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); + if (sect != NULL) + do_barrier_nospec_fixups_range(barrier_nospec_enabled, + (void *)sect->sh_addr, + (void *)sect->sh_addr + sect->sh_size); +-#endif ++#endif /* CONFIG_PPC_BARRIER_NOSPEC */ + + sect = find_section(hdr, sechdrs, "__lwsync_fixup"); + if 
(sect != NULL) +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -153,14 +153,16 @@ SECTIONS + *(__rfi_flush_fixup) + __stop___rfi_flush_fixup = .; + } ++#endif /* CONFIG_PPC64 */ + ++#ifdef CONFIG_PPC_BARRIER_NOSPEC + . = ALIGN(8); + __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) { + __start___barrier_nospec_fixup = .; + *(__barrier_nospec_fixup) + __stop___barrier_nospec_fixup = .; + } +-#endif ++#endif /* CONFIG_PPC_BARRIER_NOSPEC */ + + EXCEPTION_TABLE(0) + +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -304,6 +304,9 @@ void do_barrier_nospec_fixups_range(bool + printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); + } + ++#endif /* CONFIG_PPC_BOOK3S_64 */ ++ ++#ifdef CONFIG_PPC_BARRIER_NOSPEC + void do_barrier_nospec_fixups(bool enable) + { + void *start, *end; +@@ -313,8 +316,7 @@ void do_barrier_nospec_fixups(bool enabl + + do_barrier_nospec_fixups_range(enable, start, end); + } +- +-#endif /* CONFIG_PPC_BOOK3S_64 */ ++#endif /* CONFIG_PPC_BARRIER_NOSPEC */ + + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) + { diff --git a/queue-4.14/powerpc-64-call-setup_barrier_nospec-from-setup_arch.patch b/queue-4.14/powerpc-64-call-setup_barrier_nospec-from-setup_arch.patch new file mode 100644 index 00000000000..bdfa69fad20 --- /dev/null +++ b/queue-4.14/powerpc-64-call-setup_barrier_nospec-from-setup_arch.patch @@ -0,0 +1,71 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:59 +1100 +Subject: [PATCH stable v4.14 11/32] powerpc/64: Call setup_barrier_nospec() from setup_arch() +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-12-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit af375eefbfb27cbb5b831984e66d724a40d26b5c upstream. + +Currently we require platform code to call setup_barrier_nospec(). But +if we add an empty definition for the !CONFIG_PPC_BARRIER_NOSPEC case +then we can call it in setup_arch(). + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/setup.h | 4 ++++ + arch/powerpc/kernel/setup-common.c | 2 ++ + arch/powerpc/platforms/powernv/setup.c | 1 - + arch/powerpc/platforms/pseries/setup.c | 1 - + 4 files changed, 6 insertions(+), 2 deletions(-) + +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -52,7 +52,11 @@ enum l1d_flush_type { + + void setup_rfi_flush(enum l1d_flush_type, bool enable); + void do_rfi_flush_fixups(enum l1d_flush_type types); ++#ifdef CONFIG_PPC_BARRIER_NOSPEC + void setup_barrier_nospec(void); ++#else ++static inline void setup_barrier_nospec(void) { }; ++#endif + void do_barrier_nospec_fixups(bool enable); + extern bool barrier_nospec_enabled; + +--- a/arch/powerpc/kernel/setup-common.c ++++ b/arch/powerpc/kernel/setup-common.c +@@ -937,6 +937,8 @@ void __init setup_arch(char **cmdline_p) + if (ppc_md.setup_arch) + ppc_md.setup_arch(); + ++ setup_barrier_nospec(); ++ + paging_init(); + + /* Initialize the MMU context management stuff. 
*/ +--- a/arch/powerpc/platforms/powernv/setup.c ++++ b/arch/powerpc/platforms/powernv/setup.c +@@ -123,7 +123,6 @@ static void pnv_setup_rfi_flush(void) + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); +- setup_barrier_nospec(); + } + + static void __init pnv_setup_arch(void) +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -534,7 +534,6 @@ void pseries_setup_rfi_flush(void) + security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); + + setup_rfi_flush(types, enable); +- setup_barrier_nospec(); + } + + static void __init pSeries_setup_arch(void) diff --git a/queue-4.14/powerpc-64-disable-the-speculation-barrier-from-the-command-line.patch b/queue-4.14/powerpc-64-disable-the-speculation-barrier-from-the-command-line.patch new file mode 100644 index 00000000000..2de32ad2ed8 --- /dev/null +++ b/queue-4.14/powerpc-64-disable-the-speculation-barrier-from-the-command-line.patch @@ -0,0 +1,54 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:56 +1100 +Subject: [PATCH stable v4.14 08/32] powerpc/64: Disable the speculation barrier from the command line +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-9-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit cf175dc315f90185128fb061dc05b6fbb211aa2f upstream. + +The speculation barrier can be disabled from the command line +with the parameter: "nospectre_v1". + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/security.c | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -16,6 +16,7 @@ + unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; + + bool barrier_nospec_enabled; ++static bool no_nospec; + + static void enable_barrier_nospec(bool enable) + { +@@ -42,9 +43,18 @@ void setup_barrier_nospec(void) + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); + +- enable_barrier_nospec(enable); ++ if (!no_nospec) ++ enable_barrier_nospec(enable); + } + ++static int __init handle_nospectre_v1(char *p) ++{ ++ no_nospec = true; ++ ++ return 0; ++} ++early_param("nospectre_v1", handle_nospectre_v1); ++ + #ifdef CONFIG_DEBUG_FS + static int barrier_nospec_set(void *data, u64 val) + { diff --git a/queue-4.14/powerpc-64-make-meltdown-reporting-book3s-64-specific.patch b/queue-4.14/powerpc-64-make-meltdown-reporting-book3s-64-specific.patch new file mode 100644 index 00000000000..5f554bf3233 --- /dev/null +++ b/queue-4.14/powerpc-64-make-meltdown-reporting-book3s-64-specific.patch @@ -0,0 +1,44 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:00 +1100 +Subject: [PATCH stable v4.14 12/32] powerpc/64: Make meltdown reporting Book3S 64 specific +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-13-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 406d2b6ae3420f5bb2b3db6986dc6f0b6dbb637b upstream. + +In a subsequent patch we will enable building security.c for Book3E. 
+However the NXP platforms are not vulnerable to Meltdown, so make the +Meltdown vulnerability reporting PPC_BOOK3S_64 specific. + +Signed-off-by: Diana Craciun +[mpe: Split out of larger patch] +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/security.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -92,6 +92,7 @@ static __init int barrier_nospec_debugfs + device_initcall(barrier_nospec_debugfs_init); + #endif /* CONFIG_DEBUG_FS */ + ++#ifdef CONFIG_PPC_BOOK3S_64 + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) + { + bool thread_priv; +@@ -124,6 +125,7 @@ ssize_t cpu_show_meltdown(struct device + + return sprintf(buf, "Vulnerable\n"); + } ++#endif + + ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) + { diff --git a/queue-4.14/powerpc-64-make-stf-barrier-ppc_book3s_64-specific.patch b/queue-4.14/powerpc-64-make-stf-barrier-ppc_book3s_64-specific.patch new file mode 100644 index 00000000000..64d67e53f7e --- /dev/null +++ b/queue-4.14/powerpc-64-make-stf-barrier-ppc_book3s_64-specific.patch @@ -0,0 +1,39 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:57 +1100 +Subject: [PATCH stable v4.14 09/32] powerpc/64: Make stf barrier PPC_BOOK3S_64 specific. +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-10-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 6453b532f2c8856a80381e6b9a1f5ea2f12294df upstream. + +NXP Book3E platforms are not vulnerable to speculative store +bypass, so make the mitigations PPC_BOOK3S_64 specific. + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/security.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -176,6 +176,7 @@ ssize_t cpu_show_spectre_v2(struct devic + return s.len; + } + ++#ifdef CONFIG_PPC_BOOK3S_64 + /* + * Store-forwarding barrier support. + */ +@@ -323,3 +324,4 @@ static __init int stf_barrier_debugfs_in + } + device_initcall(stf_barrier_debugfs_init); + #endif /* CONFIG_DEBUG_FS */ ++#endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/queue-4.14/powerpc-64-use-barrier_nospec-in-syscall-entry.patch b/queue-4.14/powerpc-64-use-barrier_nospec-in-syscall-entry.patch new file mode 100644 index 00000000000..e8686a5fef5 --- /dev/null +++ b/queue-4.14/powerpc-64-use-barrier_nospec-in-syscall-entry.patch @@ -0,0 +1,50 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:53 +1100 +Subject: [PATCH stable v4.14 05/32] powerpc/64: Use barrier_nospec in syscall entry +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-6-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit 51973a815c6b46d7b23b68d6af371ad1c9d503ca upstream. + +Our syscall entry is done in assembly so patch in an explicit +barrier_nospec. + +Based on a patch by Michal Suchanek. 
+ +Signed-off-by: Michal Suchanek +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/entry_64.S | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/arch/powerpc/kernel/entry_64.S ++++ b/arch/powerpc/kernel/entry_64.S +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + #include + #ifdef CONFIG_PPC_BOOK3S + #include +@@ -179,6 +180,15 @@ system_call: /* label this so stack tr + clrldi r8,r8,32 + 15: + slwi r0,r0,4 ++ ++ barrier_nospec_asm ++ /* ++ * Prevent the load of the handler below (based on the user-passed ++ * system call number) being speculatively executed until the test ++ * against NR_syscalls and branch to .Lsyscall_enosys above has ++ * committed. ++ */ ++ + ldx r12,r11,r0 /* Fetch system call handler [ptr] */ + mtctr r12 + bctrl /* Call handler */ diff --git a/queue-4.14/powerpc-64s-add-new-security-feature-flags-for-count-cache-flush.patch b/queue-4.14/powerpc-64s-add-new-security-feature-flags-for-count-cache-flush.patch new file mode 100644 index 00000000000..8b419829537 --- /dev/null +++ b/queue-4.14/powerpc-64s-add-new-security-feature-flags-for-count-cache-flush.patch @@ -0,0 +1,44 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:04 +1100 +Subject: [PATCH stable v4.14 16/32] powerpc/64s: Add new security feature flags for count cache flush +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-17-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit dc8c6cce9a26a51fc19961accb978217a3ba8c75 upstream. + +Add security feature flags to indicate the need for software to flush +the count cache on context switch, and for the presence of a hardware +assisted count cache flush. 
+ +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/security_features.h | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/arch/powerpc/include/asm/security_features.h ++++ b/arch/powerpc/include/asm/security_features.h +@@ -59,6 +59,9 @@ static inline bool security_ftr_enabled( + // Indirect branch prediction cache disabled + #define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull + ++// bcctr 2,0,0 triggers a hardware assisted count cache flush ++#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull ++ + + // Features indicating need for Spectre/Meltdown mitigations + +@@ -74,6 +77,9 @@ static inline bool security_ftr_enabled( + // Firmware configuration indicates user favours security over performance + #define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull + ++// Software required to flush count cache on context switch ++#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull ++ + + // Features enabled by default + #define SEC_FTR_DEFAULT \ diff --git a/queue-4.14/powerpc-64s-add-support-for-ori-barrier_nospec-patching.patch b/queue-4.14/powerpc-64s-add-support-for-ori-barrier_nospec-patching.patch new file mode 100644 index 00000000000..c0a89d39d5a --- /dev/null +++ b/queue-4.14/powerpc-64s-add-support-for-ori-barrier_nospec-patching.patch @@ -0,0 +1,154 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:49 +1100 +Subject: [PATCH stable v4.14 01/32] powerpc/64s: Add support for ori barrier_nospec patching +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-2-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Michal Suchanek + +commit 2eea7f067f495e33b8b116b35b5988ab2b8aec55 upstream. + +Based on the RFI patching. This is required to be able to disable the +speculation barrier. + +Only one barrier type is supported and it does nothing when the +firmware does not enable it. Also re-patching modules is not supported +So the only meaningful thing that can be done is patching out the +speculation barrier at boot when the user says it is not wanted. + +Signed-off-by: Michal Suchanek +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/barrier.h | 2 +- + arch/powerpc/include/asm/feature-fixups.h | 9 +++++++++ + arch/powerpc/include/asm/setup.h | 1 + + arch/powerpc/kernel/security.c | 9 +++++++++ + arch/powerpc/kernel/vmlinux.lds.S | 7 +++++++ + arch/powerpc/lib/feature-fixups.c | 27 +++++++++++++++++++++++++++ + 6 files changed, 54 insertions(+), 1 deletion(-) + +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -81,7 +81,7 @@ do { \ + * Prevent execution of subsequent instructions until preceding branches have + * been fully resolved and are no longer executing speculatively. + */ +-#define barrier_nospec_asm ori 31,31,0 ++#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop + + // This also acts as a compiler barrier due to the memory clobber. 
+ #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") +--- a/arch/powerpc/include/asm/feature-fixups.h ++++ b/arch/powerpc/include/asm/feature-fixups.h +@@ -211,6 +211,14 @@ label##3: \ + FTR_ENTRY_OFFSET 951b-952b; \ + .popsection; + ++#define NOSPEC_BARRIER_FIXUP_SECTION \ ++953: \ ++ .pushsection __barrier_nospec_fixup,"a"; \ ++ .align 2; \ ++954: \ ++ FTR_ENTRY_OFFSET 953b-954b; \ ++ .popsection; ++ + + #ifndef __ASSEMBLY__ + #include +@@ -219,6 +227,7 @@ extern long stf_barrier_fallback; + extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; + extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; ++extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; + + void apply_feature_fixups(void); + void setup_feature_keys(void); +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -52,6 +52,7 @@ enum l1d_flush_type { + + void setup_rfi_flush(enum l1d_flush_type, bool enable); + void do_rfi_flush_fixups(enum l1d_flush_type types); ++void do_barrier_nospec_fixups(bool enable); + + #endif /* !__ASSEMBLY__ */ + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -10,10 +10,19 @@ + + #include + #include ++#include + + + unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; + ++static bool barrier_nospec_enabled; ++ ++static void enable_barrier_nospec(bool enable) ++{ ++ barrier_nospec_enabled = enable; ++ do_barrier_nospec_fixups(enable); ++} ++ + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) + { + bool thread_priv; +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -153,6 +153,13 @@ SECTIONS + *(__rfi_flush_fixup) + __stop___rfi_flush_fixup = .; + } ++ ++ . = ALIGN(8); ++ __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) { ++ __start___barrier_nospec_fixup = .; ++ *(__barrier_nospec_fixup) ++ __stop___barrier_nospec_fixup = .; ++ } + #endif + + EXCEPTION_TABLE(0) +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -277,6 +277,33 @@ void do_rfi_flush_fixups(enum l1d_flush_ + (types & L1D_FLUSH_MTTRIG) ? 
"mttrig type" + : "unknown"); + } ++ ++void do_barrier_nospec_fixups(bool enable) ++{ ++ unsigned int instr, *dest; ++ long *start, *end; ++ int i; ++ ++ start = PTRRELOC(&__start___barrier_nospec_fixup), ++ end = PTRRELOC(&__stop___barrier_nospec_fixup); ++ ++ instr = 0x60000000; /* nop */ ++ ++ if (enable) { ++ pr_info("barrier-nospec: using ORI speculation barrier\n"); ++ instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */ ++ } ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ patch_instruction(dest, instr); ++ } ++ ++ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); ++} ++ + #endif /* CONFIG_PPC_BOOK3S_64 */ + + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) diff --git a/queue-4.14/powerpc-64s-add-support-for-software-count-cache-flush.patch b/queue-4.14/powerpc-64s-add-support-for-software-count-cache-flush.patch new file mode 100644 index 00000000000..f9a2ff97fc4 --- /dev/null +++ b/queue-4.14/powerpc-64s-add-support-for-software-count-cache-flush.patch @@ -0,0 +1,269 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:05 +1100 +Subject: [PATCH stable v4.14 17/32] powerpc/64s: Add support for software count cache flush +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-18-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit ee13cb249fabdff8b90aaff61add347749280087 upstream. + +Some CPU revisions support a mode where the count cache needs to be +flushed by software on context switch. Additionally some revisions may +have a hardware accelerated flush, in which case the software flush +sequence can be shortened. + +If we detect the appropriate flag from firmware we patch a branch +into _switch() which takes us to a count cache flush sequence. + +That sequence in turn may be patched to return early if we detect that +the CPU supports accelerating the flush sequence in hardware. + +Add debugfs support for reporting the state of the flush, as well as +runtime disabling it. + +And modify the spectre_v2 sysfs file to report the state of the +software flush. 
+ +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/asm-prototypes.h | 6 + + arch/powerpc/include/asm/security_features.h | 1 + arch/powerpc/kernel/entry_64.S | 54 ++++++++++++++ + arch/powerpc/kernel/security.c | 98 +++++++++++++++++++++++++-- + 4 files changed, 154 insertions(+), 5 deletions(-) + +--- a/arch/powerpc/include/asm/asm-prototypes.h ++++ b/arch/powerpc/include/asm/asm-prototypes.h +@@ -126,4 +126,10 @@ extern int __ucmpdi2(u64, u64); + void _mcount(void); + unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); + ++/* Patch sites */ ++extern s32 patch__call_flush_count_cache; ++extern s32 patch__flush_count_cache_return; ++ ++extern long flush_count_cache; ++ + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ +--- a/arch/powerpc/include/asm/security_features.h ++++ b/arch/powerpc/include/asm/security_features.h +@@ -22,6 +22,7 @@ enum stf_barrier_type { + + void setup_stf_barrier(void); + void do_stf_barrier_fixups(enum stf_barrier_type types); ++void setup_count_cache_flush(void); + + static inline void security_ftr_set(unsigned long feature) + { +--- a/arch/powerpc/kernel/entry_64.S ++++ b/arch/powerpc/kernel/entry_64.S +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -497,6 +498,57 @@ _GLOBAL(ret_from_kernel_thread) + li r3,0 + b .Lsyscall_exit + ++#ifdef CONFIG_PPC_BOOK3S_64 ++ ++#define FLUSH_COUNT_CACHE \ ++1: nop; \ ++ patch_site 1b, patch__call_flush_count_cache ++ ++ ++#define BCCTR_FLUSH .long 0x4c400420 ++ ++.macro nops number ++ .rept \number ++ nop ++ .endr ++.endm ++ ++.balign 32 ++.global flush_count_cache ++flush_count_cache: ++ /* Save LR into r9 */ ++ mflr r9 ++ ++ .rept 64 ++ bl .+4 ++ .endr ++ b 1f ++ nops 6 ++ ++ .balign 32 ++ /* Restore LR */ ++1: mtlr r9 ++ li r9,0x7fff ++ mtctr r9 ++ ++ BCCTR_FLUSH ++ ++2: nop ++ patch_site 2b patch__flush_count_cache_return ++ ++ nops 3 ++ ++ .rept 278 ++ .balign 32 ++ BCCTR_FLUSH ++ nops 7 ++ .endr ++ ++ blr ++#else ++#define FLUSH_COUNT_CACHE ++#endif /* CONFIG_PPC_BOOK3S_64 */ ++ + /* + * This routine switches between two different tasks. The process + * state of one is saved on its kernel stack. Then the state +@@ -528,6 +580,8 @@ _GLOBAL(_switch) + std r23,_CCR(r1) + std r1,KSP(r3) /* Set old stack pointer */ + ++ FLUSH_COUNT_CACHE ++ + /* + * On SMP kernels, care must be taken because a task may be + * scheduled off CPUx and on to CPUy. 
Memory ordering must be +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -9,12 +9,21 @@ + #include + + #include ++#include ++#include + #include + #include + + + unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; + ++enum count_cache_flush_type { ++ COUNT_CACHE_FLUSH_NONE = 0x1, ++ COUNT_CACHE_FLUSH_SW = 0x2, ++ COUNT_CACHE_FLUSH_HW = 0x4, ++}; ++static enum count_cache_flush_type count_cache_flush_type; ++ + bool barrier_nospec_enabled; + static bool no_nospec; + +@@ -159,17 +168,29 @@ ssize_t cpu_show_spectre_v2(struct devic + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); + +- if (bcs || ccd) { ++ if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { ++ bool comma = false; + seq_buf_printf(&s, "Mitigation: "); + +- if (bcs) ++ if (bcs) { + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); ++ comma = true; ++ } ++ ++ if (ccd) { ++ if (comma) ++ seq_buf_printf(&s, ", "); ++ seq_buf_printf(&s, "Indirect branch cache disabled"); ++ comma = true; ++ } + +- if (bcs && ccd) ++ if (comma) + seq_buf_printf(&s, ", "); + +- if (ccd) +- seq_buf_printf(&s, "Indirect branch cache disabled"); ++ seq_buf_printf(&s, "Software count cache flush"); ++ ++ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) ++ seq_buf_printf(&s, "(hardware accelerated)"); + } else + seq_buf_printf(&s, "Vulnerable"); + +@@ -326,4 +347,71 @@ static __init int stf_barrier_debugfs_in + } + device_initcall(stf_barrier_debugfs_init); + #endif /* CONFIG_DEBUG_FS */ ++ ++static void toggle_count_cache_flush(bool enable) ++{ ++ if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { ++ patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); ++ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; ++ pr_info("count-cache-flush: software flush disabled.\n"); ++ return; ++ } ++ ++ patch_branch_site(&patch__call_flush_count_cache, ++ (u64)&flush_count_cache, BRANCH_SET_LINK); ++ ++ if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { ++ count_cache_flush_type = COUNT_CACHE_FLUSH_SW; ++ pr_info("count-cache-flush: full software flush sequence enabled.\n"); ++ return; ++ } ++ ++ patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR); ++ count_cache_flush_type = COUNT_CACHE_FLUSH_HW; ++ pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); ++} ++ ++void setup_count_cache_flush(void) ++{ ++ toggle_count_cache_flush(true); ++} ++ ++#ifdef CONFIG_DEBUG_FS ++static int count_cache_flush_set(void *data, u64 val) ++{ ++ bool enable; ++ ++ if (val == 1) ++ enable = true; ++ else if (val == 0) ++ enable = false; ++ else ++ return -EINVAL; ++ ++ toggle_count_cache_flush(enable); ++ ++ return 0; ++} ++ ++static int count_cache_flush_get(void *data, u64 *val) ++{ ++ if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE) ++ *val = 0; ++ else ++ *val = 1; ++ ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get, ++ count_cache_flush_set, "%llu\n"); ++ ++static __init int count_cache_flush_debugfs_init(void) ++{ ++ debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root, ++ NULL, &fops_count_cache_flush); ++ return 0; ++} ++device_initcall(count_cache_flush_debugfs_init); ++#endif /* CONFIG_DEBUG_FS */ + #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/queue-4.14/powerpc-64s-enable-barrier_nospec-based-on-firmware-settings.patch 
b/queue-4.14/powerpc-64s-enable-barrier_nospec-based-on-firmware-settings.patch new file mode 100644 index 00000000000..23c7ac0d727 --- /dev/null +++ b/queue-4.14/powerpc-64s-enable-barrier_nospec-based-on-firmware-settings.patch @@ -0,0 +1,127 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:51 +1100 +Subject: [PATCH stable v4.14 03/32] powerpc/64s: Enable barrier_nospec based on firmware settings +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-4-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Michal Suchanek + +commit cb3d6759a93c6d0aea1c10deb6d00e111c29c19c upstream. + +Check what firmware told us and enable/disable the barrier_nospec as +appropriate. + +We err on the side of enabling the barrier, as it's no-op on older +systems, see the comment for more detail. + +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/setup.h | 1 + arch/powerpc/kernel/security.c | 59 +++++++++++++++++++++++++++++++++ + arch/powerpc/platforms/powernv/setup.c | 1 + arch/powerpc/platforms/pseries/setup.c | 1 + 4 files changed, 62 insertions(+) + +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -52,6 +52,7 @@ enum l1d_flush_type { + + void setup_rfi_flush(enum l1d_flush_type, bool enable); + void do_rfi_flush_fixups(enum l1d_flush_type types); ++void setup_barrier_nospec(void); + void do_barrier_nospec_fixups(bool enable); + extern bool barrier_nospec_enabled; + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -23,6 +23,65 @@ static void enable_barrier_nospec(bool e + do_barrier_nospec_fixups(enable); + } + ++void setup_barrier_nospec(void) ++{ ++ bool enable; ++ ++ /* ++ * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well. ++ * But there's a good reason not to. The two flags we check below are ++ * both are enabled by default in the kernel, so if the hcall is not ++ * functional they will be enabled. ++ * On a system where the host firmware has been updated (so the ori ++ * functions as a barrier), but on which the hypervisor (KVM/Qemu) has ++ * not been updated, we would like to enable the barrier. Dropping the ++ * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is ++ * we potentially enable the barrier on systems where the host firmware ++ * is not updated, but that's harmless as it's a no-op. ++ */ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); ++ ++ enable_barrier_nospec(enable); ++} ++ ++#ifdef CONFIG_DEBUG_FS ++static int barrier_nospec_set(void *data, u64 val) ++{ ++ switch (val) { ++ case 0: ++ case 1: ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (!!val == !!barrier_nospec_enabled) ++ return 0; ++ ++ enable_barrier_nospec(!!val); ++ ++ return 0; ++} ++ ++static int barrier_nospec_get(void *data, u64 *val) ++{ ++ *val = barrier_nospec_enabled ? 
1 : 0; ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec, ++ barrier_nospec_get, barrier_nospec_set, "%llu\n"); ++ ++static __init int barrier_nospec_debugfs_init(void) ++{ ++ debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL, ++ &fops_barrier_nospec); ++ return 0; ++} ++device_initcall(barrier_nospec_debugfs_init); ++#endif /* CONFIG_DEBUG_FS */ ++ + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) + { + bool thread_priv; +--- a/arch/powerpc/platforms/powernv/setup.c ++++ b/arch/powerpc/platforms/powernv/setup.c +@@ -123,6 +123,7 @@ static void pnv_setup_rfi_flush(void) + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); ++ setup_barrier_nospec(); + } + + static void __init pnv_setup_arch(void) +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -534,6 +534,7 @@ void pseries_setup_rfi_flush(void) + security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); + + setup_rfi_flush(types, enable); ++ setup_barrier_nospec(); + } + + static void __init pSeries_setup_arch(void) diff --git a/queue-4.14/powerpc-64s-enhance-the-information-in-cpu_show_spectre_v1.patch b/queue-4.14/powerpc-64s-enhance-the-information-in-cpu_show_spectre_v1.patch new file mode 100644 index 00000000000..c5b3c6104fa --- /dev/null +++ b/queue-4.14/powerpc-64s-enhance-the-information-in-cpu_show_spectre_v1.patch @@ -0,0 +1,36 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:54 +1100 +Subject: [PATCH stable v4.14 06/32] powerpc/64s: Enhance the information in cpu_show_spectre_v1() +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-7-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Michal Suchanek + +commit a377514519b9a20fa1ea9adddbb4129573129cef upstream. + +We now have barrier_nospec as mitigation so print it in +cpu_show_spectre_v1() when enabled. + +Signed-off-by: Michal Suchanek +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/security.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -120,6 +120,9 @@ ssize_t cpu_show_spectre_v1(struct devic + if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) + return sprintf(buf, "Not affected\n"); + ++ if (barrier_nospec_enabled) ++ return sprintf(buf, "Mitigation: __user pointer sanitization\n"); ++ + return sprintf(buf, "Vulnerable\n"); + } + diff --git a/queue-4.14/powerpc-64s-patch-barrier_nospec-in-modules.patch b/queue-4.14/powerpc-64s-patch-barrier_nospec-in-modules.patch new file mode 100644 index 00000000000..532801d94c1 --- /dev/null +++ b/queue-4.14/powerpc-64s-patch-barrier_nospec-in-modules.patch @@ -0,0 +1,111 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:50 +1100 +Subject: [PATCH stable v4.14 02/32] powerpc/64s: Patch barrier_nospec in modules +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-3-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Michal Suchanek + +commit 815069ca57c142eb71d27439bc27f41a433a67b3 upstream. 
+ +Note that unlike RFI which is patched only in kernel the nospec state +reflects settings at the time the module was loaded. + +Iterating all modules and re-patching every time the settings change +is not implemented. + +Based on lwsync patching. + +Signed-off-by: Michal Suchanek +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/setup.h | 7 +++++++ + arch/powerpc/kernel/module.c | 6 ++++++ + arch/powerpc/kernel/security.c | 2 +- + arch/powerpc/lib/feature-fixups.c | 16 +++++++++++++--- + 4 files changed, 27 insertions(+), 4 deletions(-) + +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -53,6 +53,13 @@ enum l1d_flush_type { + void setup_rfi_flush(enum l1d_flush_type, bool enable); + void do_rfi_flush_fixups(enum l1d_flush_type types); + void do_barrier_nospec_fixups(bool enable); ++extern bool barrier_nospec_enabled; ++ ++#ifdef CONFIG_PPC_BOOK3S_64 ++void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); ++#else ++static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; ++#endif + + #endif /* !__ASSEMBLY__ */ + +--- a/arch/powerpc/kernel/module.c ++++ b/arch/powerpc/kernel/module.c +@@ -72,6 +72,12 @@ int module_finalize(const Elf_Ehdr *hdr, + do_feature_fixups(powerpc_firmware_features, + (void *)sect->sh_addr, + (void *)sect->sh_addr + sect->sh_size); ++ ++ sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); ++ if (sect != NULL) ++ do_barrier_nospec_fixups_range(barrier_nospec_enabled, ++ (void *)sect->sh_addr, ++ (void *)sect->sh_addr + sect->sh_size); + #endif + + sect = find_section(hdr, sechdrs, "__lwsync_fixup"); +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -15,7 +15,7 @@ + + unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; + +-static bool barrier_nospec_enabled; ++bool barrier_nospec_enabled; + + static void enable_barrier_nospec(bool enable) + { +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -278,14 +278,14 @@ void do_rfi_flush_fixups(enum l1d_flush_ + : "unknown"); + } + +-void do_barrier_nospec_fixups(bool enable) ++void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) + { + unsigned int instr, *dest; + long *start, *end; + int i; + +- start = PTRRELOC(&__start___barrier_nospec_fixup), +- end = PTRRELOC(&__stop___barrier_nospec_fixup); ++ start = fixup_start; ++ end = fixup_end; + + instr = 0x60000000; /* nop */ + +@@ -304,6 +304,16 @@ void do_barrier_nospec_fixups(bool enabl + printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); + } + ++void do_barrier_nospec_fixups(bool enable) ++{ ++ void *start, *end; ++ ++ start = PTRRELOC(&__start___barrier_nospec_fixup), ++ end = PTRRELOC(&__stop___barrier_nospec_fixup); ++ ++ do_barrier_nospec_fixups_range(enable, start, end); ++} ++ + #endif /* CONFIG_PPC_BOOK3S_64 */ + + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) diff --git a/queue-4.14/powerpc-asm-add-a-patch_site-macro-helpers-for-patching-instructions.patch b/queue-4.14/powerpc-asm-add-a-patch_site-macro-helpers-for-patching-instructions.patch new file mode 100644 index 00000000000..cf4dbacebb9 --- /dev/null +++ b/queue-4.14/powerpc-asm-add-a-patch_site-macro-helpers-for-patching-instructions.patch @@ -0,0 +1,93 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:03 +1100 +Subject: [PATCH stable v4.14 15/32] 
powerpc/asm: Add a patch_site macro & helpers for patching instructions +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-16-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit 06d0bbc6d0f56dacac3a79900e9a9a0d5972d818 upstream. + +Add a macro and some helper C functions for patching single asm +instructions. + +The gas macro means we can do something like: + + 1: nop + patch_site 1b, patch__foo + +Which is less visually distracting than defining a GLOBAL symbol at 1, +and also doesn't pollute the symbol table which can confuse eg. perf. + +These are obviously similar to our existing feature sections, but are +not automatically patched based on CPU/MMU features, rather they are +designed to be manually patched by C code at some arbitrary point. + +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/code-patching-asm.h | 18 ++++++++++++++++++ + arch/powerpc/include/asm/code-patching.h | 2 ++ + arch/powerpc/lib/code-patching.c | 16 ++++++++++++++++ + 3 files changed, 36 insertions(+) + create mode 100644 arch/powerpc/include/asm/code-patching-asm.h + +--- /dev/null ++++ b/arch/powerpc/include/asm/code-patching-asm.h +@@ -0,0 +1,18 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Copyright 2018, Michael Ellerman, IBM Corporation. ++ */ ++#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H ++#define _ASM_POWERPC_CODE_PATCHING_ASM_H ++ ++/* Define a "site" that can be patched */ ++.macro patch_site label name ++ .pushsection ".rodata" ++ .balign 4 ++ .global \name ++\name: ++ .4byte \label - . ++ .popsection ++.endm ++ ++#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */ +--- a/arch/powerpc/include/asm/code-patching.h ++++ b/arch/powerpc/include/asm/code-patching.h +@@ -32,6 +32,8 @@ unsigned int create_cond_branch(const un + int patch_branch(unsigned int *addr, unsigned long target, int flags); + int patch_instruction(unsigned int *addr, unsigned int instr); + int raw_patch_instruction(unsigned int *addr, unsigned int instr); ++int patch_instruction_site(s32 *addr, unsigned int instr); ++int patch_branch_site(s32 *site, unsigned long target, int flags); + + int instr_is_relative_branch(unsigned int instr); + int instr_is_relative_link_branch(unsigned int instr); +--- a/arch/powerpc/lib/code-patching.c ++++ b/arch/powerpc/lib/code-patching.c +@@ -206,6 +206,22 @@ int patch_branch(unsigned int *addr, uns + return patch_instruction(addr, create_branch(addr, target, flags)); + } + ++int patch_branch_site(s32 *site, unsigned long target, int flags) ++{ ++ unsigned int *addr; ++ ++ addr = (unsigned int *)((unsigned long)site + *site); ++ return patch_instruction(addr, create_branch(addr, target, flags)); ++} ++ ++int patch_instruction_site(s32 *site, unsigned int instr) ++{ ++ unsigned int *addr; ++ ++ addr = (unsigned int *)((unsigned long)site + *site); ++ return patch_instruction(addr, instr); ++} ++ + bool is_offset_in_branch_range(long offset) + { + /* diff --git a/queue-4.14/powerpc-fsl-add-barrier_nospec-implementation-for-nxp-powerpc-book3e.patch b/queue-4.14/powerpc-fsl-add-barrier_nospec-implementation-for-nxp-powerpc-book3e.patch new file mode 100644 index 00000000000..cb0f1e3b0c9 --- /dev/null +++ b/queue-4.14/powerpc-fsl-add-barrier_nospec-implementation-for-nxp-powerpc-book3e.patch @@ -0,0 +1,100 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:01 +1100 
+Subject: [PATCH stable v4.14 13/32] powerpc/fsl: Add barrier_nospec implementation for NXP PowerPC Book3E +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-14-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit ebcd1bfc33c7a90df941df68a6e5d4018c022fba upstream. + +Implement the barrier_nospec as a isync;sync instruction sequence. +The implementation uses the infrastructure built for BOOK3S 64. + +Signed-off-by: Diana Craciun +[mpe: Split out of larger patch] +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/Kconfig | 2 +- + arch/powerpc/include/asm/barrier.h | 8 +++++++- + arch/powerpc/lib/feature-fixups.c | 31 +++++++++++++++++++++++++++++++ + 3 files changed, 39 insertions(+), 2 deletions(-) + +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -239,7 +239,7 @@ config PPC + config PPC_BARRIER_NOSPEC + bool + default y +- depends on PPC_BOOK3S_64 ++ depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E + + config GENERIC_CSUM + def_bool n +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -76,12 +76,18 @@ do { \ + ___p1; \ + }) + ++#ifdef CONFIG_PPC_BOOK3S_64 ++#define NOSPEC_BARRIER_SLOT nop ++#elif defined(CONFIG_PPC_FSL_BOOK3E) ++#define NOSPEC_BARRIER_SLOT nop; nop ++#endif ++ + #ifdef CONFIG_PPC_BARRIER_NOSPEC + /* + * Prevent execution of subsequent instructions until preceding branches have + * been fully resolved and are no longer executing speculatively. + */ +-#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop ++#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT + + // This also acts as a compiler barrier due to the memory clobber. 
+ #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -318,6 +318,37 @@ void do_barrier_nospec_fixups(bool enabl + } + #endif /* CONFIG_PPC_BARRIER_NOSPEC */ + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) ++{ ++ unsigned int instr[2], *dest; ++ long *start, *end; ++ int i; ++ ++ start = fixup_start; ++ end = fixup_end; ++ ++ instr[0] = PPC_INST_NOP; ++ instr[1] = PPC_INST_NOP; ++ ++ if (enable) { ++ pr_info("barrier-nospec: using isync; sync as speculation barrier\n"); ++ instr[0] = PPC_INST_ISYNC; ++ instr[1] = PPC_INST_SYNC; ++ } ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ patch_instruction(dest, instr[0]); ++ patch_instruction(dest + 1, instr[1]); ++ } ++ ++ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); ++} ++#endif /* CONFIG_PPC_FSL_BOOK3E */ ++ + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) + { + long *start, *end; diff --git a/queue-4.14/powerpc-fsl-add-infrastructure-to-fixup-branch-predictor-flush.patch b/queue-4.14/powerpc-fsl-add-infrastructure-to-fixup-branch-predictor-flush.patch new file mode 100644 index 00000000000..7fac827ab25 --- /dev/null +++ b/queue-4.14/powerpc-fsl-add-infrastructure-to-fixup-branch-predictor-flush.patch @@ -0,0 +1,120 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:08 +1100 +Subject: [PATCH stable v4.14 20/32] powerpc/fsl: Add infrastructure to fixup branch predictor flush +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-21-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 76a5eaa38b15dda92cd6964248c39b5a6f3a4e9d upstream. + +In order to protect against speculation attacks (Spectre +variant 2) on NXP PowerPC platforms, the branch predictor +should be flushed when the privillege level is changed. +This patch is adding the infrastructure to fixup at runtime +the code sections that are performing the branch predictor flush +depending on a boot arg parameter which is added later in a +separate patch. 
+ +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/feature-fixups.h | 12 ++++++++++++ + arch/powerpc/include/asm/setup.h | 2 ++ + arch/powerpc/kernel/vmlinux.lds.S | 8 ++++++++ + arch/powerpc/lib/feature-fixups.c | 23 +++++++++++++++++++++++ + 4 files changed, 45 insertions(+) + +--- a/arch/powerpc/include/asm/feature-fixups.h ++++ b/arch/powerpc/include/asm/feature-fixups.h +@@ -219,6 +219,17 @@ label##3: \ + FTR_ENTRY_OFFSET 953b-954b; \ + .popsection; + ++#define START_BTB_FLUSH_SECTION \ ++955: \ ++ ++#define END_BTB_FLUSH_SECTION \ ++956: \ ++ .pushsection __btb_flush_fixup,"a"; \ ++ .align 2; \ ++957: \ ++ FTR_ENTRY_OFFSET 955b-957b; \ ++ FTR_ENTRY_OFFSET 956b-957b; \ ++ .popsection; + + #ifndef __ASSEMBLY__ + #include +@@ -228,6 +239,7 @@ extern long __start___stf_entry_barrier_ + extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; + extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; ++extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; + + void apply_feature_fixups(void); + void setup_feature_keys(void); +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -66,6 +66,8 @@ void do_barrier_nospec_fixups_range(bool + static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; + #endif + ++void do_btb_flush_fixups(void); ++ + #endif /* !__ASSEMBLY__ */ + + #endif /* _ASM_POWERPC_SETUP_H */ +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -164,6 +164,14 @@ SECTIONS + } + #endif /* CONFIG_PPC_BARRIER_NOSPEC */ + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++ . 
= ALIGN(8); ++ __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { ++ __start__btb_flush_fixup = .; ++ *(__btb_flush_fixup) ++ __stop__btb_flush_fixup = .; ++ } ++#endif + EXCEPTION_TABLE(0) + + NOTES :kernel :notes +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -347,6 +347,29 @@ void do_barrier_nospec_fixups_range(bool + + printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); + } ++ ++static void patch_btb_flush_section(long *curr) ++{ ++ unsigned int *start, *end; ++ ++ start = (void *)curr + *curr; ++ end = (void *)curr + *(curr + 1); ++ for (; start < end; start++) { ++ pr_devel("patching dest %lx\n", (unsigned long)start); ++ patch_instruction(start, PPC_INST_NOP); ++ } ++} ++ ++void do_btb_flush_fixups(void) ++{ ++ long *start, *end; ++ ++ start = PTRRELOC(&__start__btb_flush_fixup); ++ end = PTRRELOC(&__stop__btb_flush_fixup); ++ ++ for (; start < end; start += 2) ++ patch_btb_flush_section(start); ++} + #endif /* CONFIG_PPC_FSL_BOOK3E */ + + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) diff --git a/queue-4.14/powerpc-fsl-add-macro-to-flush-the-branch-predictor.patch b/queue-4.14/powerpc-fsl-add-macro-to-flush-the-branch-predictor.patch new file mode 100644 index 00000000000..fcfab2f5ef1 --- /dev/null +++ b/queue-4.14/powerpc-fsl-add-macro-to-flush-the-branch-predictor.patch @@ -0,0 +1,41 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:09 +1100 +Subject: [PATCH stable v4.14 21/32] powerpc/fsl: Add macro to flush the branch predictor +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-22-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 1cbf8990d79ff69da8ad09e8a3df014e1494462b upstream. + +The BUCSR register can be used to invalidate the entries in the +branch prediction mechanisms. + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/ppc_asm.h | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/arch/powerpc/include/asm/ppc_asm.h ++++ b/arch/powerpc/include/asm/ppc_asm.h +@@ -802,4 +802,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) + stringify_in_c(.long (_target) - . ;) \ + stringify_in_c(.previous) + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++#define BTB_FLUSH(reg) \ ++ lis reg,BUCSR_INIT@h; \ ++ ori reg,reg,BUCSR_INIT@l; \ ++ mtspr SPRN_BUCSR,reg; \ ++ isync; ++#else ++#define BTB_FLUSH(reg) ++#endif /* CONFIG_PPC_FSL_BOOK3E */ ++ + #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/queue-4.14/powerpc-fsl-add-nospectre_v2-command-line-argument.patch b/queue-4.14/powerpc-fsl-add-nospectre_v2-command-line-argument.patch new file mode 100644 index 00000000000..ec4b6ead995 --- /dev/null +++ b/queue-4.14/powerpc-fsl-add-nospectre_v2-command-line-argument.patch @@ -0,0 +1,76 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:12 +1100 +Subject: [PATCH stable v4.14 24/32] powerpc/fsl: Add nospectre_v2 command line argument +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-25-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit f633a8ad636efb5d4bba1a047d4a0f1ef719aa06 upstream. 
+ +When the command line argument is present, the Spectre variant 2 +mitigations are disabled. + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/setup.h | 5 +++++ + arch/powerpc/kernel/security.c | 21 +++++++++++++++++++++ + 2 files changed, 26 insertions(+) + +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -66,6 +66,11 @@ void do_barrier_nospec_fixups_range(bool + static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; + #endif + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++void setup_spectre_v2(void); ++#else ++static inline void setup_spectre_v2(void) {}; ++#endif + void do_btb_flush_fixups(void); + + #endif /* !__ASSEMBLY__ */ +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -26,6 +26,10 @@ static enum count_cache_flush_type count + + bool barrier_nospec_enabled; + static bool no_nospec; ++static bool btb_flush_enabled; ++#ifdef CONFIG_PPC_FSL_BOOK3E ++static bool no_spectrev2; ++#endif + + static void enable_barrier_nospec(bool enable) + { +@@ -101,6 +105,23 @@ static __init int barrier_nospec_debugfs + device_initcall(barrier_nospec_debugfs_init); + #endif /* CONFIG_DEBUG_FS */ + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++static int __init handle_nospectre_v2(char *p) ++{ ++ no_spectrev2 = true; ++ ++ return 0; ++} ++early_param("nospectre_v2", handle_nospectre_v2); ++void setup_spectre_v2(void) ++{ ++ if (no_spectrev2) ++ do_btb_flush_fixups(); ++ else ++ btb_flush_enabled = true; ++} ++#endif /* CONFIG_PPC_FSL_BOOK3E */ ++ + #ifdef CONFIG_PPC_BOOK3S_64 + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) + { diff --git a/queue-4.14/powerpc-fsl-emulate-sprn_bucsr-register.patch b/queue-4.14/powerpc-fsl-emulate-sprn_bucsr-register.patch new file mode 100644 index 00000000000..1bcf794a301 --- /dev/null +++ b/queue-4.14/powerpc-fsl-emulate-sprn_bucsr-register.patch @@ -0,0 +1,44 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:11 +1100 +Subject: [PATCH stable v4.14 23/32] powerpc/fsl: Emulate SPRN_BUCSR register +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-24-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 98518c4d8728656db349f875fcbbc7c126d4c973 upstream. + +In order to flush the branch predictor the guest kernel performs +writes to the BUCSR register which is hypervisor privilleged. However, +the branch predictor is flushed at each KVM entry, so the branch +predictor has been already flushed, so just return as soon as possible +to guest. + +Signed-off-by: Diana Craciun +[mpe: Tweak comment formatting] +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kvm/e500_emulate.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/arch/powerpc/kvm/e500_emulate.c ++++ b/arch/powerpc/kvm/e500_emulate.c +@@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struc + vcpu->arch.pwrmgtcr0 = spr_val; + break; + ++ case SPRN_BUCSR: ++ /* ++ * If we are here, it means that we have already flushed the ++ * branch predictor, so just return to guest. 
++ */ ++ break; ++ + /* extra exceptions */ + #ifdef CONFIG_SPE_POSSIBLE + case SPRN_IVOR32: diff --git a/queue-4.14/powerpc-fsl-enable-runtime-patching-if-nospectre_v2-boot-arg-is-used.patch b/queue-4.14/powerpc-fsl-enable-runtime-patching-if-nospectre_v2-boot-arg-is-used.patch new file mode 100644 index 00000000000..48e618f2b55 --- /dev/null +++ b/queue-4.14/powerpc-fsl-enable-runtime-patching-if-nospectre_v2-boot-arg-is-used.patch @@ -0,0 +1,34 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:16 +1100 +Subject: [PATCH stable v4.14 28/32] powerpc/fsl: Enable runtime patching if nospectre_v2 boot arg is used +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-29-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 3bc8ea8603ae4c1e09aca8de229ad38b8091fcb3 upstream. + +If the user choses not to use the mitigations, replace +the code sequence with nops. + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/setup-common.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/arch/powerpc/kernel/setup-common.c ++++ b/arch/powerpc/kernel/setup-common.c +@@ -938,6 +938,7 @@ void __init setup_arch(char **cmdline_p) + ppc_md.setup_arch(); + + setup_barrier_nospec(); ++ setup_spectre_v2(); + + paging_init(); + diff --git a/queue-4.14/powerpc-fsl-fix-spectre_v2-mitigations-reporting.patch b/queue-4.14/powerpc-fsl-fix-spectre_v2-mitigations-reporting.patch new file mode 100644 index 00000000000..3ef3c696656 --- /dev/null +++ b/queue-4.14/powerpc-fsl-fix-spectre_v2-mitigations-reporting.patch @@ -0,0 +1,41 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:10 +1100 +Subject: [PATCH stable v4.14 22/32] powerpc/fsl: Fix spectre_v2 mitigations reporting +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-23-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 7d8bad99ba5a22892f0cad6881289fdc3875a930 upstream. + +Currently for CONFIG_PPC_FSL_BOOK3E the spectre_v2 file is incorrect: + + $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2 + "Mitigation: Software count cache flush" + +Which is wrong. Fix it to report vulnerable for now. 
+ +Fixes: ee13cb249fab ("powerpc/64s: Add support for software count cache flush") +Cc: stable@vger.kernel.org # v4.19+ +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/security.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -22,7 +22,7 @@ enum count_cache_flush_type { + COUNT_CACHE_FLUSH_SW = 0x2, + COUNT_CACHE_FLUSH_HW = 0x4, + }; +-static enum count_cache_flush_type count_cache_flush_type; ++static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; + + bool barrier_nospec_enabled; + static bool no_nospec; diff --git a/queue-4.14/powerpc-fsl-fix-the-flush-of-branch-predictor.patch b/queue-4.14/powerpc-fsl-fix-the-flush-of-branch-predictor.patch new file mode 100644 index 00000000000..7e3cce18c72 --- /dev/null +++ b/queue-4.14/powerpc-fsl-fix-the-flush-of-branch-predictor.patch @@ -0,0 +1,47 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:19 +1100 +Subject: [PATCH stable v4.14 31/32] powerpc/fsl: Fix the flush of branch predictor. +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-32-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Christophe Leroy + +commit 27da80719ef132cf8c80eb406d5aeb37dddf78cc upstream. + +The commit identified below adds MC_BTB_FLUSH macro only when +CONFIG_PPC_FSL_BOOK3E is defined. This results in the following error +on some configs (seen several times with kisskb randconfig_defconfig) + +arch/powerpc/kernel/exceptions-64e.S:576: Error: Unrecognized opcode: `mc_btb_flush' +make[3]: *** [scripts/Makefile.build:367: arch/powerpc/kernel/exceptions-64e.o] Error 1 +make[2]: *** [scripts/Makefile.build:492: arch/powerpc/kernel] Error 2 +make[1]: *** [Makefile:1043: arch/powerpc] Error 2 +make: *** [Makefile:152: sub-make] Error 2 + +This patch adds a blank definition of MC_BTB_FLUSH for other cases. 
+ +Fixes: 10c5e83afd4a ("powerpc/fsl: Flush the branch predictor at each kernel entry (64bit)") +Cc: Diana Craciun +Signed-off-by: Christophe Leroy +Reviewed-by: Daniel Axtens +Reviewed-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/exceptions-64e.S | 1 + + 1 file changed, 1 insertion(+) + +--- a/arch/powerpc/kernel/exceptions-64e.S ++++ b/arch/powerpc/kernel/exceptions-64e.S +@@ -348,6 +348,7 @@ ret_from_mc_except: + #define GEN_BTB_FLUSH + #define CRIT_BTB_FLUSH + #define DBG_BTB_FLUSH ++#define MC_BTB_FLUSH + #define GDBELL_BTB_FLUSH + #endif + diff --git a/queue-4.14/powerpc-fsl-fixed-warning-orphan-section-__btb_flush_fixup.patch b/queue-4.14/powerpc-fsl-fixed-warning-orphan-section-__btb_flush_fixup.patch new file mode 100644 index 00000000000..32fa74d4789 --- /dev/null +++ b/queue-4.14/powerpc-fsl-fixed-warning-orphan-section-__btb_flush_fixup.patch @@ -0,0 +1,67 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:18 +1100 +Subject: [PATCH stable v4.14 30/32] powerpc/fsl: Fixed warning: orphan section `__btb_flush_fixup' +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-31-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 039daac5526932ec731e4499613018d263af8b3e upstream. + +Fixed the following build warning: +powerpc-linux-gnu-ld: warning: orphan section `__btb_flush_fixup' from +`arch/powerpc/kernel/head_44x.o' being placed in section +`__btb_flush_fixup'. + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/head_booke.h | 18 ++++++++++++------ + 1 file changed, 12 insertions(+), 6 deletions(-) + +--- a/arch/powerpc/kernel/head_booke.h ++++ b/arch/powerpc/kernel/head_booke.h +@@ -32,6 +32,16 @@ + */ + #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++#define BOOKE_CLEAR_BTB(reg) \ ++START_BTB_FLUSH_SECTION \ ++ BTB_FLUSH(reg) \ ++END_BTB_FLUSH_SECTION ++#else ++#define BOOKE_CLEAR_BTB(reg) ++#endif ++ ++ + #define NORMAL_EXCEPTION_PROLOG(intno) \ + mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ + mfspr r10, SPRN_SPRG_THREAD; \ +@@ -43,9 +53,7 @@ + andi. r11, r11, MSR_PR; /* check whether user or kernel */\ + mr r11, r1; \ + beq 1f; \ +-START_BTB_FLUSH_SECTION \ +- BTB_FLUSH(r11) \ +-END_BTB_FLUSH_SECTION \ ++ BOOKE_CLEAR_BTB(r11) \ + /* if from user, start at top of this thread's kernel stack */ \ + lwz r11, THREAD_INFO-THREAD(r10); \ + ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ +@@ -131,9 +139,7 @@ END_BTB_FLUSH_SECTION \ + stw r9,_CCR(r8); /* save CR on stack */\ + mfspr r11,exc_level_srr1; /* check whether user or kernel */\ + DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ +-START_BTB_FLUSH_SECTION \ +- BTB_FLUSH(r10) \ +-END_BTB_FLUSH_SECTION \ ++ BOOKE_CLEAR_BTB(r10) \ + andi. 
r11,r11,MSR_PR; \ + mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ diff --git a/queue-4.14/powerpc-fsl-flush-branch-predictor-when-entering-kvm.patch b/queue-4.14/powerpc-fsl-flush-branch-predictor-when-entering-kvm.patch new file mode 100644 index 00000000000..bf262803a56 --- /dev/null +++ b/queue-4.14/powerpc-fsl-flush-branch-predictor-when-entering-kvm.patch @@ -0,0 +1,38 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:15 +1100 +Subject: [PATCH stable v4.14 27/32] powerpc/fsl: Flush branch predictor when entering KVM +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-28-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit e7aa61f47b23afbec41031bc47ca8d6cb6516abc upstream. + +Switching from the guest to host is another place +where the speculative accesses can be exploited. +Flush the branch predictor when entering KVM. + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kvm/bookehv_interrupts.S | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/arch/powerpc/kvm/bookehv_interrupts.S ++++ b/arch/powerpc/kvm/bookehv_interrupts.S +@@ -75,6 +75,10 @@ + PPC_LL r1, VCPU_HOST_STACK(r4) + PPC_LL r2, HOST_R2(r1) + ++START_BTB_FLUSH_SECTION ++ BTB_FLUSH(r10) ++END_BTB_FLUSH_SECTION ++ + mfspr r10, SPRN_PID + lwz r8, VCPU_HOST_PID(r4) + PPC_LL r11, VCPU_SHARED(r4) diff --git a/queue-4.14/powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-32-bit.patch b/queue-4.14/powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-32-bit.patch new file mode 100644 index 00000000000..8d8f7186b66 --- /dev/null +++ b/queue-4.14/powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-32-bit.patch @@ -0,0 +1,83 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:14 +1100 +Subject: [PATCH stable v4.14 26/32] powerpc/fsl: Flush the branch predictor at each kernel entry (32 bit) +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-27-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 7fef436295bf6c05effe682c8797dfcb0deb112a upstream. + +In order to protect against speculation attacks on +indirect branches, the branch predictor is flushed at +kernel entry to protect for the following situations: +- userspace process attacking another userspace process +- userspace process attacking the kernel +Basically when the privillege level change (i.e.the kernel +is entered), the branch predictor state is flushed. + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/head_booke.h | 6 ++++++ + arch/powerpc/kernel/head_fsl_booke.S | 15 +++++++++++++++ + 2 files changed, 21 insertions(+) + +--- a/arch/powerpc/kernel/head_booke.h ++++ b/arch/powerpc/kernel/head_booke.h +@@ -43,6 +43,9 @@ + andi. 
r11, r11, MSR_PR; /* check whether user or kernel */\ + mr r11, r1; \ + beq 1f; \ ++START_BTB_FLUSH_SECTION \ ++ BTB_FLUSH(r11) \ ++END_BTB_FLUSH_SECTION \ + /* if from user, start at top of this thread's kernel stack */ \ + lwz r11, THREAD_INFO-THREAD(r10); \ + ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ +@@ -128,6 +131,9 @@ + stw r9,_CCR(r8); /* save CR on stack */\ + mfspr r11,exc_level_srr1; /* check whether user or kernel */\ + DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ ++START_BTB_FLUSH_SECTION \ ++ BTB_FLUSH(r10) \ ++END_BTB_FLUSH_SECTION \ + andi. r11,r11,MSR_PR; \ + mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ +--- a/arch/powerpc/kernel/head_fsl_booke.S ++++ b/arch/powerpc/kernel/head_fsl_booke.S +@@ -452,6 +452,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) + mfcr r13 + stw r13, THREAD_NORMSAVE(3)(r10) + DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 ++START_BTB_FLUSH_SECTION ++ mfspr r11, SPRN_SRR1 ++ andi. r10,r11,MSR_PR ++ beq 1f ++ BTB_FLUSH(r10) ++1: ++END_BTB_FLUSH_SECTION + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the +@@ -546,6 +553,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) + mfcr r13 + stw r13, THREAD_NORMSAVE(3)(r10) + DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 ++START_BTB_FLUSH_SECTION ++ mfspr r11, SPRN_SRR1 ++ andi. r10,r11,MSR_PR ++ beq 1f ++ BTB_FLUSH(r10) ++1: ++END_BTB_FLUSH_SECTION ++ + mfspr r10, SPRN_SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the diff --git a/queue-4.14/powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-64bit.patch b/queue-4.14/powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-64bit.patch new file mode 100644 index 00000000000..88fe2a2286f --- /dev/null +++ b/queue-4.14/powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-64bit.patch @@ -0,0 +1,103 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:13 +1100 +Subject: [PATCH stable v4.14 25/32] powerpc/fsl: Flush the branch predictor at each kernel entry (64bit) +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-26-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit 10c5e83afd4a3f01712d97d3bb1ae34d5b74a185 upstream. + +In order to protect against speculation attacks on +indirect branches, the branch predictor is flushed at +kernel entry to protect for the following situations: +- userspace process attacking another userspace process +- userspace process attacking the kernel +Basically when the privillege level change (i.e. the +kernel is entered), the branch predictor state is flushed. 
+ +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/entry_64.S | 5 +++++ + arch/powerpc/kernel/exceptions-64e.S | 26 +++++++++++++++++++++++++- + arch/powerpc/mm/tlb_low_64e.S | 7 +++++++ + 3 files changed, 37 insertions(+), 1 deletion(-) + +--- a/arch/powerpc/kernel/entry_64.S ++++ b/arch/powerpc/kernel/entry_64.S +@@ -78,6 +78,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) + std r0,GPR0(r1) + std r10,GPR1(r1) + beq 2f /* if from kernel mode */ ++#ifdef CONFIG_PPC_FSL_BOOK3E ++START_BTB_FLUSH_SECTION ++ BTB_FLUSH(r10) ++END_BTB_FLUSH_SECTION ++#endif + ACCOUNT_CPU_USER_ENTRY(r13, r10, r11) + 2: std r2,GPR2(r1) + std r3,GPR3(r1) +--- a/arch/powerpc/kernel/exceptions-64e.S ++++ b/arch/powerpc/kernel/exceptions-64e.S +@@ -295,7 +295,8 @@ ret_from_mc_except: + andi. r10,r11,MSR_PR; /* save stack pointer */ \ + beq 1f; /* branch around if supervisor */ \ + ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ +-1: cmpdi cr1,r1,0; /* check if SP makes sense */ \ ++1: type##_BTB_FLUSH \ ++ cmpdi cr1,r1,0; /* check if SP makes sense */ \ + bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ + mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */ + +@@ -327,6 +328,29 @@ ret_from_mc_except: + #define SPRN_MC_SRR0 SPRN_MCSRR0 + #define SPRN_MC_SRR1 SPRN_MCSRR1 + ++#ifdef CONFIG_PPC_FSL_BOOK3E ++#define GEN_BTB_FLUSH \ ++ START_BTB_FLUSH_SECTION \ ++ beq 1f; \ ++ BTB_FLUSH(r10) \ ++ 1: \ ++ END_BTB_FLUSH_SECTION ++ ++#define CRIT_BTB_FLUSH \ ++ START_BTB_FLUSH_SECTION \ ++ BTB_FLUSH(r10) \ ++ END_BTB_FLUSH_SECTION ++ ++#define DBG_BTB_FLUSH CRIT_BTB_FLUSH ++#define MC_BTB_FLUSH CRIT_BTB_FLUSH ++#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH ++#else ++#define GEN_BTB_FLUSH ++#define CRIT_BTB_FLUSH ++#define DBG_BTB_FLUSH ++#define GDBELL_BTB_FLUSH ++#endif ++ + #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ + EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) + +--- a/arch/powerpc/mm/tlb_low_64e.S ++++ b/arch/powerpc/mm/tlb_low_64e.S +@@ -69,6 +69,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) + std r15,EX_TLB_R15(r12) + std r10,EX_TLB_CR(r12) + #ifdef CONFIG_PPC_FSL_BOOK3E ++START_BTB_FLUSH_SECTION ++ mfspr r11, SPRN_SRR1 ++ andi. r10,r11,MSR_PR ++ beq 1f ++ BTB_FLUSH(r10) ++1: ++END_BTB_FLUSH_SECTION + std r7,EX_TLB_R7(r12) + #endif + TLB_MISS_PROLOG_STATS diff --git a/queue-4.14/powerpc-fsl-sanitize-the-syscall-table-for-nxp-powerpc-32-bit-platforms.patch b/queue-4.14/powerpc-fsl-sanitize-the-syscall-table-for-nxp-powerpc-32-bit-platforms.patch new file mode 100644 index 00000000000..87797717006 --- /dev/null +++ b/queue-4.14/powerpc-fsl-sanitize-the-syscall-table-for-nxp-powerpc-32-bit-platforms.patch @@ -0,0 +1,49 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:02 +1100 +Subject: [PATCH stable v4.14 14/32] powerpc/fsl: Sanitize the syscall table for NXP PowerPC 32 bit platforms +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-15-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit c28218d4abbf4f2035495334d8bfcba64bda4787 upstream. + +Used barrier_nospec to sanitize the syscall table. 
+ +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/entry_32.S | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/arch/powerpc/kernel/entry_32.S ++++ b/arch/powerpc/kernel/entry_32.S +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + + /* + * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE. +@@ -358,6 +359,15 @@ syscall_dotrace_cont: + ori r10,r10,sys_call_table@l + slwi r0,r0,2 + bge- 66f ++ ++ barrier_nospec_asm ++ /* ++ * Prevent the load of the handler below (based on the user-passed ++ * system call number) being speculatively executed until the test ++ * against NR_syscalls and branch to .66f above has ++ * committed. ++ */ ++ + lwzx r10,r10,r0 /* Fetch system call handler [ptr] */ + mtlr r10 + addi r9,r1,STACK_FRAME_OVERHEAD diff --git a/queue-4.14/powerpc-fsl-update-spectre-v2-reporting.patch b/queue-4.14/powerpc-fsl-update-spectre-v2-reporting.patch new file mode 100644 index 00000000000..78ffe154a89 --- /dev/null +++ b/queue-4.14/powerpc-fsl-update-spectre-v2-reporting.patch @@ -0,0 +1,39 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:17 +1100 +Subject: [PATCH stable v4.14 29/32] powerpc/fsl: Update Spectre v2 reporting +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-30-mpe@ellerman.id.au> + +From: Michael Ellerman + +From: Diana Craciun + +commit dfa88658fb0583abb92e062c7a9cd5a5b94f2a46 upstream. + +Report branch predictor state flush as a mitigation for +Spectre variant 2. + +Signed-off-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/security.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -212,8 +212,11 @@ ssize_t cpu_show_spectre_v2(struct devic + + if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) + seq_buf_printf(&s, "(hardware accelerated)"); +- } else ++ } else if (btb_flush_enabled) { ++ seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); ++ } else { + seq_buf_printf(&s, "Vulnerable"); ++ } + + seq_buf_printf(&s, "\n"); + diff --git a/queue-4.14/powerpc-powernv-query-firmware-for-count-cache-flush-settings.patch b/queue-4.14/powerpc-powernv-query-firmware-for-count-cache-flush-settings.patch new file mode 100644 index 00000000000..ee1e11adaf5 --- /dev/null +++ b/queue-4.14/powerpc-powernv-query-firmware-for-count-cache-flush-settings.patch @@ -0,0 +1,45 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:07 +1100 +Subject: [PATCH stable v4.14 19/32] powerpc/powernv: Query firmware for count cache flush settings +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-20-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit 99d54754d3d5f896a8f616b0b6520662bc99d66b upstream. + +Look for fw-features properties to determine the appropriate settings +for the count cache flush, and then call the generic powerpc code to +set it up based on the security feature flags. 
+ +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/platforms/powernv/setup.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/arch/powerpc/platforms/powernv/setup.c ++++ b/arch/powerpc/platforms/powernv/setup.c +@@ -77,6 +77,12 @@ static void init_fw_feat_flags(struct de + if (fw_feature_is("enabled", "fw-count-cache-disabled", np)) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + ++ if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np)) ++ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST); ++ ++ if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np)) ++ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE); ++ + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. +@@ -123,6 +129,7 @@ static void pnv_setup_rfi_flush(void) + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); ++ setup_count_cache_flush(); + } + + static void __init pnv_setup_arch(void) diff --git a/queue-4.14/powerpc-pseries-query-hypervisor-for-count-cache-flush-settings.patch b/queue-4.14/powerpc-pseries-query-hypervisor-for-count-cache-flush-settings.patch new file mode 100644 index 00000000000..2805f0be12f --- /dev/null +++ b/queue-4.14/powerpc-pseries-query-hypervisor-for-count-cache-flush-settings.patch @@ -0,0 +1,61 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:06 +1100 +Subject: [PATCH stable v4.14 18/32] powerpc/pseries: Query hypervisor for count cache flush settings +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-19-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit ba72dc171954b782a79d25e0f4b3ed91090c3b1e upstream. + +Use the existing hypercall to determine the appropriate settings for +the count cache flush, and then call the generic powerpc code to set +it up based on the security feature flags. 
+ +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/hvcall.h | 2 ++ + arch/powerpc/platforms/pseries/setup.c | 7 +++++++ + 2 files changed, 9 insertions(+) + +--- a/arch/powerpc/include/asm/hvcall.h ++++ b/arch/powerpc/include/asm/hvcall.h +@@ -340,10 +340,12 @@ + #define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5 + #define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6 + #define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7 ++#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9 + + #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 + #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 + #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 ++#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5 + + /* Flag values used in H_REGISTER_PROC_TBL hcall */ + #define PROC_TABLE_OP_MASK 0x18 +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -484,6 +484,12 @@ static void init_cpu_char_feature_flags( + if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + ++ if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) ++ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST); ++ ++ if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE) ++ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE); ++ + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. +@@ -534,6 +540,7 @@ void pseries_setup_rfi_flush(void) + security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); + + setup_rfi_flush(types, enable); ++ setup_count_cache_flush(); + } + + static void __init pSeries_setup_arch(void) diff --git a/queue-4.14/powerpc-security-fix-spectre_v2-reporting.patch b/queue-4.14/powerpc-security-fix-spectre_v2-reporting.patch new file mode 100644 index 00000000000..a27ab474382 --- /dev/null +++ b/queue-4.14/powerpc-security-fix-spectre_v2-reporting.patch @@ -0,0 +1,89 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:26:20 +1100 +Subject: [PATCH stable v4.14 32/32] powerpc/security: Fix spectre_v2 reporting +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-33-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit 92edf8df0ff2ae86cc632eeca0e651fd8431d40d upstream. + +When I updated the spectre_v2 reporting to handle software count cache +flush I got the logic wrong when there's no software count cache +enabled at all. + +The result is that on systems with the software count cache flush +disabled we print: + + Mitigation: Indirect branch cache disabled, Software count cache flush + +Which correctly indicates that the count cache is disabled, but +incorrectly says the software count cache flush is enabled. + +The root of the problem is that we are trying to handle all +combinations of options. But we know now that we only expect to see +the software count cache flush enabled if the other options are false. + +So split the two cases, which simplifies the logic and fixes the bug. +We were also missing a space before "(hardware accelerated)". 
+ +The result is we see one of: + + Mitigation: Indirect branch serialisation (kernel only) + Mitigation: Indirect branch cache disabled + Mitigation: Software count cache flush + Mitigation: Software count cache flush (hardware accelerated) + +Fixes: ee13cb249fab ("powerpc/64s: Add support for software count cache flush") +Cc: stable@vger.kernel.org # v4.19+ +Signed-off-by: Michael Ellerman +Reviewed-by: Michael Neuling +Reviewed-by: Diana Craciun +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/security.c | 23 ++++++++--------------- + 1 file changed, 8 insertions(+), 15 deletions(-) + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -189,29 +189,22 @@ ssize_t cpu_show_spectre_v2(struct devic + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); + +- if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { +- bool comma = false; ++ if (bcs || ccd) { + seq_buf_printf(&s, "Mitigation: "); + +- if (bcs) { ++ if (bcs) + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); +- comma = true; +- } + +- if (ccd) { +- if (comma) +- seq_buf_printf(&s, ", "); +- seq_buf_printf(&s, "Indirect branch cache disabled"); +- comma = true; +- } +- +- if (comma) ++ if (bcs && ccd) + seq_buf_printf(&s, ", "); + +- seq_buf_printf(&s, "Software count cache flush"); ++ if (ccd) ++ seq_buf_printf(&s, "Indirect branch cache disabled"); ++ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { ++ seq_buf_printf(&s, "Mitigation: Software count cache flush"); + + if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) +- seq_buf_printf(&s, "(hardware accelerated)"); ++ seq_buf_printf(&s, " (hardware accelerated)"); + } else if (btb_flush_enabled) { + seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); + } else { diff --git a/queue-4.14/powerpc-use-barrier_nospec-in-copy_from_user.patch b/queue-4.14/powerpc-use-barrier_nospec-in-copy_from_user.patch new file mode 100644 index 00000000000..44afedfe10f --- /dev/null +++ b/queue-4.14/powerpc-use-barrier_nospec-in-copy_from_user.patch @@ -0,0 +1,89 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:52 +1100 +Subject: [PATCH stable v4.14 04/32] powerpc: Use barrier_nospec in copy_from_user() +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-5-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit ddf35cf3764b5a182b178105f57515b42e2634f8 upstream. + +Based on the x86 commit doing the same. + +See commit 304ec1b05031 ("x86/uaccess: Use __uaccess_begin_nospec() +and uaccess_try_nospec") and b3bbfb3fb5d2 ("x86: Introduce +__uaccess_begin_nospec() and uaccess_try_nospec") for more detail. + +In all cases we are ordering the load from the potentially +user-controlled pointer vs a previous branch based on an access_ok() +check or similar. + +Base on a patch from Michal Suchanek. 
+ +Signed-off-by: Michal Suchanek +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/uaccess.h | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -238,6 +238,7 @@ do { \ + __chk_user_ptr(ptr); \ + if (!is_kernel_addr((unsigned long)__gu_addr)) \ + might_fault(); \ ++ barrier_nospec(); \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +@@ -249,8 +250,10 @@ do { \ + __long_type(*(ptr)) __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + might_fault(); \ +- if (access_ok(VERIFY_READ, __gu_addr, (size))) \ ++ if (access_ok(VERIFY_READ, __gu_addr, (size))) { \ ++ barrier_nospec(); \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ } \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __gu_err; \ + }) +@@ -261,6 +264,7 @@ do { \ + __long_type(*(ptr)) __gu_val; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __chk_user_ptr(ptr); \ ++ barrier_nospec(); \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +@@ -288,15 +292,19 @@ static inline unsigned long raw_copy_fro + + switch (n) { + case 1: ++ barrier_nospec(); + __get_user_size(*(u8 *)to, from, 1, ret); + break; + case 2: ++ barrier_nospec(); + __get_user_size(*(u16 *)to, from, 2, ret); + break; + case 4: ++ barrier_nospec(); + __get_user_size(*(u32 *)to, from, 4, ret); + break; + case 8: ++ barrier_nospec(); + __get_user_size(*(u64 *)to, from, 8, ret); + break; + } +@@ -304,6 +312,7 @@ static inline unsigned long raw_copy_fro + return 0; + } + ++ barrier_nospec(); + return __copy_tofrom_user((__force void __user *)to, from, n); + } + diff --git a/queue-4.14/powerpc64s-show-ori31-availability-in-spectre_v1-sysfs-file-not-v2.patch b/queue-4.14/powerpc64s-show-ori31-availability-in-spectre_v1-sysfs-file-not-v2.patch new file mode 100644 index 00000000000..f44325b52b8 --- /dev/null +++ b/queue-4.14/powerpc64s-show-ori31-availability-in-spectre_v1-sysfs-file-not-v2.patch @@ -0,0 +1,97 @@ +From foo@baz Fri Mar 29 15:53:50 CET 2019 +From: Michael Ellerman +Date: Fri, 29 Mar 2019 22:25:55 +1100 +Subject: [PATCH stable v4.14 07/32] powerpc64s: Show ori31 availability in spectre_v1 sysfs file not v2 +To: stable@vger.kernel.org, gregkh@linuxfoundation.org +Cc: linuxppc-dev@ozlabs.org, diana.craciun@nxp.com, msuchanek@suse.de, christophe.leroy@c-s.fr +Message-ID: <20190329112620.14489-8-mpe@ellerman.id.au> + +From: Michael Ellerman + +commit 6d44acae1937b81cf8115ada8958e04f601f3f2e upstream. + +When I added the spectre_v2 information in sysfs, I included the +availability of the ori31 speculation barrier. + +Although the ori31 barrier can be used to mitigate v2, it's primarily +intended as a spectre v1 mitigation. Spectre v2 is mitigated by +hardware changes. + +So rework the sysfs files to show the ori31 information in the +spectre_v1 file, rather than v2. + +Currently we display eg: + + $ grep . spectre_v* + spectre_v1:Mitigation: __user pointer sanitization + spectre_v2:Mitigation: Indirect branch cache disabled, ori31 speculation barrier enabled + +After: + + $ grep . 
spectre_v* + spectre_v1:Mitigation: __user pointer sanitization, ori31 speculation barrier enabled + spectre_v2:Mitigation: Indirect branch cache disabled + +Fixes: d6fbe1c55c55 ("powerpc/64s: Wire up cpu_show_spectre_v2()") +Cc: stable@vger.kernel.org # v4.17+ +Signed-off-by: Michael Ellerman +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/kernel/security.c | 27 +++++++++++++++++---------- + 1 file changed, 17 insertions(+), 10 deletions(-) + +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -117,25 +117,35 @@ ssize_t cpu_show_meltdown(struct device + + ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) + { +- if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) +- return sprintf(buf, "Not affected\n"); ++ struct seq_buf s; ++ ++ seq_buf_init(&s, buf, PAGE_SIZE - 1); ++ ++ if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) { ++ if (barrier_nospec_enabled) ++ seq_buf_printf(&s, "Mitigation: __user pointer sanitization"); ++ else ++ seq_buf_printf(&s, "Vulnerable"); + +- if (barrier_nospec_enabled) +- return sprintf(buf, "Mitigation: __user pointer sanitization\n"); ++ if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31)) ++ seq_buf_printf(&s, ", ori31 speculation barrier enabled"); + +- return sprintf(buf, "Vulnerable\n"); ++ seq_buf_printf(&s, "\n"); ++ } else ++ seq_buf_printf(&s, "Not affected\n"); ++ ++ return s.len; + } + + ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) + { +- bool bcs, ccd, ori; + struct seq_buf s; ++ bool bcs, ccd; + + seq_buf_init(&s, buf, PAGE_SIZE - 1); + + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); +- ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31); + + if (bcs || ccd) { + seq_buf_printf(&s, "Mitigation: "); +@@ -151,9 +161,6 @@ ssize_t cpu_show_spectre_v2(struct devic + } else + seq_buf_printf(&s, "Vulnerable"); + +- if (ori) +- seq_buf_printf(&s, ", ori31 speculation barrier enabled"); +- + seq_buf_printf(&s, "\n"); + + return s.len; diff --git a/queue-4.14/series b/queue-4.14/series index c563c3924c0..72c2989ca78 100644 --- a/queue-4.14/series +++ b/queue-4.14/series @@ -21,3 +21,35 @@ net-aquantia-fix-rx-checksum-offload-for-udp-tcp-over-ipv6.patch mac8390-fix-mmio-access-size-probe.patch tun-properly-test-for-iff_up.patch tun-add-a-missing-rcu_read_unlock-in-error-path.patch +powerpc-64s-add-support-for-ori-barrier_nospec-patching.patch +powerpc-64s-patch-barrier_nospec-in-modules.patch +powerpc-64s-enable-barrier_nospec-based-on-firmware-settings.patch +powerpc-use-barrier_nospec-in-copy_from_user.patch +powerpc-64-use-barrier_nospec-in-syscall-entry.patch +powerpc-64s-enhance-the-information-in-cpu_show_spectre_v1.patch +powerpc64s-show-ori31-availability-in-spectre_v1-sysfs-file-not-v2.patch +powerpc-64-disable-the-speculation-barrier-from-the-command-line.patch +powerpc-64-make-stf-barrier-ppc_book3s_64-specific.patch +powerpc-64-add-config_ppc_barrier_nospec.patch +powerpc-64-call-setup_barrier_nospec-from-setup_arch.patch +powerpc-64-make-meltdown-reporting-book3s-64-specific.patch +powerpc-fsl-add-barrier_nospec-implementation-for-nxp-powerpc-book3e.patch +powerpc-fsl-sanitize-the-syscall-table-for-nxp-powerpc-32-bit-platforms.patch +powerpc-asm-add-a-patch_site-macro-helpers-for-patching-instructions.patch +powerpc-64s-add-new-security-feature-flags-for-count-cache-flush.patch +powerpc-64s-add-support-for-software-count-cache-flush.patch 
+powerpc-pseries-query-hypervisor-for-count-cache-flush-settings.patch +powerpc-powernv-query-firmware-for-count-cache-flush-settings.patch +powerpc-fsl-add-infrastructure-to-fixup-branch-predictor-flush.patch +powerpc-fsl-add-macro-to-flush-the-branch-predictor.patch +powerpc-fsl-fix-spectre_v2-mitigations-reporting.patch +powerpc-fsl-emulate-sprn_bucsr-register.patch +powerpc-fsl-add-nospectre_v2-command-line-argument.patch +powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-64bit.patch +powerpc-fsl-flush-the-branch-predictor-at-each-kernel-entry-32-bit.patch +powerpc-fsl-flush-branch-predictor-when-entering-kvm.patch +powerpc-fsl-enable-runtime-patching-if-nospectre_v2-boot-arg-is-used.patch +powerpc-fsl-update-spectre-v2-reporting.patch +powerpc-fsl-fixed-warning-orphan-section-__btb_flush_fixup.patch +powerpc-fsl-fix-the-flush-of-branch-predictor.patch +powerpc-security-fix-spectre_v2-reporting.patch
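
The reporting patches in this series (powerpc-fsl-update-spectre-v2-reporting.patch and powerpc-security-fix-spectre_v2-reporting.patch) decide what /sys/devices/system/cpu/vulnerabilities/spectre_v2 shows once the whole queue is applied. As a quick aid for reviewers, below is a minimal user-space C sketch, an illustration only and not kernel code: the standalone program, its function name and its example inputs are assumptions, while the flag names, the count_cache_flush_type values and the branch ordering follow the final state of cpu_show_spectre_v2() after "powerpc/security: Fix spectre_v2 reporting".

/*
 * Illustrative user-space sketch (not kernel code): mirrors the decision
 * logic of cpu_show_spectre_v2() on powerpc as it stands after the last
 * patch in this series.  Enum values and flag names follow the patches
 * above; the standalone program itself is only an assumption.
 */
#include <stdbool.h>
#include <stdio.h>

enum count_cache_flush_type {
	COUNT_CACHE_FLUSH_NONE	= 0x1,
	COUNT_CACHE_FLUSH_SW	= 0x2,
	COUNT_CACHE_FLUSH_HW	= 0x4,
};

static void show_spectre_v2(bool bcs, bool ccd,
			    enum count_cache_flush_type flush,
			    bool btb_flush_enabled)
{
	if (bcs || ccd) {
		printf("Mitigation: ");
		if (bcs)
			printf("Indirect branch serialisation (kernel only)");
		if (bcs && ccd)
			printf(", ");
		if (ccd)
			printf("Indirect branch cache disabled");
		printf("\n");
	} else if (flush != COUNT_CACHE_FLUSH_NONE) {
		/* Software count cache flush, optionally hardware assisted */
		printf("Mitigation: Software count cache flush%s\n",
		       flush == COUNT_CACHE_FLUSH_HW ?
		       " (hardware accelerated)" : "");
	} else if (btb_flush_enabled) {
		/* NXP Book3E case added by "powerpc/fsl: Update Spectre v2 reporting" */
		printf("Mitigation: Branch predictor state flush\n");
	} else {
		printf("Vulnerable\n");
	}
}

int main(void)
{
	/* The mitigated cases plus the vulnerable fallback. */
	show_spectre_v2(true,  false, COUNT_CACHE_FLUSH_NONE, false);
	show_spectre_v2(false, true,  COUNT_CACHE_FLUSH_NONE, false);
	show_spectre_v2(false, false, COUNT_CACHE_FLUSH_SW,   false);
	show_spectre_v2(false, false, COUNT_CACHE_FLUSH_HW,   false);
	show_spectre_v2(false, false, COUNT_CACHE_FLUSH_NONE, true);
	show_spectre_v2(false, false, COUNT_CACHE_FLUSH_NONE, false);
	return 0;
}

Built with any C compiler and run as-is, this prints the mitigation strings listed in the "powerpc/security: Fix spectre_v2 reporting" changelog, the Book3E "Branch predictor state flush" line, and finally "Vulnerable" for the unmitigated case.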