--- /dev/null
+From f4a551b72358facbbe5714248dff78404272feee Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Wed, 9 May 2018 16:12:17 +0200
+Subject: KVM: s390: vsie: fix < 8k check for the itdba
+
+From: David Hildenbrand <david@redhat.com>
+
+commit f4a551b72358facbbe5714248dff78404272feee upstream.
+
+By missing an "L", we might detect some addresses to be <8k,
+although they are not.
+
+e.g. for itdba = 100001fff
+!(gpa & ~0x1fffU) -> 1
+!(gpa & ~0x1fffUL) -> 0
+
+So we would report a SIE validity intercept although everything is fine.
+
+Fixes: 166ecb3 ("KVM: s390: vsie: support transactional execution")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
+Cc: stable@vger.kernel.org # v4.8+
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/vsie.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -590,7 +590,7 @@ static int pin_blocks(struct kvm_vcpu *v
+
+ gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
+ if (gpa && (scb_s->ecb & ECB_TE)) {
+- if (!(gpa & ~0x1fffU)) {
++ if (!(gpa & ~0x1fffUL)) {
+ rc = set_validity_icpt(scb_s, 0x0080U);
+ goto unpin;
+ }
--- /dev/null
+From 0aa48468d00959c8a37cd3ac727284f4f7359151 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Mon, 21 May 2018 17:54:49 -0400
+Subject: KVM/VMX: Expose SSBD properly to guests
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 0aa48468d00959c8a37cd3ac727284f4f7359151 upstream.
+
+The X86_FEATURE_SSBD is a synthetic CPU feature - that is
+its bit location has no relevance to the real CPUID 0x7.EBX[31]
+bit position. For that we need the new CPU feature name.
+
+Fixes: 52817587e706 ("x86/cpufeatures: Disentangle SSBD enumeration")
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: kvm@vger.kernel.org
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: stable@vger.kernel.org
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Link: https://lkml.kernel.org/r/20180521215449.26423-2-konrad.wilk@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -394,8 +394,8 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
+- F(ARCH_CAPABILITIES);
++ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
--- /dev/null
+From 1eaafe91a0df4157521b6417b3dd8430bf5f52f0 Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Wed, 9 May 2018 14:29:35 -0700
+Subject: kvm: x86: IA32_ARCH_CAPABILITIES is always supported
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jim Mattson <jmattson@google.com>
+
+commit 1eaafe91a0df4157521b6417b3dd8430bf5f52f0 upstream.
+
+If there is a possibility that a VM may migrate to a Skylake host,
+then the hypervisor should report IA32_ARCH_CAPABILITIES.RSBA[bit 2]
+as being set (future work, of course). This implies that
+CPUID.(EAX=7,ECX=0):EDX.ARCH_CAPABILITIES[bit 29] should be
+set. Therefore, kvm should report this CPUID bit as being supported
+whether or not the host supports it. Userspace is still free to clear
+the bit if it chooses.
+
+For more information on RSBA, see Intel's white paper, "Retpoline: A
+Branch Target Injection Mitigation" (Document Number 337131-001),
+currently available at https://bugzilla.kernel.org/show_bug.cgi?id=199511.
+
+Since the IA32_ARCH_CAPABILITIES MSR is emulated in kvm, there is no
+dependency on hardware support for this feature.
+
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Fixes: 28c1c9fabf48 ("KVM/VMX: Emulate MSR_IA32_ARCH_CAPABILITIES")
+Cc: stable@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -481,6 +481,11 @@ static inline int __do_cpuid_ent(struct
+ entry->ecx &= ~F(PKU);
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ cpuid_mask(&entry->edx, CPUID_7_EDX);
++ /*
++ * We emulate ARCH_CAPABILITIES in software even
++ * if the host doesn't support it.
++ */
++ entry->edx |= F(ARCH_CAPABILITIES);
+ } else {
+ entry->ebx = 0;
+ entry->ecx = 0;
--- /dev/null
+From c4d2188206bafa177ea58e9a25b952baa0bf7712 Mon Sep 17 00:00:00 2001
+From: Wei Huang <wei@redhat.com>
+Date: Tue, 1 May 2018 09:49:54 -0500
+Subject: KVM: x86: Update cpuid properly when CR4.OSXAVE or CR4.PKE is changed
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wei Huang <wei@redhat.com>
+
+commit c4d2188206bafa177ea58e9a25b952baa0bf7712 upstream.
+
+The CPUID bits of OSXSAVE (function=0x1) and OSPKE (func=0x7, leaf=0x0)
+allows user apps to detect if OS has set CR4.OSXSAVE or CR4.PKE. KVM is
+supposed to update these CPUID bits when CR4 is updated. Current KVM
+code doesn't handle some special cases when updates come from emulator.
+Here is one example:
+
+ Step 1: guest boots
+ Step 2: guest OS enables XSAVE ==> CR4.OSXSAVE=1 and CPUID.OSXSAVE=1
+ Step 3: guest hot reboot ==> QEMU reset CR4 to 0, but CPUID.OSXSAVE==1
+ Step 4: guest os checks CPUID.OSXSAVE, detects 1, then executes xgetbv
+
+Step 4 above will cause an #UD and guest crash because guest OS hasn't
+turned on OSXSAVE yet. This patch solves the problem by comparing the
+old_cr4 with cr4. If the related bits have been changed,
+kvm_update_cpuid() needs to be called.
+
+Signed-off-by: Wei Huang <wei@redhat.com>
+Reviewed-by: Bandan Das <bsd@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7505,6 +7505,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct
+ {
+ struct msr_data apic_base_msr;
+ int mmu_reset_needed = 0;
++ int cpuid_update_needed = 0;
+ int pending_vec, max_bits, idx;
+ struct desc_ptr dt;
+
+@@ -7542,8 +7543,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct
+ vcpu->arch.cr0 = sregs->cr0;
+
+ mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
++ cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
++ (X86_CR4_OSXSAVE | X86_CR4_PKE));
+ kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+- if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
++ if (cpuid_update_needed)
+ kvm_update_cpuid(vcpu);
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:49 +1000
+Subject: powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-24-mpe@ellerman.id.au>
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit a048a07d7f4535baa4cbad6bc024f175317ab938 upstream.
+
+On some CPUs we can prevent a vulnerability related to store-to-load
+forwarding by preventing store forwarding between privilege domains,
+by inserting a barrier in kernel entry and exit paths.
+
+This is known to be the case on at least Power7, Power8 and Power9
+powerpc CPUs.
+
+Barriers must be inserted generally before the first load after moving
+to a higher privilege, and after the last store before moving to a
+lower privilege, HV and PR privilege transitions must be protected.
+
+Barriers are added as patch sections, with all kernel/hypervisor entry
+points patched, and the exit points to lower privilge levels patched
+similarly to the RFI flush patching.
+
+Firmware advertisement is not implemented yet, so CPU flush types
+are hard coded.
+
+Thanks to Michal Suchánek for bug fixes and review.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michal Suchánek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/exception-64s.h | 29 +++++
+ arch/powerpc/include/asm/feature-fixups.h | 19 +++
+ arch/powerpc/include/asm/security_features.h | 11 +
+ arch/powerpc/kernel/exceptions-64s.S | 19 +++
+ arch/powerpc/kernel/security.c | 149 +++++++++++++++++++++++++++
+ arch/powerpc/kernel/vmlinux.lds.S | 14 ++
+ arch/powerpc/lib/feature-fixups.c | 115 ++++++++++++++++++++
+ arch/powerpc/platforms/powernv/setup.c | 1
+ arch/powerpc/platforms/pseries/setup.c | 1
+ 9 files changed, 356 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -69,6 +69,27 @@
+ */
+ #define EX_R3 EX_DAR
+
++#define STF_ENTRY_BARRIER_SLOT \
++ STF_ENTRY_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop
++
++#define STF_EXIT_BARRIER_SLOT \
++ STF_EXIT_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop
++
++/*
++ * r10 must be free to use, r13 must be paca
++ */
++#define INTERRUPT_TO_KERNEL \
++ STF_ENTRY_BARRIER_SLOT
++
+ /*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+@@ -85,16 +106,19 @@
+ rfid
+
+ #define RFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+@@ -103,21 +127,25 @@
+ hrfid
+
+ #define HRFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_UNKNOWN \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+@@ -249,6 +277,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ #define __EXCEPTION_PROLOG_1(area, extra, vec) \
+ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
+ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
++ INTERRUPT_TO_KERNEL; \
+ SAVE_CTR(r10, area); \
+ mfcr r9; \
+ extra(vec); \
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -187,6 +187,22 @@ label##3: \
+ FTR_ENTRY_OFFSET label##1b-label##3b; \
+ .popsection;
+
++#define STF_ENTRY_BARRIER_FIXUP_SECTION \
++953: \
++ .pushsection __stf_entry_barrier_fixup,"a"; \
++ .align 2; \
++954: \
++ FTR_ENTRY_OFFSET 953b-954b; \
++ .popsection;
++
++#define STF_EXIT_BARRIER_FIXUP_SECTION \
++955: \
++ .pushsection __stf_exit_barrier_fixup,"a"; \
++ .align 2; \
++956: \
++ FTR_ENTRY_OFFSET 955b-956b; \
++ .popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION \
+ 951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+@@ -199,6 +215,9 @@ label##3: \
+ #ifndef __ASSEMBLY__
+ #include <linux/types.h>
+
++extern long stf_barrier_fallback;
++extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
++extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+
+ void apply_feature_fixups(void);
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -12,6 +12,17 @@
+ extern unsigned long powerpc_security_features;
+ extern bool rfi_flush;
+
++/* These are bit flags */
++enum stf_barrier_type {
++ STF_BARRIER_NONE = 0x1,
++ STF_BARRIER_FALLBACK = 0x2,
++ STF_BARRIER_EIEIO = 0x4,
++ STF_BARRIER_SYNC_ORI = 0x8,
++};
++
++void setup_stf_barrier(void);
++void do_stf_barrier_fixups(enum stf_barrier_type types);
++
+ static inline void security_ftr_set(unsigned long feature)
+ {
+ powerpc_security_features |= feature;
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -825,7 +825,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+
+-EXC_REAL_MASKABLE(decrementer, 0x900, 0x80)
++EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80)
+ EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900)
+ TRAMP_KVM(PACA_EXGEN, 0x900)
+ EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+@@ -901,6 +901,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknow
+ mtctr r13; \
+ GET_PACA(r13); \
+ std r10,PACA_EXGEN+EX_R10(r13); \
++ INTERRUPT_TO_KERNEL; \
+ KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
+ HMT_MEDIUM; \
+ mfctr r9;
+@@ -909,7 +910,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknow
+ #define SYSCALL_KVMTEST \
+ HMT_MEDIUM; \
+ mr r9,r13; \
+- GET_PACA(r13);
++ GET_PACA(r13); \
++ INTERRUPT_TO_KERNEL;
+ #endif
+
+ #define LOAD_SYSCALL_HANDLER(reg) \
+@@ -1434,6 +1436,19 @@ masked_##_H##interrupt: \
+ b .; \
+ MASKED_DEC_HANDLER(_H)
+
++TRAMP_REAL_BEGIN(stf_barrier_fallback)
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ sync
++ ld r9,PACA_EXRFI+EX_R9(r13)
++ ld r10,PACA_EXRFI+EX_R10(r13)
++ ori 31,31,0
++ .rept 14
++ b 1f
++1:
++ .endr
++ blr
++
+ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -8,6 +8,7 @@
+ #include <linux/device.h>
+ #include <linux/seq_buf.h>
+
++#include <asm/debugfs.h>
+ #include <asm/security_features.h>
+
+
+@@ -86,3 +87,151 @@ ssize_t cpu_show_spectre_v2(struct devic
+
+ return s.len;
+ }
++
++/*
++ * Store-forwarding barrier support.
++ */
++
++static enum stf_barrier_type stf_enabled_flush_types;
++static bool no_stf_barrier;
++bool stf_barrier;
++
++static int __init handle_no_stf_barrier(char *p)
++{
++ pr_info("stf-barrier: disabled on command line.");
++ no_stf_barrier = true;
++ return 0;
++}
++
++early_param("no_stf_barrier", handle_no_stf_barrier);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_ssbd(char *p)
++{
++ if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
++ /* Until firmware tells us, we have the barrier with auto */
++ return 0;
++ } else if (strncmp(p, "off", 3) == 0) {
++ handle_no_stf_barrier(NULL);
++ return 0;
++ } else
++ return 1;
++
++ return 0;
++}
++early_param("spec_store_bypass_disable", handle_ssbd);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_no_ssbd(char *p)
++{
++ handle_no_stf_barrier(NULL);
++ return 0;
++}
++early_param("nospec_store_bypass_disable", handle_no_ssbd);
++
++static void stf_barrier_enable(bool enable)
++{
++ if (enable)
++ do_stf_barrier_fixups(stf_enabled_flush_types);
++ else
++ do_stf_barrier_fixups(STF_BARRIER_NONE);
++
++ stf_barrier = enable;
++}
++
++void setup_stf_barrier(void)
++{
++ enum stf_barrier_type type;
++ bool enable, hv;
++
++ hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++ /* Default to fallback in case fw-features are not available */
++ if (cpu_has_feature(CPU_FTR_ARCH_300))
++ type = STF_BARRIER_EIEIO;
++ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
++ type = STF_BARRIER_SYNC_ORI;
++ else if (cpu_has_feature(CPU_FTR_ARCH_206))
++ type = STF_BARRIER_FALLBACK;
++ else
++ type = STF_BARRIER_NONE;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
++
++ if (type == STF_BARRIER_FALLBACK) {
++ pr_info("stf-barrier: fallback barrier available\n");
++ } else if (type == STF_BARRIER_SYNC_ORI) {
++ pr_info("stf-barrier: hwsync barrier available\n");
++ } else if (type == STF_BARRIER_EIEIO) {
++ pr_info("stf-barrier: eieio barrier available\n");
++ }
++
++ stf_enabled_flush_types = type;
++
++ if (!no_stf_barrier)
++ stf_barrier_enable(enable);
++}
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
++ const char *type;
++ switch (stf_enabled_flush_types) {
++ case STF_BARRIER_EIEIO:
++ type = "eieio";
++ break;
++ case STF_BARRIER_SYNC_ORI:
++ type = "hwsync";
++ break;
++ case STF_BARRIER_FALLBACK:
++ type = "fallback";
++ break;
++ default:
++ type = "unknown";
++ }
++ return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
++ }
++
++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int stf_barrier_set(void *data, u64 val)
++{
++ bool enable;
++
++ if (val == 1)
++ enable = true;
++ else if (val == 0)
++ enable = false;
++ else
++ return -EINVAL;
++
++ /* Only do anything if we're changing state */
++ if (enable != stf_barrier)
++ stf_barrier_enable(enable);
++
++ return 0;
++}
++
++static int stf_barrier_get(void *data, u64 *val)
++{
++ *val = stf_barrier ? 1 : 0;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
++
++static __init int stf_barrier_debugfs_init(void)
++{
++ debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
++ return 0;
++}
++device_initcall(stf_barrier_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -134,6 +134,20 @@ SECTIONS
+
+ #ifdef CONFIG_PPC64
+ . = ALIGN(8);
++ __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_entry_barrier_fixup = .;
++ *(__stf_entry_barrier_fixup)
++ __stop___stf_entry_barrier_fixup = .;
++ }
++
++ . = ALIGN(8);
++ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_exit_barrier_fixup = .;
++ *(__stf_exit_barrier_fixup)
++ __stop___stf_exit_barrier_fixup = .;
++ }
++
++ . = ALIGN(8);
+ __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+ __start___rfi_flush_fixup = .;
+ *(__rfi_flush_fixup)
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -23,6 +23,7 @@
+ #include <asm/page.h>
+ #include <asm/sections.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+ #include <asm/firmware.h>
+
+ struct fixup_entry {
+@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long val
+ }
+
+ #ifdef CONFIG_PPC_BOOK3S_64
++void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[3], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_entry_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK) {
++ instrs[i++] = 0x7d4802a6; /* mflr r10 */
++ instrs[i++] = 0x60000000; /* branch patched below */
++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ } else if (types & STF_BARRIER_SYNC_ORI) {
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++
++ if (types & STF_BARRIER_FALLBACK)
++ patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
++ BRANCH_SET_LINK);
++ else
++ patch_instruction(dest + 1, instrs[1]);
++
++ patch_instruction(dest + 2, instrs[2]);
++ }
++
++ printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[6], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_exit_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++ instrs[3] = 0x60000000; /* nop */
++ instrs[4] = 0x60000000; /* nop */
++ instrs[5] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
++ instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
++ } else {
++ instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
++ instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
++ }
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
++ } else {
++ instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
++ }
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++ patch_instruction(dest + 1, instrs[1]);
++ patch_instruction(dest + 2, instrs[2]);
++ patch_instruction(dest + 3, instrs[3]);
++ patch_instruction(dest + 4, instrs[4]);
++ patch_instruction(dest + 5, instrs[5]);
++ }
++ printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++
++void do_stf_barrier_fixups(enum stf_barrier_type types)
++{
++ do_stf_entry_barrier_fixups(types);
++ do_stf_exit_barrier_fixups(types);
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+ unsigned int instrs[3], *dest;
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -130,6 +130,7 @@ static void __init pnv_setup_arch(void)
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+
+ pnv_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* Initialize SMP */
+ pnv_smp_init();
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -554,6 +554,7 @@ static void __init pSeries_setup_arch(vo
+ fwnmi_init();
+
+ pseries_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* By default, only probe PCI (can be overridden by rtas_pci) */
+ pci_add_flags(PCI_PROBE_ONLY);
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:40 +1000
+Subject: powerpc/64s: Enhance the information in cpu_show_meltdown()
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-15-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit ff348355e9c72493947be337bb4fae4fc1a41eba upstream.
+
+Now that we have the security feature flags we can make the
+information displayed in the "meltdown" file more informative.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/security_features.h | 1
+ arch/powerpc/kernel/security.c | 30 +++++++++++++++++++++++++--
+ 2 files changed, 29 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -10,6 +10,7 @@
+
+
+ extern unsigned long powerpc_security_features;
++extern bool rfi_flush;
+
+ static inline void security_ftr_set(unsigned long feature)
+ {
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -6,6 +6,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/device.h>
++#include <linux/seq_buf.h>
+
+ #include <asm/security_features.h>
+
+@@ -19,8 +20,33 @@ unsigned long powerpc_security_features
+
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (rfi_flush)
+- return sprintf(buf, "Mitigation: RFI Flush\n");
++ bool thread_priv;
++
++ thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (rfi_flush || thread_priv) {
++ struct seq_buf s;
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++ seq_buf_printf(&s, "Mitigation: ");
++
++ if (rfi_flush)
++ seq_buf_printf(&s, "RFI Flush");
++
++ if (rfi_flush && thread_priv)
++ seq_buf_printf(&s, ", ");
++
++ if (thread_priv)
++ seq_buf_printf(&s, "L1D private per thread");
++
++ seq_buf_printf(&s, "\n");
++
++ return s.len;
++ }
++
++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++ return sprintf(buf, "Not affected\n");
+
+ return sprintf(buf, "Vulnerable\n");
+ }
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:48 +1000
+Subject: powerpc/64s: Fix section mismatch warnings from setup_rfi_flush()
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-23-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 501a78cbc17c329fabf8e9750a1e9ab810c88a0e upstream.
+
+The recent LPM changes to setup_rfi_flush() are causing some section
+mismatch warnings because we removed the __init annotation on
+setup_rfi_flush():
+
+ The function setup_rfi_flush() references
+ the function __init ppc64_bolted_size().
+ the function __init memblock_alloc_base().
+
+The references are actually in init_fallback_flush(), but that is
+inlined into setup_rfi_flush().
+
+These references are safe because:
+ - only pseries calls setup_rfi_flush() at runtime
+ - pseries always passes L1D_FLUSH_FALLBACK at boot
+ - so the fallback flush area will always be allocated
+ - so the check in init_fallback_flush() will always return early:
+ /* Only allocate the fallback flush area once (at boot time). */
+ if (l1d_flush_fallback_area)
+ return;
+
+ - and therefore we won't actually call the freed init routines.
+
+We should rework the code to make it safer by default rather than
+relying on the above, but for now as a quick-fix just add a __ref
+annotation to squash the warning.
+
+Fixes: abf110f3e1ce ("powerpc/rfi-flush: Make it possible to call setup_rfi_flush() again")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/setup_64.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -831,7 +831,7 @@ void rfi_flush_enable(bool enable)
+ rfi_flush = enable;
+ }
+
+-static void init_fallback_flush(void)
++static void __ref init_fallback_flush(void)
+ {
+ u64 l1d_size, limit;
+ int cpu;
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:27 +1000
+Subject: powerpc/64s: Improve RFI L1-D cache flush fallback
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-2-mpe@ellerman.id.au>
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit bdcb1aefc5b3f7d0f1dc8b02673602bca2ff7a4b upstream.
+
+The fallback RFI flush is used when firmware does not provide a way
+to flush the cache. It's a "displacement flush" that evicts useful
+data by displacing it with an uninteresting buffer.
+
+The flush has to take care to work with implementation specific cache
+replacment policies, so the recipe has been in flux. The initial
+slow but conservative approach is to touch all lines of a congruence
+class, with dependencies between each load. It has since been
+determined that a linear pattern of loads without dependencies is
+sufficient, and is significantly faster.
+
+Measuring the speed of a null syscall with RFI fallback flush enabled
+gives the relative improvement:
+
+P8 - 1.83x
+P9 - 1.75x
+
+The flush also becomes simpler and more adaptable to different cache
+geometries.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/paca.h | 3 -
+ arch/powerpc/kernel/asm-offsets.c | 3 -
+ arch/powerpc/kernel/exceptions-64s.S | 76 ++++++++++++++++-------------------
+ arch/powerpc/kernel/setup_64.c | 13 -----
+ arch/powerpc/xmon/xmon.c | 2
+ 5 files changed, 41 insertions(+), 56 deletions(-)
+
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -238,8 +238,7 @@ struct paca_struct {
+ */
+ u64 exrfi[EX_SIZE] __aligned(0x80);
+ void *rfi_flush_fallback_area;
+- u64 l1d_flush_congruence;
+- u64 l1d_flush_sets;
++ u64 l1d_flush_size;
+ #endif
+ };
+
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -239,8 +239,7 @@ int main(void)
+ OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
+ OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
+ OFFSET(PACA_EXRFI, paca_struct, exrfi);
+- OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
+- OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
++ OFFSET(PACA_L1D_FLUSH_SIZE, paca_struct, l1d_flush_size);
+
+ #endif
+ OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1440,39 +1440,37 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+- std r12,PACA_EXRFI+EX_R12(r13)
+- std r8,PACA_EXRFI+EX_R13(r13)
+ mfctr r9
+ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+- ld r11,PACA_L1D_FLUSH_SETS(r13)
+- ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+- /*
+- * The load adresses are at staggered offsets within cachelines,
+- * which suits some pipelines better (on others it should not
+- * hurt).
+- */
+- addi r12,r12,8
++ ld r11,PACA_L1D_FLUSH_SIZE(r13)
++ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+ mtctr r11
+ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+ /* order ld/st prior to dcbt stop all streams with flushing */
+ sync
+-1: li r8,0
+- .rept 8 /* 8-way set associative */
+- ldx r11,r10,r8
+- add r8,r8,r12
+- xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
+- add r8,r8,r11 // Add 0, this creates a dependency on the ldx
+- .endr
+- addi r10,r10,128 /* 128 byte cache line */
++
++ /*
++ * The load adresses are at staggered offsets within cachelines,
++ * which suits some pipelines better (on others it should not
++ * hurt).
++ */
++1:
++ ld r11,(0x80 + 8)*0(r10)
++ ld r11,(0x80 + 8)*1(r10)
++ ld r11,(0x80 + 8)*2(r10)
++ ld r11,(0x80 + 8)*3(r10)
++ ld r11,(0x80 + 8)*4(r10)
++ ld r11,(0x80 + 8)*5(r10)
++ ld r11,(0x80 + 8)*6(r10)
++ ld r11,(0x80 + 8)*7(r10)
++ addi r10,r10,0x80*8
+ bdnz 1b
+
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+- ld r12,PACA_EXRFI+EX_R12(r13)
+- ld r8,PACA_EXRFI+EX_R13(r13)
+ GET_SCRATCH0(r13);
+ rfid
+
+@@ -1482,39 +1480,37 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+- std r12,PACA_EXRFI+EX_R12(r13)
+- std r8,PACA_EXRFI+EX_R13(r13)
+ mfctr r9
+ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+- ld r11,PACA_L1D_FLUSH_SETS(r13)
+- ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+- /*
+- * The load adresses are at staggered offsets within cachelines,
+- * which suits some pipelines better (on others it should not
+- * hurt).
+- */
+- addi r12,r12,8
++ ld r11,PACA_L1D_FLUSH_SIZE(r13)
++ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+ mtctr r11
+ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+ /* order ld/st prior to dcbt stop all streams with flushing */
+ sync
+-1: li r8,0
+- .rept 8 /* 8-way set associative */
+- ldx r11,r10,r8
+- add r8,r8,r12
+- xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
+- add r8,r8,r11 // Add 0, this creates a dependency on the ldx
+- .endr
+- addi r10,r10,128 /* 128 byte cache line */
++
++ /*
++ * The load adresses are at staggered offsets within cachelines,
++ * which suits some pipelines better (on others it should not
++ * hurt).
++ */
++1:
++ ld r11,(0x80 + 8)*0(r10)
++ ld r11,(0x80 + 8)*1(r10)
++ ld r11,(0x80 + 8)*2(r10)
++ ld r11,(0x80 + 8)*3(r10)
++ ld r11,(0x80 + 8)*4(r10)
++ ld r11,(0x80 + 8)*5(r10)
++ ld r11,(0x80 + 8)*6(r10)
++ ld r11,(0x80 + 8)*7(r10)
++ addi r10,r10,0x80*8
+ bdnz 1b
+
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+- ld r12,PACA_EXRFI+EX_R12(r13)
+- ld r8,PACA_EXRFI+EX_R13(r13)
+ GET_SCRATCH0(r13);
+ hrfid
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -851,19 +851,8 @@ static void init_fallback_flush(void)
+ memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+
+ for_each_possible_cpu(cpu) {
+- /*
+- * The fallback flush is currently coded for 8-way
+- * associativity. Different associativity is possible, but it
+- * will be treated as 8-way and may not evict the lines as
+- * effectively.
+- *
+- * 128 byte lines are mandatory.
+- */
+- u64 c = l1d_size / 8;
+-
+ paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
+- paca[cpu].l1d_flush_congruence = c;
+- paca[cpu].l1d_flush_sets = c / 128;
++ paca[cpu].l1d_flush_size = l1d_size;
+ }
+ }
+
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -2348,6 +2348,8 @@ static void dump_one_paca(int cpu)
+ DUMP(p, slb_cache_ptr, "x");
+ for (i = 0; i < SLB_CACHE_ENTRIES; i++)
+ printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]);
++
++ DUMP(p, rfi_flush_fallback_area, "px");
+ #endif
+ DUMP(p, dscr_default, "llx");
+ #ifdef CONFIG_PPC_BOOK3E
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:39 +1000
+Subject: powerpc/64s: Move cpu_show_meltdown()
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-14-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 8ad33041563a10b34988800c682ada14b2612533 upstream.
+
+This landed in setup_64.c for no good reason other than we had nowhere
+else to put it. Now that we have a security-related file, that is a
+better place for it so move it.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/security.c | 11 +++++++++++
+ arch/powerpc/kernel/setup_64.c | 8 --------
+ 2 files changed, 11 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -5,6 +5,8 @@
+ // Copyright 2018, Michael Ellerman, IBM Corporation.
+
+ #include <linux/kernel.h>
++#include <linux/device.h>
++
+ #include <asm/security_features.h>
+
+
+@@ -13,3 +15,12 @@ unsigned long powerpc_security_features
+ SEC_FTR_L1D_FLUSH_PR | \
+ SEC_FTR_BNDS_CHK_SPEC_BAR | \
+ SEC_FTR_FAVOUR_SECURITY;
++
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (rfi_flush)
++ return sprintf(buf, "Mitigation: RFI Flush\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -910,12 +910,4 @@ static __init int rfi_flush_debugfs_init
+ }
+ device_initcall(rfi_flush_debugfs_init);
+ #endif
+-
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- if (rfi_flush)
+- return sprintf(buf, "Mitigation: RFI Flush\n");
+-
+- return sprintf(buf, "Vulnerable\n");
+-}
+ #endif /* CONFIG_PPC_BOOK3S_64 */
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:43 +1000
+Subject: powerpc/64s: Wire up cpu_show_spectre_v1()
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-18-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 56986016cb8cd9050e601831fe89f332b4e3c46e upstream.
+
+Add a definition for cpu_show_spectre_v1() to override the generic
+version. Currently this just prints "Not affected" or "Vulnerable"
+based on the firmware flag.
+
+Although the kernel does have array_index_nospec() in a few places, we
+haven't yet audited all the powerpc code to see where it's necessary,
+so for now we don't list that as a mitigation.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/security.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -50,3 +50,11 @@ ssize_t cpu_show_meltdown(struct device
+
+ return sprintf(buf, "Vulnerable\n");
+ }
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:44 +1000
+Subject: powerpc/64s: Wire up cpu_show_spectre_v2()
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-19-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit d6fbe1c55c55c6937cbea3531af7da84ab7473c3 upstream.
+
+Add a definition for cpu_show_spectre_v2() to override the generic
+version. This has several permutations, though in practice some may not
+occur, we cater for any combination.
+
+The most verbose is:
+
+ Mitigation: Indirect branch serialisation (kernel only), Indirect
+ branch cache disabled, ori31 speculation barrier enabled
+
+We don't treat the ori31 speculation barrier as a mitigation on its
+own, because it has to be *used* by code in order to be a mitigation
+and we don't know if userspace is doing that. So if that's all we see
+we say:
+
+ Vulnerable, ori31 speculation barrier enabled
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/security.c | 33 +++++++++++++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -58,3 +58,36 @@ ssize_t cpu_show_spectre_v1(struct devic
+
+ return sprintf(buf, "Vulnerable\n");
+ }
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool bcs, ccd, ori;
++ struct seq_buf s;
++
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
++ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
++ ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (bcs || ccd) {
++ seq_buf_printf(&s, "Mitigation: ");
++
++ if (bcs)
++ seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
++
++ if (bcs && ccd)
++ seq_buf_printf(&s, ", ");
++
++ if (ccd)
++ seq_buf_printf(&s, "Indirect branch cache disabled");
++ } else
++ seq_buf_printf(&s, "Vulnerable");
++
++ if (ori)
++ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
++
++ seq_buf_printf(&s, "\n");
++
++ return s.len;
++}
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:36 +1000
+Subject: powerpc: Add security feature flags for Spectre/Meltdown
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-11-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 9a868f634349e62922c226834aa23e3d1329ae7f upstream.
+
+This commit adds security feature flags to reflect the settings we
+receive from firmware regarding Spectre/Meltdown mitigations.
+
+The feature names reflect the names we are given by firmware on bare
+metal machines. See the hostboot source for details.
+
+Arguably these could be firmware features, but that then requires them
+to be read early in boot so they're available prior to asm feature
+patching, but we don't actually want to use them for patching. We may
+also want to dynamically update them in future, which would be
+incompatible with the way firmware features work (at the moment at
+least). So for now just make them separate flags.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/security_features.h | 65 +++++++++++++++++++++++++++
+ arch/powerpc/kernel/Makefile | 2
+ arch/powerpc/kernel/security.c | 15 ++++++
+ 3 files changed, 81 insertions(+), 1 deletion(-)
+ create mode 100644 arch/powerpc/include/asm/security_features.h
+ create mode 100644 arch/powerpc/kernel/security.c
+
+--- /dev/null
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -0,0 +1,65 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Security related feature bit definitions.
++ *
++ * Copyright 2018, Michael Ellerman, IBM Corporation.
++ */
++
++#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
++#define _ASM_POWERPC_SECURITY_FEATURES_H
++
++
++extern unsigned long powerpc_security_features;
++
++static inline void security_ftr_set(unsigned long feature)
++{
++ powerpc_security_features |= feature;
++}
++
++static inline void security_ftr_clear(unsigned long feature)
++{
++ powerpc_security_features &= ~feature;
++}
++
++static inline bool security_ftr_enabled(unsigned long feature)
++{
++ return !!(powerpc_security_features & feature);
++}
++
++
++// Features indicating support for Spectre/Meltdown mitigations
++
++// The L1-D cache can be flushed with ori r30,r30,0
++#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
++
++// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
++#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
++
++// ori r31,r31,0 acts as a speculation barrier
++#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
++
++// Speculation past bctr is disabled
++#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
++
++// Entries in L1-D are private to a SMT thread
++#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
++
++// Indirect branch prediction cache disabled
++#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
++
++
++// Features indicating need for Spectre/Meltdown mitigations
++
++// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
++#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
++
++// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
++#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
++
++// A speculation barrier should be used for bounds checks (Spectre variant 1)
++#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
++
++// Firmware configuration indicates user favours security over performance
++#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
++
++#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32) += vdso32/
+ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
+ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
+-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
+ obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+ obj-$(CONFIG_PPC64) += vdso64/
+--- /dev/null
++++ b/arch/powerpc/kernel/security.c
+@@ -0,0 +1,15 @@
++// SPDX-License-Identifier: GPL-2.0+
++//
++// Security related flags and so on.
++//
++// Copyright 2018, Michael Ellerman, IBM Corporation.
++
++#include <linux/kernel.h>
++#include <asm/security_features.h>
++
++
++unsigned long powerpc_security_features __read_mostly = \
++ SEC_FTR_L1D_FLUSH_HV | \
++ SEC_FTR_L1D_FLUSH_PR | \
++ SEC_FTR_BNDS_CHK_SPEC_BAR | \
++ SEC_FTR_FAVOUR_SECURITY;
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:46 +1000
+Subject: powerpc: Move default security feature flags
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-21-mpe@ellerman.id.au>
+
+From: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+
+commit e7347a86830f38dc3e40c8f7e28c04412b12a2e7 upstream.
+
+This moves the definition of the default security feature flags
+(i.e., enabled by default) closer to the security feature flags.
+
+This can be used to restore current flags to the default flags.
+
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/security_features.h | 8 ++++++++
+ arch/powerpc/kernel/security.c | 7 +------
+ 2 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -63,4 +63,12 @@ static inline bool security_ftr_enabled(
+ // Firmware configuration indicates user favours security over performance
+ #define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
+
++
++// Features enabled by default
++#define SEC_FTR_DEFAULT \
++ (SEC_FTR_L1D_FLUSH_HV | \
++ SEC_FTR_L1D_FLUSH_PR | \
++ SEC_FTR_BNDS_CHK_SPEC_BAR | \
++ SEC_FTR_FAVOUR_SECURITY)
++
+ #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -11,12 +11,7 @@
+ #include <asm/security_features.h>
+
+
+-unsigned long powerpc_security_features __read_mostly = \
+- SEC_FTR_L1D_FLUSH_HV | \
+- SEC_FTR_L1D_FLUSH_PR | \
+- SEC_FTR_BNDS_CHK_SPEC_BAR | \
+- SEC_FTR_FAVOUR_SECURITY;
+-
++unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:38 +1000
+Subject: powerpc/powernv: Set or clear security feature flags
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-13-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 77addf6e95c8689e478d607176b399a6242a777e upstream.
+
+Now that we have feature flags for security related things, set or
+clear them based on what we see in the device tree provided by
+firmware.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/powernv/setup.c | 56 +++++++++++++++++++++++++++++++++
+ 1 file changed, 56 insertions(+)
+
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -37,9 +37,63 @@
+ #include <asm/kexec.h>
+ #include <asm/smp.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+
+ #include "powernv.h"
+
++
++static bool fw_feature_is(const char *state, const char *name,
++ struct device_node *fw_features)
++{
++ struct device_node *np;
++ bool rc = false;
++
++ np = of_get_child_by_name(fw_features, name);
++ if (np) {
++ rc = of_property_read_bool(np, state);
++ of_node_put(np);
++ }
++
++ return rc;
++}
++
++static void init_fw_feat_flags(struct device_node *np)
++{
++ if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++ if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++ if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++ if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++ /*
++ * The features below are enabled by default, so we instead look to see
++ * if firmware has *disabled* them, and clear them if so.
++ */
++ if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
++
++ if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ static void pnv_setup_rfi_flush(void)
+ {
+ struct device_node *np, *fw_features;
+@@ -55,6 +109,8 @@ static void pnv_setup_rfi_flush(void)
+ of_node_put(np);
+
+ if (fw_features) {
++ init_fw_feat_flags(fw_features);
++
+ np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+ if (np && of_property_read_bool(np, "enabled"))
+ type = L1D_FLUSH_MTTRIG;
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:29 +1000
+Subject: powerpc/powernv: Support firmware disable of RFI flush
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-4-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit eb0a2d2620ae431c543963c8c7f08f597366fc60 upstream.
+
+Some versions of firmware will have a setting that can be configured
+to disable the RFI flush, add support for it.
+
+Fixes: 6e032b350cd1 ("powerpc/powernv: Check device-tree for RFI flush settings")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/powernv/setup.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -79,6 +79,10 @@ static void pnv_setup_rfi_flush(void)
+ if (np && of_property_read_bool(np, "disabled"))
+ enable--;
+
++ np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
++ if (np && of_property_read_bool(np, "disabled"))
++ enable = 0;
++
+ of_node_put(np);
+ of_node_put(fw_features);
+ }
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:41 +1000
+Subject: powerpc/powernv: Use the security flags in pnv_setup_rfi_flush()
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-16-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 37c0bdd00d3ae83369ab60a6712c28e11e6458d5 upstream.
+
+Now that we have the security flags we can significantly simplify the
+code in pnv_setup_rfi_flush(), because we can use the flags instead of
+checking device tree properties and because the security flags have
+pessimistic defaults.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/powernv/setup.c | 41 ++++++++-------------------------
+ 1 file changed, 10 insertions(+), 31 deletions(-)
+
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -65,7 +65,7 @@ static void init_fw_feat_flags(struct de
+ if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
+ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
+
+- if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++ if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
+ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
+
+ if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
+@@ -98,11 +98,10 @@ static void pnv_setup_rfi_flush(void)
+ {
+ struct device_node *np, *fw_features;
+ enum l1d_flush_type type;
+- int enable;
++ bool enable;
+
+ /* Default to fallback in case fw-features are not available */
+ type = L1D_FLUSH_FALLBACK;
+- enable = 1;
+
+ np = of_find_node_by_name(NULL, "ibm,opal");
+ fw_features = of_get_child_by_name(np, "fw-features");
+@@ -110,40 +109,20 @@ static void pnv_setup_rfi_flush(void)
+
+ if (fw_features) {
+ init_fw_feat_flags(fw_features);
++ of_node_put(fw_features);
+
+- np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+- if (np && of_property_read_bool(np, "enabled"))
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
+ type = L1D_FLUSH_MTTRIG;
+
+- of_node_put(np);
+-
+- np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+- if (np && of_property_read_bool(np, "enabled"))
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
+ type = L1D_FLUSH_ORI;
+-
+- of_node_put(np);
+-
+- /* Enable unless firmware says NOT to */
+- enable = 2;
+- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable--;
+-
+- of_node_put(np);
+-
+- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable--;
+-
+- np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable = 0;
+-
+- of_node_put(np);
+- of_node_put(fw_features);
+ }
+
+- setup_rfi_flush(type, enable > 0);
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
++
++ setup_rfi_flush(type, enable);
+ }
+
+ static void __init pnv_setup_arch(void)
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:35 +1000
+Subject: powerpc/pseries: Add new H_GET_CPU_CHARACTERISTICS flags
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-10-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit c4bc36628d7f8b664657d8bd6ad1c44c177880b7 upstream.
+
+Add some additional values which have been defined for the
+H_GET_CPU_CHARACTERISTICS hypercall.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/hvcall.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -337,6 +337,9 @@
+ #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
+ #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
+ #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
++#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
++#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
++#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+
+ #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
+ #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:45 +1000
+Subject: powerpc/pseries: Fix clearing of security feature flags
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-20-mpe@ellerman.id.au>
+
+From: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+
+commit 0f9bdfe3c77091e8704d2e510eb7c2c2c6cde524 upstream.
+
+The H_CPU_BEHAV_* flags should be checked for in the 'behaviour' field
+of 'struct h_cpu_char_result' -- 'character' is for H_CPU_CHAR_*
+flags.
+
+Found by playing around with QEMU's implementation of the hypercall:
+
+ H_CPU_CHAR=0xf000000000000000
+ H_CPU_BEHAV=0x0000000000000000
+
+ This clears H_CPU_BEHAV_FAVOUR_SECURITY and H_CPU_BEHAV_L1D_FLUSH_PR
+ so pseries_setup_rfi_flush() disables 'rfi_flush'; and it also
+ clears H_CPU_CHAR_L1D_THREAD_PRIV flag. So there is no RFI flush
+ mitigation at all for cpu_show_meltdown() to report; but currently
+ it does:
+
+ Original kernel:
+
+ # cat /sys/devices/system/cpu/vulnerabilities/meltdown
+ Mitigation: RFI Flush
+
+ Patched kernel:
+
+ # cat /sys/devices/system/cpu/vulnerabilities/meltdown
+ Not affected
+
+ H_CPU_CHAR=0x0000000000000000
+ H_CPU_BEHAV=0xf000000000000000
+
+ This sets H_CPU_BEHAV_BNDS_CHK_SPEC_BAR so cpu_show_spectre_v1() should
+ report vulnerable; but currently it doesn't:
+
+ Original kernel:
+
+ # cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ Not affected
+
+ Patched kernel:
+
+ # cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ Vulnerable
+
+Brown-paper-bag-by: Michael Ellerman <mpe@ellerman.id.au>
+Fixes: f636c14790ea ("powerpc/pseries: Set or clear security feature flags")
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -484,13 +484,13 @@ static void init_cpu_char_feature_flags(
+ * The features below are enabled by default, so we instead look to see
+ * if firmware has *disabled* them, and clear them if so.
+ */
+- if (!(result->character & H_CPU_BEHAV_FAVOUR_SECURITY))
++ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
+
+- if (!(result->character & H_CPU_BEHAV_L1D_FLUSH_PR))
++ if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
+
+- if (!(result->character & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
++ if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
+ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
+ }
+
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:47 +1000
+Subject: powerpc/pseries: Restore default security feature flags on setup
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-22-mpe@ellerman.id.au>
+
+From: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+
+commit 6232774f1599028a15418179d17f7df47ede770a upstream.
+
+After migration the security feature flags might have changed (e.g.,
+destination system with unpatched firmware), but some flags are not
+set/clear again in init_cpu_char_feature_flags() because it assumes
+the security flags to be the defaults.
+
+Additionally, if the H_GET_CPU_CHARACTERISTICS hypercall fails then
+init_cpu_char_feature_flags() does not run again, which potentially
+might leave the system in an insecure or sub-optimal configuration.
+
+So, just restore the security feature flags to the defaults assumed
+by init_cpu_char_feature_flags() so it can set/clear them correctly,
+and to ensure safe settings are in place in case the hypercall fail.
+
+Fixes: f636c14790ea ("powerpc/pseries: Set or clear security feature flags")
+Depends-on: 19887d6a28e2 ("powerpc: Move default security feature flags")
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -462,6 +462,10 @@ static void __init find_and_init_phbs(vo
+
+ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
+ {
++ /*
++ * The features below are disabled by default, so we instead look to see
++ * if firmware has *enabled* them, and set them if so.
++ */
+ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
+ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
+
+@@ -501,6 +505,13 @@ void pseries_setup_rfi_flush(void)
+ bool enable;
+ long rc;
+
++ /*
++ * Set features to the defaults assumed by init_cpu_char_feature_flags()
++ * so it can set/clear again any features that might have changed after
++ * migration, and in case the hypercall fails and it is not even called.
++ */
++ powerpc_security_features = SEC_FTR_DEFAULT;
++
+ rc = plpar_get_cpu_characteristics(&result);
+ if (rc == H_SUCCESS)
+ init_cpu_char_feature_flags(&result);
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:37 +1000
+Subject: powerpc/pseries: Set or clear security feature flags
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-12-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit f636c14790ead6cc22cf62279b1f8d7e11a67116 upstream.
+
+Now that we have feature flags for security related things, set or
+clear them based on what we receive from the hypercall.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 43 +++++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -68,6 +68,7 @@
+ #include <asm/plpar_wrappers.h>
+ #include <asm/kexec.h>
+ #include <asm/isa-bridge.h>
++#include <asm/security_features.h>
+
+ #include "pseries.h"
+
+@@ -459,6 +460,40 @@ static void __init find_and_init_phbs(vo
+ of_pci_check_probe_only();
+ }
+
++static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
++{
++ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++ if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++ if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++ if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++ /*
++ * The features below are enabled by default, so we instead look to see
++ * if firmware has *disabled* them, and clear them if so.
++ */
++ if (!(result->character & H_CPU_BEHAV_FAVOUR_SECURITY))
++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++ if (!(result->character & H_CPU_BEHAV_L1D_FLUSH_PR))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++ if (!(result->character & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ void pseries_setup_rfi_flush(void)
+ {
+ struct h_cpu_char_result result;
+@@ -472,6 +507,8 @@ void pseries_setup_rfi_flush(void)
+
+ rc = plpar_get_cpu_characteristics(&result);
+ if (rc == H_SUCCESS) {
++ init_cpu_char_feature_flags(&result);
++
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+ types |= L1D_FLUSH_MTTRIG;
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+@@ -482,6 +519,12 @@ void pseries_setup_rfi_flush(void)
+ enable = false;
+ }
+
++ /*
++ * We're the guest so this doesn't apply to us, clear it to simplify
++ * handling of it elsewhere.
++ */
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
++
+ setup_rfi_flush(types, enable);
+ }
+
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:28 +1000
+Subject: powerpc/pseries: Support firmware disable of RFI flush
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-3-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 582605a429e20ae68fd0b041b2e840af296edd08 upstream.
+
+Some versions of firmware will have a setting that can be configured
+to disable the RFI flush, add support for it.
+
+Fixes: 8989d56878a7 ("powerpc/pseries: Query hypervisor for RFI flush settings")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -482,7 +482,8 @@ static void pseries_setup_rfi_flush(void
+ if (types == L1D_FLUSH_NONE)
+ types = L1D_FLUSH_FALLBACK;
+
+- if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
++ if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
++ (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
+ enable = false;
+ } else {
+ /* Default to fallback if case hcall is not available */
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:42 +1000
+Subject: powerpc/pseries: Use the security flags in pseries_setup_rfi_flush()
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-17-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 2e4a16161fcd324b1f9bf6cb6856529f7eaf0689 upstream.
+
+Now that we have the security flags we can simplify the code in
+pseries_setup_rfi_flush() because the security flags have pessimistic
+defaults.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 27 ++++++++++++---------------
+ 1 file changed, 12 insertions(+), 15 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -501,30 +501,27 @@ void pseries_setup_rfi_flush(void)
+ bool enable;
+ long rc;
+
+- /* Enable by default */
+- enable = true;
+- types = L1D_FLUSH_FALLBACK;
+-
+ rc = plpar_get_cpu_characteristics(&result);
+- if (rc == H_SUCCESS) {
++ if (rc == H_SUCCESS)
+ init_cpu_char_feature_flags(&result);
+
+- if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+- types |= L1D_FLUSH_MTTRIG;
+- if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+- types |= L1D_FLUSH_ORI;
+-
+- if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+- (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
+- enable = false;
+- }
+-
+ /*
+ * We're the guest so this doesn't apply to us, clear it to simplify
+ * handling of it elsewhere.
+ */
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
+
++ types = L1D_FLUSH_FALLBACK;
++
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
++ types |= L1D_FLUSH_MTTRIG;
++
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
++ types |= L1D_FLUSH_ORI;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
++
+ setup_rfi_flush(types, enable);
+ }
+
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:32 +1000
+Subject: powerpc/rfi-flush: Always enable fallback flush on pseries
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-7-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 84749a58b6e382f109abf1e734bc4dd43c2c25bb upstream.
+
+This ensures the fallback flush area is always allocated on pseries,
+so in case a LPAR is migrated from a patched to an unpatched system,
+it is possible to enable the fallback flush in the target system.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -468,26 +468,18 @@ static void pseries_setup_rfi_flush(void
+
+ /* Enable by default */
+ enable = true;
++ types = L1D_FLUSH_FALLBACK;
+
+ rc = plpar_get_cpu_characteristics(&result);
+ if (rc == H_SUCCESS) {
+- types = L1D_FLUSH_NONE;
+-
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+ types |= L1D_FLUSH_MTTRIG;
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+ types |= L1D_FLUSH_ORI;
+
+- /* Use fallback if nothing set in hcall */
+- if (types == L1D_FLUSH_NONE)
+- types = L1D_FLUSH_FALLBACK;
+-
+ if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+ (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
+ enable = false;
+- } else {
+- /* Default to fallback if case hcall is not available */
+- types = L1D_FLUSH_FALLBACK;
+ }
+
+ setup_rfi_flush(types, enable);
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:34 +1000
+Subject: powerpc/rfi-flush: Call setup_rfi_flush() after LPM migration
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-9-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 921bc6cf807ceb2ab8005319cf39f33494d6b100 upstream.
+
+We might have migrated to a machine that uses a different flush type,
+or doesn't need flushing at all.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/mobility.c | 3 +++
+ arch/powerpc/platforms/pseries/pseries.h | 2 ++
+ arch/powerpc/platforms/pseries/setup.c | 2 +-
+ 3 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -348,6 +348,9 @@ void post_mobility_fixup(void)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
+
++ /* Possibly switch to a new RFI flush type */
++ pseries_setup_rfi_flush();
++
+ return;
+ }
+
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -100,4 +100,6 @@ static inline unsigned long cmo_get_page
+
+ int dlpar_workqueue_init(void);
+
++void pseries_setup_rfi_flush(void);
++
+ #endif /* _PSERIES_PSERIES_H */
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -459,7 +459,7 @@ static void __init find_and_init_phbs(vo
+ of_pci_check_probe_only();
+ }
+
+-static void pseries_setup_rfi_flush(void)
++void pseries_setup_rfi_flush(void)
+ {
+ struct h_cpu_char_result result;
+ enum l1d_flush_type types;
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:33 +1000
+Subject: powerpc/rfi-flush: Differentiate enabled and patched flush types
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-8-mpe@ellerman.id.au>
+
+From: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+
+commit 0063d61ccfc011f379a31acaeba6de7c926fed2c upstream.
+
+Currently the rfi-flush messages print 'Using <type> flush' for all
+enabled_flush_types, but that is not necessarily true -- as now the
+fallback flush is always enabled on pseries, but the fixup function
+overwrites its nop/branch slot with other flush types, if available.
+
+So, replace the 'Using <type> flush' messages with '<type> flush is
+available'.
+
+Also, print the patched flush types in the fixup function, so users
+can know what is (not) being used (e.g., the slower, fallback flush,
+or no flush type at all if flush is disabled via the debugfs switch).
+
+Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/setup_64.c | 6 +++---
+ arch/powerpc/lib/feature-fixups.c | 9 ++++++++-
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -860,15 +860,15 @@ static void init_fallback_flush(void)
+ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ {
+ if (types & L1D_FLUSH_FALLBACK) {
+- pr_info("rfi-flush: Using fallback displacement flush\n");
++ pr_info("rfi-flush: fallback displacement flush available\n");
+ init_fallback_flush();
+ }
+
+ if (types & L1D_FLUSH_ORI)
+- pr_info("rfi-flush: Using ori type flush\n");
++ pr_info("rfi-flush: ori type flush available\n");
+
+ if (types & L1D_FLUSH_MTTRIG)
+- pr_info("rfi-flush: Using mttrig type flush\n");
++ pr_info("rfi-flush: mttrig type flush available\n");
+
+ enabled_flush_types = types;
+
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -153,7 +153,14 @@ void do_rfi_flush_fixups(enum l1d_flush_
+ patch_instruction(dest + 2, instrs[2]);
+ }
+
+- printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
++ printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
++ (types == L1D_FLUSH_NONE) ? "no" :
++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
++ ? "ori+mttrig type"
++ : "ori type" :
++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
++ : "unknown");
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:31 +1000
+Subject: powerpc/rfi-flush: Make it possible to call setup_rfi_flush() again
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-6-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit abf110f3e1cea40f5ea15e85f5d67c39c14568a7 upstream.
+
+For PowerVM migration we want to be able to call setup_rfi_flush()
+again after we've migrated the partition.
+
+To support that we need to check that we're not trying to allocate the
+fallback flush area after memblock has gone away (i.e., boot-time only).
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/setup.h | 2 +-
+ arch/powerpc/kernel/setup_64.c | 6 +++++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -49,7 +49,7 @@ enum l1d_flush_type {
+ L1D_FLUSH_MTTRIG = 0x8,
+ };
+
+-void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
++void setup_rfi_flush(enum l1d_flush_type, bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
+
+ #endif /* !__ASSEMBLY__ */
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -836,6 +836,10 @@ static void init_fallback_flush(void)
+ u64 l1d_size, limit;
+ int cpu;
+
++ /* Only allocate the fallback flush area once (at boot time). */
++ if (l1d_flush_fallback_area)
++ return;
++
+ l1d_size = ppc64_caches.l1d.size;
+ limit = min(safe_stack_limit(), ppc64_rma_size);
+
+@@ -853,7 +857,7 @@ static void init_fallback_flush(void)
+ }
+ }
+
+-void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
++void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ {
+ if (types & L1D_FLUSH_FALLBACK) {
+ pr_info("rfi-flush: Using fallback displacement flush\n");
--- /dev/null
+From foo@baz Sun May 27 15:47:18 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 26 May 2018 14:27:30 +1000
+Subject: powerpc/rfi-flush: Move the logic to avoid a redo into the debugfs code
+To: greg@kroah.com
+Cc: stable@vger.kernel.org, tglx@linutronix.de, linuxppc-dev@ozlabs.org
+Message-ID: <20180526042749.5324-5-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 1e2a9fc7496955faacbbed49461d611b704a7505 upstream.
+
+rfi_flush_enable() includes a check to see if we're already
+enabled (or disabled), and in that case does nothing.
+
+But that means calling setup_rfi_flush() a 2nd time doesn't actually
+work, which is a bit confusing.
+
+Move that check into the debugfs code, where it really belongs.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/setup_64.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -822,9 +822,6 @@ static void do_nothing(void *unused)
+
+ void rfi_flush_enable(bool enable)
+ {
+- if (rfi_flush == enable)
+- return;
+-
+ if (enable) {
+ do_rfi_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+@@ -878,13 +875,19 @@ void __init setup_rfi_flush(enum l1d_flu
+ #ifdef CONFIG_DEBUG_FS
+ static int rfi_flush_set(void *data, u64 val)
+ {
++ bool enable;
++
+ if (val == 1)
+- rfi_flush_enable(true);
++ enable = true;
+ else if (val == 0)
+- rfi_flush_enable(false);
++ enable = false;
+ else
+ return -EINVAL;
+
++ /* Only do anything if we're changing state */
++ if (enable != rfi_flush)
++ rfi_flush_enable(enable);
++
+ return 0;
+ }
+
kasan-free-allocated-shadow-memory-on-mem_cancel_online.patch
kasan-fix-memory-hotplug-during-boot.patch
kernel-sys.c-fix-potential-spectre-v1-issue.patch
+kvm-vmx-expose-ssbd-properly-to-guests.patch
+kvm-s390-vsie-fix-8k-check-for-the-itdba.patch
+kvm-x86-update-cpuid-properly-when-cr4.osxave-or-cr4.pke-is-changed.patch
+kvm-x86-ia32_arch_capabilities-is-always-supported.patch
+x86-kvm-fix-lapic-timer-drift-when-guest-uses-periodic-mode.patch
+powerpc-64s-improve-rfi-l1-d-cache-flush-fallback.patch
+powerpc-pseries-support-firmware-disable-of-rfi-flush.patch
+powerpc-powernv-support-firmware-disable-of-rfi-flush.patch
+powerpc-rfi-flush-move-the-logic-to-avoid-a-redo-into-the-debugfs-code.patch
+powerpc-rfi-flush-make-it-possible-to-call-setup_rfi_flush-again.patch
+powerpc-rfi-flush-always-enable-fallback-flush-on-pseries.patch
+powerpc-rfi-flush-differentiate-enabled-and-patched-flush-types.patch
+powerpc-rfi-flush-call-setup_rfi_flush-after-lpm-migration.patch
+powerpc-pseries-add-new-h_get_cpu_characteristics-flags.patch
+powerpc-add-security-feature-flags-for-spectre-meltdown.patch
+powerpc-pseries-set-or-clear-security-feature-flags.patch
+powerpc-powernv-set-or-clear-security-feature-flags.patch
+powerpc-64s-move-cpu_show_meltdown.patch
+powerpc-64s-enhance-the-information-in-cpu_show_meltdown.patch
+powerpc-powernv-use-the-security-flags-in-pnv_setup_rfi_flush.patch
+powerpc-pseries-use-the-security-flags-in-pseries_setup_rfi_flush.patch
+powerpc-64s-wire-up-cpu_show_spectre_v1.patch
+powerpc-64s-wire-up-cpu_show_spectre_v2.patch
+powerpc-pseries-fix-clearing-of-security-feature-flags.patch
+powerpc-move-default-security-feature-flags.patch
+powerpc-pseries-restore-default-security-feature-flags-on-setup.patch
+powerpc-64s-fix-section-mismatch-warnings-from-setup_rfi_flush.patch
+powerpc-64s-add-support-for-a-store-forwarding-barrier-at-kernel-entry-exit.patch
--- /dev/null
+From d8f2f498d9ed0c5010bc1bbc1146f94c8bf9f8cc Mon Sep 17 00:00:00 2001
+From: David Vrabel <david.vrabel@nutanix.com>
+Date: Fri, 18 May 2018 16:55:46 +0100
+Subject: x86/kvm: fix LAPIC timer drift when guest uses periodic mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Vrabel <david.vrabel@nutanix.com>
+
+commit d8f2f498d9ed0c5010bc1bbc1146f94c8bf9f8cc upstream.
+
+Since 4.10, commit 8003c9ae204e (KVM: LAPIC: add APIC Timer
+periodic/oneshot mode VMX preemption timer support), guests using
+periodic LAPIC timers (such as FreeBSD 8.4) would see their timers
+drift significantly over time.
+
+Differences in the underlying clocks and numerical errors means the
+periods of the two timers (hv and sw) are not the same. This
+difference will accumulate with every expiry resulting in a large
+error between the hv and sw timer.
+
+This means the sw timer may be running slow when compared to the hv
+timer. When the timer is switched from hv to sw, the now active sw
+timer will expire late. The guest VCPU is reentered and it switches to
+using the hv timer. This timer catches up, injecting multiple IRQs
+into the guest (of which the guest only sees one as it does not get to
+run until the hv timer has caught up) and thus the guest's timer rate
+is low (and becomes increasing slower over time as the sw timer lags
+further and further behind).
+
+I believe a similar problem would occur if the hv timer is the slower
+one, but I have not observed this.
+
+Fix this by synchronizing the deadlines for both timers to the same
+time source on every tick. This prevents the errors from accumulating.
+
+Fixes: 8003c9ae204e21204e49816c5ea629357e283b06
+Cc: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: David Vrabel <david.vrabel@nutanix.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Wanpeng Li <wanpengli@tencent.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/lapic.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1467,11 +1467,23 @@ static bool set_target_expiration(struct
+
+ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
+ {
+- apic->lapic_timer.tscdeadline +=
+- nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
++ ktime_t now = ktime_get();
++ u64 tscl = rdtsc();
++ ktime_t delta;
++
++ /*
++ * Synchronize both deadlines to the same time source or
++ * differences in the periods (caused by differences in the
++ * underlying clocks or numerical approximation errors) will
++ * cause the two to drift apart over time as the errors
++ * accumulate.
++ */
+ apic->lapic_timer.target_expiration =
+ ktime_add_ns(apic->lapic_timer.target_expiration,
+ apic->lapic_timer.period);
++ delta = ktime_sub(apic->lapic_timer.target_expiration, now);
++ apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
++ nsec_to_cycles(apic->vcpu, delta);
+ }
+
+ static void start_sw_period(struct kvm_lapic *apic)