--- /dev/null
+From a4023f682739439b434165b54af7cb3676a4766e Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 8 Nov 2016 13:56:20 +0000
+Subject: arm64: Add hypervisor safe helper for checking constant capabilities
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit a4023f682739439b434165b54af7cb3676a4766e upstream.
+
+The hypervisor may not have full access to the kernel data structures
+and hence cannot safely use cpus_have_cap() helper for checking the
+system capability. Add a safe helper for hypervisors to check a constant
+system capability, which *doesn't* fall back to checking the bitmap
+maintained by the kernel. With this, make the cpus_have_cap() only
+check the bitmask and force constant cap checks to use the new API
+for quicker checks.
+
+Cc: Robert Ritcher <rritcher@cavium.com>
+Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[4.9: restore cpus_have_const_cap() to previously-backported code]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h | 19 ++++++++++++-------
+ arch/arm64/include/asm/kvm_host.h | 2 +-
+ arch/arm64/include/asm/kvm_mmu.h | 2 +-
+ arch/arm64/include/asm/mmu.h | 2 +-
+ arch/arm64/kernel/cpufeature.c | 5 +++--
+ arch/arm64/kernel/process.c | 2 +-
+ drivers/irqchip/irq-gic-v3.c | 13 +------------
+ 7 files changed, 20 insertions(+), 25 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -9,8 +9,6 @@
+ #ifndef __ASM_CPUFEATURE_H
+ #define __ASM_CPUFEATURE_H
+
+-#include <linux/jump_label.h>
+-
+ #include <asm/cpucaps.h>
+ #include <asm/hwcap.h>
+ #include <asm/sysreg.h>
+@@ -27,6 +25,8 @@
+
+ #ifndef __ASSEMBLY__
+
++#include <linux/bug.h>
++#include <linux/jump_label.h>
+ #include <linux/kernel.h>
+
+ /* CPU feature register tracking */
+@@ -104,14 +104,19 @@ static inline bool cpu_have_feature(unsi
+ return elf_hwcap & (1UL << num);
+ }
+
++/* System capability check for constant caps */
++static inline bool cpus_have_const_cap(int num)
++{
++ if (num >= ARM64_NCAPS)
++ return false;
++ return static_branch_unlikely(&cpu_hwcap_keys[num]);
++}
++
+ static inline bool cpus_have_cap(unsigned int num)
+ {
+ if (num >= ARM64_NCAPS)
+ return false;
+- if (__builtin_constant_p(num))
+- return static_branch_unlikely(&cpu_hwcap_keys[num]);
+- else
+- return test_bit(num, cpu_hwcaps);
++ return test_bit(num, cpu_hwcaps);
+ }
+
+ static inline void cpus_set_cap(unsigned int num)
+@@ -200,7 +205,7 @@ static inline bool cpu_supports_mixed_en
+
+ static inline bool system_supports_32bit_el0(void)
+ {
+- return cpus_have_cap(ARM64_HAS_32BIT_EL0);
++ return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+ }
+
+ static inline bool system_supports_mixed_endian_el0(void)
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -398,7 +398,7 @@ static inline void __cpu_init_stage2(voi
+
+ static inline bool kvm_arm_harden_branch_predictor(void)
+ {
+- return cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
++ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ }
+
+ #endif /* __ARM64_KVM_HOST_H__ */
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -341,7 +341,7 @@ static inline void *kvm_get_hyp_vector(v
+ vect = __bp_harden_hyp_vecs_start +
+ data->hyp_vectors_slot * SZ_2K;
+
+- if (!cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
++ if (!cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ vect = lm_alias(vect);
+ }
+
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -37,7 +37,7 @@ typedef struct {
+ static inline bool arm64_kernel_unmapped_at_el0(void)
+ {
+ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+- cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
++ cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+ }
+
+ typedef void (*bp_hardening_cb_t)(void);
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mo
+ #endif
+
+ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
++EXPORT_SYMBOL(cpu_hwcaps);
+
+ DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
+ EXPORT_SYMBOL(cpu_hwcap_keys);
+@@ -762,7 +763,7 @@ static bool unmap_kernel_at_el0(const st
+ * ThunderX leads to apparent I-cache corruption of kernel text, which
+ * ends as well as you might imagine. Don't even try.
+ */
+- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ str = "ARM64_WORKAROUND_CAVIUM_27456";
+ __kpti_forced = -1;
+ }
+@@ -1203,5 +1204,5 @@ void __init setup_cpu_features(void)
+ static bool __maybe_unused
+ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
+ {
+- return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
++ return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
+ }
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -291,7 +291,7 @@ int copy_thread(unsigned long clone_flag
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->pstate = PSR_MODE_EL1h;
+ if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+- cpus_have_cap(ARM64_HAS_UAO))
++ cpus_have_const_cap(ARM64_HAS_UAO))
+ childregs->pstate |= PSR_UAO_BIT;
+ p->thread.cpu_context.x19 = stack_start;
+ p->thread.cpu_context.x20 = stk_sz;
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void
+ }
+
+ #ifdef CONFIG_ARM64
+-static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
+
+ static u64 __maybe_unused gic_read_iar(void)
+ {
+- if (static_branch_unlikely(&is_cavium_thunderx))
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
+ return gic_read_iar_cavium_thunderx();
+ else
+ return gic_read_iar_common();
+@@ -908,14 +907,6 @@ static const struct irq_domain_ops parti
+ .select = gic_irq_domain_select,
+ };
+
+-static void gicv3_enable_quirks(void)
+-{
+-#ifdef CONFIG_ARM64
+- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
+- static_branch_enable(&is_cavium_thunderx);
+-#endif
+-}
+-
+ static int __init gic_init_bases(void __iomem *dist_base,
+ struct redist_region *rdist_regs,
+ u32 nr_redist_regions,
+@@ -938,8 +929,6 @@ static int __init gic_init_bases(void __
+ gic_data.nr_redist_regions = nr_redist_regions;
+ gic_data.redist_stride = redist_stride;
+
+- gicv3_enable_quirks();
+-
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
--- /dev/null
+From 63a1e1c95e60e798fa09ab3c536fb555aa5bbf2b Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Tue, 16 May 2017 15:18:05 +0100
+Subject: arm64/cpufeature: don't use mutex in bringup path
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 63a1e1c95e60e798fa09ab3c536fb555aa5bbf2b upstream.
+
+Currently, cpus_set_cap() calls static_branch_enable_cpuslocked(), which
+must take the jump_label mutex.
+
+We call cpus_set_cap() in the secondary bringup path, from the idle
+thread where interrupts are disabled. Taking a mutex in this path "is a
+NONO" regardless of whether it's contended, and something we must avoid.
+We didn't spot this until recently, as ___might_sleep() won't warn for
+this case until all CPUs have been brought up.
+
+This patch avoids taking the mutex in the secondary bringup path. The
+poking of static keys is deferred until enable_cpu_capabilities(), which
+runs in a suitable context on the boot CPU. To account for the static
+keys being set later, cpus_have_const_cap() is updated to use another
+static key to check whether the const cap keys have been initialised,
+falling back to the caps bitmap until this is the case.
+
+This means that users of cpus_have_const_cap() should only gain a
+single additional NOP in the fast path once the const caps are
+initialised, but should always see the current cap value.
+
+The hyp code should never dereference the caps array, since the caps are
+initialized before we run the module initcall to initialise hyp. A check
+is added to the hyp init code to document this requirement.
+
+This change will sidestep a number of issues when the upcoming hotplug
+locking rework is merged.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Suzuki Poulose <suzuki.poulose@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Sewior <bigeasy@linutronix.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[4.9: this avoids an IPI before GICv3 is up, preventing a boot time crash]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h | 12 ++++++++++--
+ arch/arm64/include/asm/kvm_host.h | 8 ++++++--
+ arch/arm64/kernel/cpufeature.c | 23 +++++++++++++++++++++--
+ 3 files changed, 37 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -96,6 +96,7 @@ struct arm64_cpu_capabilities {
+
+ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+ extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
++extern struct static_key_false arm64_const_caps_ready;
+
+ bool this_cpu_has_cap(unsigned int cap);
+
+@@ -105,7 +106,7 @@ static inline bool cpu_have_feature(unsi
+ }
+
+ /* System capability check for constant caps */
+-static inline bool cpus_have_const_cap(int num)
++static inline bool __cpus_have_const_cap(int num)
+ {
+ if (num >= ARM64_NCAPS)
+ return false;
+@@ -119,6 +120,14 @@ static inline bool cpus_have_cap(unsigne
+ return test_bit(num, cpu_hwcaps);
+ }
+
++static inline bool cpus_have_const_cap(int num)
++{
++ if (static_branch_likely(&arm64_const_caps_ready))
++ return __cpus_have_const_cap(num);
++ else
++ return cpus_have_cap(num);
++}
++
+ static inline void cpus_set_cap(unsigned int num)
+ {
+ if (num >= ARM64_NCAPS) {
+@@ -126,7 +135,6 @@ static inline void cpus_set_cap(unsigned
+ num, ARM64_NCAPS);
+ } else {
+ __set_bit(num, cpu_hwcaps);
+- static_branch_enable(&cpu_hwcap_keys[num]);
+ }
+ }
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -24,6 +24,7 @@
+
+ #include <linux/types.h>
+ #include <linux/kvm_types.h>
++#include <asm/cpufeature.h>
+ #include <asm/kvm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmio.h>
+@@ -358,9 +359,12 @@ static inline void __cpu_init_hyp_mode(p
+ unsigned long vector_ptr)
+ {
+ /*
+- * Call initialization code, and switch to the full blown
+- * HYP code.
++ * Call initialization code, and switch to the full blown HYP code.
++ * If the cpucaps haven't been finalized yet, something has gone very
++ * wrong, and hyp will crash and burn when it uses any
++ * cpus_have_const_cap() wrapper.
+ */
++ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
+ }
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1052,8 +1052,16 @@ void update_cpu_capabilities(const struc
+ */
+ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ {
+- for (; caps->matches; caps++)
+- if (caps->enable && cpus_have_cap(caps->capability))
++ for (; caps->matches; caps++) {
++ unsigned int num = caps->capability;
++
++ if (!cpus_have_cap(num))
++ continue;
++
++ /* Ensure cpus_have_const_cap(num) works */
++ static_branch_enable(&cpu_hwcap_keys[num]);
++
++ if (caps->enable) {
+ /*
+ * Use stop_machine() as it schedules the work allowing
+ * us to modify PSTATE, instead of on_each_cpu() which
+@@ -1061,6 +1069,8 @@ void __init enable_cpu_capabilities(cons
+ * we return.
+ */
+ stop_machine(caps->enable, (void *)caps, cpu_online_mask);
++ }
++ }
+ }
+
+ /*
+@@ -1164,6 +1174,14 @@ static void __init setup_feature_capabil
+ enable_cpu_capabilities(arm64_features);
+ }
+
++DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
++EXPORT_SYMBOL(arm64_const_caps_ready);
++
++static void __init mark_const_caps_ready(void)
++{
++ static_branch_enable(&arm64_const_caps_ready);
++}
++
+ extern const struct arm64_cpu_capabilities arm64_errata[];
+
+ bool this_cpu_has_cap(unsigned int cap)
+@@ -1180,6 +1198,7 @@ void __init setup_cpu_features(void)
+ /* Set the CPU feature capabilies */
+ setup_feature_capabilities();
+ enable_errata_workarounds();
++ mark_const_caps_ready();
+ setup_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (system_supports_32bit_el0())
--- /dev/null
+From 271ef65b5882425d500e969e875c98e47a6b0c86 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Thu, 19 Oct 2017 14:33:52 +0200
+Subject: ASoC: Intel: sst: remove redundant variable dma_dev_name
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 271ef65b5882425d500e969e875c98e47a6b0c86 upstream.
+
+The pointer dma_dev_name is assigned but never read, it is redundant
+and can therefore be removed.
+
+Cleans up clang warning:
+sound/soc/intel/common/sst-firmware.c:288:3: warning: Value stored to
+'dma_dev_name' is never read
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Acked-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/intel/common/sst-firmware.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/sound/soc/intel/common/sst-firmware.c
++++ b/sound/soc/intel/common/sst-firmware.c
+@@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst)
+ struct sst_pdata *sst_pdata = sst->pdata;
+ struct sst_dma *dma;
+ struct resource mem;
+- const char *dma_dev_name;
+ int ret = 0;
+
+ if (sst->pdata->resindex_dma_base == -1)
+@@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst)
+ * is attached to the ADSP IP. */
+ switch (sst->pdata->dma_engine) {
+ case SST_DMA_TYPE_DW:
+- dma_dev_name = "dw_dmac";
+ break;
+ default:
+ dev_err(sst->dev, "error: invalid DMA engine %d\n",
--- /dev/null
+From 814596495dd2b9d4aab92d8f89cf19060d25d2ea Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 14 May 2018 20:09:24 -0700
+Subject: cfg80211: further limit wiphy names to 64 bytes
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 814596495dd2b9d4aab92d8f89cf19060d25d2ea upstream.
+
+wiphy names were recently limited to 128 bytes by commit a7cfebcb7594
+("cfg80211: limit wiphy names to 128 bytes"). As it turns out though,
+this isn't sufficient because dev_vprintk_emit() needs the syslog header
+string "SUBSYSTEM=ieee80211\0DEVICE=+ieee80211:$devname" to fit into 128
+bytes. This triggered the "device/subsystem name too long" WARN when
+the device name was >= 90 bytes. As before, this was reproduced by
+syzbot by sending an HWSIM_CMD_NEW_RADIO command to the MAC80211_HWSIM
+generic netlink family.
+
+Fix it by further limiting wiphy names to 64 bytes.
+
+Reported-by: syzbot+e64565577af34b3768dc@syzkaller.appspotmail.com
+Fixes: a7cfebcb7594 ("cfg80211: limit wiphy names to 128 bytes")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/uapi/linux/nl80211.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/uapi/linux/nl80211.h
++++ b/include/uapi/linux/nl80211.h
+@@ -2379,7 +2379,7 @@ enum nl80211_attrs {
+ #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
+ #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+
+-#define NL80211_WIPHY_NAME_MAXLEN 128
++#define NL80211_WIPHY_NAME_MAXLEN 64
+
+ #define NL80211_MAX_SUPP_RATES 32
+ #define NL80211_MAX_SUPP_HT_RATES 77
--- /dev/null
+From 531beb067c6185aceabfdee0965234c6a8fd133b Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Fri, 15 Sep 2017 00:05:16 +0100
+Subject: dma-buf: remove redundant initialization of sg_table
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 531beb067c6185aceabfdee0965234c6a8fd133b upstream.
+
+sg_table is being initialized and is never read before it is updated
+again later on, hence making the initialization redundant. Remove
+the initialization.
+
+Detected by clang scan-build:
+"warning: Value stored to 'sg_table' during its initialization is
+never read"
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20170914230516.6056-1-colin.king@canonical.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma-buf/dma-buf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -551,7 +551,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
+ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+ {
+- struct sg_table *sg_table = ERR_PTR(-EINVAL);
++ struct sg_table *sg_table;
+
+ might_sleep();
+
--- /dev/null
+From srn@prgmr.com Sat Jun 2 15:31:17 2018
+From: Sarah Newman <srn@prgmr.com>
+Date: Wed, 30 May 2018 18:04:05 -0700
+Subject: net/mlx4_en: fix potential use-after-free with dma_unmap_page
+To: stable@vger.kernel.org
+Cc: tariqt@mellanox.com, davem@davemloft.net, Sarah Newman <srn@prgmr.com>
+Message-ID: <1527728645-6216-1-git-send-email-srn@prgmr.com>
+
+From: Sarah Newman <srn@prgmr.com>
+
+[ Not relevant upstream, therefore no upstream commit. ]
+
+To fix, unmap the page as soon as possible.
+
+When swiotlb is in use, calling dma_unmap_page means that
+the original page mapped with dma_map_page must still be valid,
+as swiotlb will copy data from its internal cache back to the
+originally requested DMA location.
+
+When GRO is enabled, before this patch all references to the
+original frag may be put and the page freed before dma_unmap_page
+in mlx4_en_free_frag is called.
+
+It is possible there is a path where the use-after-free occurs
+even with GRO disabled, but this has not been observed so far.
+
+The bug can be trivially detected by doing the following:
+
+* Compile the kernel with DEBUG_PAGEALLOC
+* Run the kernel as a Xen Dom0
+* Leave GRO enabled on the interface
+* Run a 10 second or more test with iperf over the interface.
+
+This bug was likely introduced in
+commit 4cce66cdd14a ("mlx4_en: map entire pages to increase throughput"),
+first part of v3.6.
+
+It was incidentally fixed in
+commit 34db548bfb95 ("mlx4: add page recycling in receive path"),
+first part of v4.12.
+
+This version applies to the v4.9 series.
+
+Signed-off-by: Sarah Newman <srn@prgmr.com>
+Tested-by: Sarah Newman <srn@prgmr.com>
+Cc: Tariq Toukan <tariqt@mellanox.com>
+Cc: Yishai Hadas <yishaih@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 32 ++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -142,16 +142,17 @@ static void mlx4_en_free_frag(struct mlx
+ struct mlx4_en_rx_alloc *frags,
+ int i)
+ {
+- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+- u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+-
+-
+- if (next_frag_end > frags[i].page_size)
+- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+- frag_info->dma_dir);
++ if (frags[i].page) {
++ const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
++ u32 next_frag_end = frags[i].page_offset +
++ 2 * frag_info->frag_stride;
+
+- if (frags[i].page)
++ if (next_frag_end > frags[i].page_size) {
++ dma_unmap_page(priv->ddev, frags[i].dma,
++ frags[i].page_size, frag_info->dma_dir);
++ }
+ put_page(frags[i].page);
++ }
+ }
+
+ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
+@@ -586,21 +587,28 @@ static int mlx4_en_complete_rx_desc(stru
+ int length)
+ {
+ struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
+- struct mlx4_en_frag_info *frag_info;
+ int nr;
+ dma_addr_t dma;
+
+ /* Collect used fragments while replacing them in the HW descriptors */
+ for (nr = 0; nr < priv->num_frags; nr++) {
+- frag_info = &priv->frag_info[nr];
++ struct mlx4_en_frag_info *frag_info = &priv->frag_info[nr];
++ u32 next_frag_end = frags[nr].page_offset +
++ 2 * frag_info->frag_stride;
++
+ if (length <= frag_info->frag_prefix_size)
+ break;
+ if (unlikely(!frags[nr].page))
+ goto fail;
+
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+- dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
+- DMA_FROM_DEVICE);
++ if (next_frag_end > frags[nr].page_size)
++ dma_unmap_page(priv->ddev, frags[nr].dma,
++ frags[nr].page_size, frag_info->dma_dir);
++ else
++ dma_sync_single_for_cpu(priv->ddev, dma,
++ frag_info->frag_size,
++ DMA_FROM_DEVICE);
+
+ /* Save page reference in skb */
+ __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
--- /dev/null
+From d3b56c566d4ba8cae688baf3cca94425d57ea783 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 31 Oct 2017 10:27:47 +0000
+Subject: platform/chrome: cros_ec_lpc: remove redundant pointer request
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit d3b56c566d4ba8cae688baf3cca94425d57ea783 upstream.
+
+Pointer request is being assigned but never used, so remove it. Cleans
+up the clang warning:
+
+drivers/platform/chrome/cros_ec_lpc.c:68:2: warning: Value stored to
+'request' is never read
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Benson Leung <bleung@chromium.org>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/chrome/cros_ec_lpc.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -49,7 +49,6 @@ static int ec_response_timed_out(void)
+ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
+ struct cros_ec_command *msg)
+ {
+- struct ec_host_request *request;
+ struct ec_host_response response;
+ u8 sum = 0;
+ int i;
+@@ -62,8 +61,6 @@ static int cros_ec_pkt_xfer_lpc(struct c
+ for (i = 0; i < ret; i++)
+ outb(ec->dout[i], EC_LPC_ADDR_HOST_PACKET + i);
+
+- request = (struct ec_host_request *)ec->dout;
+-
+ /* Here we go */
+ outb(EC_COMMAND_PROTOCOL_3, EC_LPC_ADDR_HOST_CMD);
+
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:08 +1000
+Subject: powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-24-mpe@ellerman.id.au>
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit a048a07d7f4535baa4cbad6bc024f175317ab938 upstream.
+
+On some CPUs we can prevent a vulnerability related to store-to-load
+forwarding by preventing store forwarding between privilege domains,
+by inserting a barrier in kernel entry and exit paths.
+
+This is known to be the case on at least Power7, Power8 and Power9
+powerpc CPUs.
+
+Barriers must be inserted generally before the first load after moving
+to a higher privilege, and after the last store before moving to a
+lower privilege, HV and PR privilege transitions must be protected.
+
+Barriers are added as patch sections, with all kernel/hypervisor entry
+points patched, and the exit points to lower privilege levels patched
+similarly to the RFI flush patching.
+
+Firmware advertisement is not implemented yet, so CPU flush types
+are hard coded.
+
+Thanks to Michal Suchánek for bug fixes and review.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michal Suchánek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/exception-64s.h | 29 +++++
+ arch/powerpc/include/asm/feature-fixups.h | 19 +++
+ arch/powerpc/include/asm/security_features.h | 11 +
+ arch/powerpc/kernel/exceptions-64s.S | 16 ++
+ arch/powerpc/kernel/security.c | 149 +++++++++++++++++++++++++++
+ arch/powerpc/kernel/vmlinux.lds.S | 14 ++
+ arch/powerpc/lib/feature-fixups.c | 115 ++++++++++++++++++++
+ arch/powerpc/platforms/powernv/setup.c | 1
+ arch/powerpc/platforms/pseries/setup.c | 1
+ 9 files changed, 354 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -51,6 +51,27 @@
+ #define EX_PPR 88 /* SMT thread status register (priority) */
+ #define EX_CTR 96
+
++#define STF_ENTRY_BARRIER_SLOT \
++ STF_ENTRY_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop
++
++#define STF_EXIT_BARRIER_SLOT \
++ STF_EXIT_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop
++
++/*
++ * r10 must be free to use, r13 must be paca
++ */
++#define INTERRUPT_TO_KERNEL \
++ STF_ENTRY_BARRIER_SLOT
++
+ /*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+@@ -67,16 +88,19 @@
+ rfid
+
+ #define RFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+@@ -85,21 +109,25 @@
+ hrfid
+
+ #define HRFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_UNKNOWN \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+@@ -225,6 +253,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ #define __EXCEPTION_PROLOG_1(area, extra, vec) \
+ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
+ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
++ INTERRUPT_TO_KERNEL; \
+ SAVE_CTR(r10, area); \
+ mfcr r9; \
+ extra(vec); \
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -189,6 +189,22 @@ void apply_feature_fixups(void);
+ void setup_feature_keys(void);
+ #endif
+
++#define STF_ENTRY_BARRIER_FIXUP_SECTION \
++953: \
++ .pushsection __stf_entry_barrier_fixup,"a"; \
++ .align 2; \
++954: \
++ FTR_ENTRY_OFFSET 953b-954b; \
++ .popsection;
++
++#define STF_EXIT_BARRIER_FIXUP_SECTION \
++955: \
++ .pushsection __stf_exit_barrier_fixup,"a"; \
++ .align 2; \
++956: \
++ FTR_ENTRY_OFFSET 955b-956b; \
++ .popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION \
+ 951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+@@ -200,6 +216,9 @@ void setup_feature_keys(void);
+
+ #ifndef __ASSEMBLY__
+
++extern long stf_barrier_fallback;
++extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
++extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+
+ #endif
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -12,6 +12,17 @@
+ extern unsigned long powerpc_security_features;
+ extern bool rfi_flush;
+
++/* These are bit flags */
++enum stf_barrier_type {
++ STF_BARRIER_NONE = 0x1,
++ STF_BARRIER_FALLBACK = 0x2,
++ STF_BARRIER_EIEIO = 0x4,
++ STF_BARRIER_SYNC_ORI = 0x8,
++};
++
++void setup_stf_barrier(void);
++void do_stf_barrier_fixups(enum stf_barrier_type types);
++
+ static inline void security_ftr_set(unsigned long feature)
+ {
+ powerpc_security_features |= feature;
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -846,7 +846,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+
+-EXC_REAL_MASKABLE(decrementer, 0x900, 0x980)
++EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x980)
+ EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
+ TRAMP_KVM(PACA_EXGEN, 0x900)
+ EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+@@ -884,6 +884,7 @@ BEGIN_FTR_SECTION \
+ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
+ mr r9,r13 ; \
+ GET_PACA(r13) ; \
++ INTERRUPT_TO_KERNEL ; \
+ mfspr r11,SPRN_SRR0 ; \
+ 0:
+
+@@ -1353,6 +1354,19 @@ masked_##_H##interrupt: \
+ ##_H##RFI_TO_KERNEL; \
+ b .
+
++TRAMP_REAL_BEGIN(stf_barrier_fallback)
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ sync
++ ld r9,PACA_EXRFI+EX_R9(r13)
++ ld r10,PACA_EXRFI+EX_R10(r13)
++ ori 31,31,0
++ .rept 14
++ b 1f
++1:
++ .endr
++ blr
++
+ /*
+ * Real mode exceptions actually use this too, but alternate
+ * instruction code patches (which end up in the common .text area)
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -5,6 +5,7 @@
+ // Copyright 2018, Michael Ellerman, IBM Corporation.
+
+ #include <linux/kernel.h>
++#include <linux/debugfs.h>
+ #include <linux/device.h>
+ #include <linux/seq_buf.h>
+
+@@ -86,3 +87,151 @@ ssize_t cpu_show_spectre_v2(struct devic
+
+ return s.len;
+ }
++
++/*
++ * Store-forwarding barrier support.
++ */
++
++static enum stf_barrier_type stf_enabled_flush_types;
++static bool no_stf_barrier;
++bool stf_barrier;
++
++static int __init handle_no_stf_barrier(char *p)
++{
++ pr_info("stf-barrier: disabled on command line.");
++ no_stf_barrier = true;
++ return 0;
++}
++
++early_param("no_stf_barrier", handle_no_stf_barrier);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_ssbd(char *p)
++{
++ if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
++ /* Until firmware tells us, we have the barrier with auto */
++ return 0;
++ } else if (strncmp(p, "off", 3) == 0) {
++ handle_no_stf_barrier(NULL);
++ return 0;
++ } else
++ return 1;
++
++ return 0;
++}
++early_param("spec_store_bypass_disable", handle_ssbd);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_no_ssbd(char *p)
++{
++ handle_no_stf_barrier(NULL);
++ return 0;
++}
++early_param("nospec_store_bypass_disable", handle_no_ssbd);
++
++static void stf_barrier_enable(bool enable)
++{
++ if (enable)
++ do_stf_barrier_fixups(stf_enabled_flush_types);
++ else
++ do_stf_barrier_fixups(STF_BARRIER_NONE);
++
++ stf_barrier = enable;
++}
++
++void setup_stf_barrier(void)
++{
++ enum stf_barrier_type type;
++ bool enable, hv;
++
++ hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++ /* Default to fallback in case fw-features are not available */
++ if (cpu_has_feature(CPU_FTR_ARCH_300))
++ type = STF_BARRIER_EIEIO;
++ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
++ type = STF_BARRIER_SYNC_ORI;
++ else if (cpu_has_feature(CPU_FTR_ARCH_206))
++ type = STF_BARRIER_FALLBACK;
++ else
++ type = STF_BARRIER_NONE;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
++
++ if (type == STF_BARRIER_FALLBACK) {
++ pr_info("stf-barrier: fallback barrier available\n");
++ } else if (type == STF_BARRIER_SYNC_ORI) {
++ pr_info("stf-barrier: hwsync barrier available\n");
++ } else if (type == STF_BARRIER_EIEIO) {
++ pr_info("stf-barrier: eieio barrier available\n");
++ }
++
++ stf_enabled_flush_types = type;
++
++ if (!no_stf_barrier)
++ stf_barrier_enable(enable);
++}
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
++ const char *type;
++ switch (stf_enabled_flush_types) {
++ case STF_BARRIER_EIEIO:
++ type = "eieio";
++ break;
++ case STF_BARRIER_SYNC_ORI:
++ type = "hwsync";
++ break;
++ case STF_BARRIER_FALLBACK:
++ type = "fallback";
++ break;
++ default:
++ type = "unknown";
++ }
++ return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
++ }
++
++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int stf_barrier_set(void *data, u64 val)
++{
++ bool enable;
++
++ if (val == 1)
++ enable = true;
++ else if (val == 0)
++ enable = false;
++ else
++ return -EINVAL;
++
++ /* Only do anything if we're changing state */
++ if (enable != stf_barrier)
++ stf_barrier_enable(enable);
++
++ return 0;
++}
++
++static int stf_barrier_get(void *data, u64 *val)
++{
++ *val = stf_barrier ? 1 : 0;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
++
++static __init int stf_barrier_debugfs_init(void)
++{
++ debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
++ return 0;
++}
++device_initcall(stf_barrier_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -134,6 +134,20 @@ SECTIONS
+
+ #ifdef CONFIG_PPC64
+ . = ALIGN(8);
++ __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_entry_barrier_fixup = .;
++ *(__stf_entry_barrier_fixup)
++ __stop___stf_entry_barrier_fixup = .;
++ }
++
++ . = ALIGN(8);
++ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_exit_barrier_fixup = .;
++ *(__stf_exit_barrier_fixup)
++ __stop___stf_exit_barrier_fixup = .;
++ }
++
++ . = ALIGN(8);
+ __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+ __start___rfi_flush_fixup = .;
+ *(__rfi_flush_fixup)
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -22,6 +22,7 @@
+ #include <asm/page.h>
+ #include <asm/sections.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+ #include <asm/firmware.h>
+ #include <asm/setup.h>
+
+@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long val
+ }
+
+ #ifdef CONFIG_PPC_BOOK3S_64
++void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[3], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_entry_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK) {
++ instrs[i++] = 0x7d4802a6; /* mflr r10 */
++ instrs[i++] = 0x60000000; /* branch patched below */
++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ } else if (types & STF_BARRIER_SYNC_ORI) {
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++
++ if (types & STF_BARRIER_FALLBACK)
++ patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
++ BRANCH_SET_LINK);
++ else
++ patch_instruction(dest + 1, instrs[1]);
++
++ patch_instruction(dest + 2, instrs[2]);
++ }
++
++ printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[6], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_exit_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++ instrs[3] = 0x60000000; /* nop */
++ instrs[4] = 0x60000000; /* nop */
++ instrs[5] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
++ instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
++ } else {
++ instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
++ instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
++ }
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
++ } else {
++ instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
++ }
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++ patch_instruction(dest + 1, instrs[1]);
++ patch_instruction(dest + 2, instrs[2]);
++ patch_instruction(dest + 3, instrs[3]);
++ patch_instruction(dest + 4, instrs[4]);
++ patch_instruction(dest + 5, instrs[5]);
++ }
++ printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++
++void do_stf_barrier_fixups(enum stf_barrier_type types)
++{
++ do_stf_entry_barrier_fixups(types);
++ do_stf_exit_barrier_fixups(types);
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+ unsigned int instrs[3], *dest;
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -130,6 +130,7 @@ static void __init pnv_setup_arch(void)
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+
+ pnv_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* Initialize SMP */
+ pnv_smp_init();
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -545,6 +545,7 @@ static void __init pSeries_setup_arch(vo
+ fwnmi_init();
+
+ pseries_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* By default, only probe PCI (can be overridden by rtas_pci) */
+ pci_add_flags(PCI_PROBE_ONLY);
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:59 +1000
+Subject: powerpc/64s: Enhance the information in cpu_show_meltdown()
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-15-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit ff348355e9c72493947be337bb4fae4fc1a41eba upstream.
+
+Now that we have the security feature flags we can make the
+information displayed in the "meltdown" file more informative.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/security.c | 30 ++++++++++++++++++++++++++++--
+ 1 file changed, 28 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -6,6 +6,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/device.h>
++#include <linux/seq_buf.h>
+
+ #include <asm/security_features.h>
+
+@@ -19,8 +20,33 @@ unsigned long powerpc_security_features
+
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (rfi_flush)
+- return sprintf(buf, "Mitigation: RFI Flush\n");
++ bool thread_priv;
++
++ thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (rfi_flush || thread_priv) {
++ struct seq_buf s;
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++ seq_buf_printf(&s, "Mitigation: ");
++
++ if (rfi_flush)
++ seq_buf_printf(&s, "RFI Flush");
++
++ if (rfi_flush && thread_priv)
++ seq_buf_printf(&s, ", ");
++
++ if (thread_priv)
++ seq_buf_printf(&s, "L1D private per thread");
++
++ seq_buf_printf(&s, "\n");
++
++ return s.len;
++ }
++
++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++ return sprintf(buf, "Not affected\n");
+
+ return sprintf(buf, "Vulnerable\n");
+ }
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:07 +1000
+Subject: powerpc/64s: Fix section mismatch warnings from setup_rfi_flush()
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-23-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 501a78cbc17c329fabf8e9750a1e9ab810c88a0e upstream.
+
+The recent LPM changes to setup_rfi_flush() are causing some section
+mismatch warnings because we removed the __init annotation on
+setup_rfi_flush():
+
+ The function setup_rfi_flush() references
+ the function __init ppc64_bolted_size().
+ the function __init memblock_alloc_base().
+
+The references are actually in init_fallback_flush(), but that is
+inlined into setup_rfi_flush().
+
+These references are safe because:
+ - only pseries calls setup_rfi_flush() at runtime
+ - pseries always passes L1D_FLUSH_FALLBACK at boot
+ - so the fallback flush area will always be allocated
+ - so the check in init_fallback_flush() will always return early:
+ /* Only allocate the fallback flush area once (at boot time). */
+ if (l1d_flush_fallback_area)
+ return;
+
+ - and therefore we won't actually call the freed init routines.
+
+We should rework the code to make it safer by default rather than
+relying on the above, but for now as a quick-fix just add a __ref
+annotation to squash the warning.
+
+Fixes: abf110f3e1ce ("powerpc/rfi-flush: Make it possible to call setup_rfi_flush() again")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/setup_64.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -726,7 +726,7 @@ void rfi_flush_enable(bool enable)
+ rfi_flush = enable;
+ }
+
+-static void init_fallback_flush(void)
++static void __ref init_fallback_flush(void)
+ {
+ u64 l1d_size, limit;
+ int cpu;
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:58 +1000
+Subject: powerpc/64s: Move cpu_show_meltdown()
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-14-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 8ad33041563a10b34988800c682ada14b2612533 upstream.
+
+This landed in setup_64.c for no good reason other than we had nowhere
+else to put it. Now that we have a security-related file, that is a
+better place for it so move it.
+
+[mpe: Add extern for rfi_flush to fix bisection break]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/security_features.h | 1 +
+ arch/powerpc/kernel/security.c | 11 +++++++++++
+ arch/powerpc/kernel/setup_64.c | 8 --------
+ 3 files changed, 12 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -10,6 +10,7 @@
+
+
+ extern unsigned long powerpc_security_features;
++extern bool rfi_flush;
+
+ static inline void security_ftr_set(unsigned long feature)
+ {
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -5,6 +5,8 @@
+ // Copyright 2018, Michael Ellerman, IBM Corporation.
+
+ #include <linux/kernel.h>
++#include <linux/device.h>
++
+ #include <asm/security_features.h>
+
+
+@@ -13,3 +15,12 @@ unsigned long powerpc_security_features
+ SEC_FTR_L1D_FLUSH_PR | \
+ SEC_FTR_BNDS_CHK_SPEC_BAR | \
+ SEC_FTR_FAVOUR_SECURITY;
++
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (rfi_flush)
++ return sprintf(buf, "Mitigation: RFI Flush\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -805,12 +805,4 @@ static __init int rfi_flush_debugfs_init
+ }
+ device_initcall(rfi_flush_debugfs_init);
+ #endif
+-
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- if (rfi_flush)
+- return sprintf(buf, "Mitigation: RFI Flush\n");
+-
+- return sprintf(buf, "Vulnerable\n");
+-}
+ #endif /* CONFIG_PPC_BOOK3S_64 */
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:02 +1000
+Subject: powerpc/64s: Wire up cpu_show_spectre_v1()
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-18-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 56986016cb8cd9050e601831fe89f332b4e3c46e upstream.
+
+Add a definition for cpu_show_spectre_v1() to override the generic
+version. Currently this just prints "Not affected" or "Vulnerable"
+based on the firmware flag.
+
+Although the kernel does have array_index_nospec() in a few places, we
+haven't yet audited all the powerpc code to see where it's necessary,
+so for now we don't list that as a mitigation.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/security.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -50,3 +50,11 @@ ssize_t cpu_show_meltdown(struct device
+
+ return sprintf(buf, "Vulnerable\n");
+ }
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:03 +1000
+Subject: powerpc/64s: Wire up cpu_show_spectre_v2()
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-19-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit d6fbe1c55c55c6937cbea3531af7da84ab7473c3 upstream.
+
+Add a definition for cpu_show_spectre_v2() to override the generic
+version. This has several permutations, though in practice some may not
+occur; we cater for any combination.
+
+The most verbose is:
+
+ Mitigation: Indirect branch serialisation (kernel only), Indirect
+ branch cache disabled, ori31 speculation barrier enabled
+
+We don't treat the ori31 speculation barrier as a mitigation on its
+own, because it has to be *used* by code in order to be a mitigation
+and we don't know if userspace is doing that. So if that's all we see
+we say:
+
+ Vulnerable, ori31 speculation barrier enabled
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/security.c | 33 +++++++++++++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -58,3 +58,36 @@ ssize_t cpu_show_spectre_v1(struct devic
+
+ return sprintf(buf, "Vulnerable\n");
+ }
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool bcs, ccd, ori;
++ struct seq_buf s;
++
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
++ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
++ ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (bcs || ccd) {
++ seq_buf_printf(&s, "Mitigation: ");
++
++ if (bcs)
++ seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
++
++ if (bcs && ccd)
++ seq_buf_printf(&s, ", ");
++
++ if (ccd)
++ seq_buf_printf(&s, "Indirect branch cache disabled");
++ } else
++ seq_buf_printf(&s, "Vulnerable");
++
++ if (ori)
++ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
++
++ seq_buf_printf(&s, "\n");
++
++ return s.len;
++}
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:55 +1000
+Subject: powerpc: Add security feature flags for Spectre/Meltdown
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-11-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 9a868f634349e62922c226834aa23e3d1329ae7f upstream.
+
+This commit adds security feature flags to reflect the settings we
+receive from firmware regarding Spectre/Meltdown mitigations.
+
+The feature names reflect the names we are given by firmware on bare
+metal machines. See the hostboot source for details.
+
+Arguably these could be firmware features, but that then requires them
+to be read early in boot so they're available prior to asm feature
+patching, but we don't actually want to use them for patching. We may
+also want to dynamically update them in future, which would be
+incompatible with the way firmware features work (at the moment at
+least). So for now just make them separate flags.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/security_features.h | 65 +++++++++++++++++++++++++++
+ arch/powerpc/kernel/Makefile | 2
+ arch/powerpc/kernel/security.c | 15 ++++++
+ 3 files changed, 81 insertions(+), 1 deletion(-)
+ create mode 100644 arch/powerpc/include/asm/security_features.h
+ create mode 100644 arch/powerpc/kernel/security.c
+
+--- /dev/null
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -0,0 +1,65 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Security related feature bit definitions.
++ *
++ * Copyright 2018, Michael Ellerman, IBM Corporation.
++ */
++
++#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
++#define _ASM_POWERPC_SECURITY_FEATURES_H
++
++
++extern unsigned long powerpc_security_features;
++
++static inline void security_ftr_set(unsigned long feature)
++{
++ powerpc_security_features |= feature;
++}
++
++static inline void security_ftr_clear(unsigned long feature)
++{
++ powerpc_security_features &= ~feature;
++}
++
++static inline bool security_ftr_enabled(unsigned long feature)
++{
++ return !!(powerpc_security_features & feature);
++}
++
++
++// Features indicating support for Spectre/Meltdown mitigations
++
++// The L1-D cache can be flushed with ori r30,r30,0
++#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
++
++// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
++#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
++
++// ori r31,r31,0 acts as a speculation barrier
++#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
++
++// Speculation past bctr is disabled
++#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
++
++// Entries in L1-D are private to a SMT thread
++#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
++
++// Indirect branch prediction cache disabled
++#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
++
++
++// Features indicating need for Spectre/Meltdown mitigations
++
++// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
++#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
++
++// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
++#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
++
++// A speculation barrier should be used for bounds checks (Spectre variant 1)
++#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
++
++// Firmware configuration indicates user favours security over performance
++#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
++
++#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -44,7 +44,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_p
+ obj-$(CONFIG_VDSO32) += vdso32/
+ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
+-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
+ obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+ obj-$(CONFIG_PPC64) += vdso64/
+--- /dev/null
++++ b/arch/powerpc/kernel/security.c
+@@ -0,0 +1,15 @@
++// SPDX-License-Identifier: GPL-2.0+
++//
++// Security related flags and so on.
++//
++// Copyright 2018, Michael Ellerman, IBM Corporation.
++
++#include <linux/kernel.h>
++#include <asm/security_features.h>
++
++
++unsigned long powerpc_security_features __read_mostly = \
++ SEC_FTR_L1D_FLUSH_HV | \
++ SEC_FTR_L1D_FLUSH_PR | \
++ SEC_FTR_BNDS_CHK_SPEC_BAR | \
++ SEC_FTR_FAVOUR_SECURITY;
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:05 +1000
+Subject: powerpc: Move default security feature flags
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-21-mpe@ellerman.id.au>
+
+From: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+
+commit e7347a86830f38dc3e40c8f7e28c04412b12a2e7 upstream.
+
+This moves the definition of the default security feature flags
+(i.e., enabled by default) closer to the security feature flags.
+
+This can be used to restore current flags to the default flags.
+
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/security_features.h | 8 ++++++++
+ arch/powerpc/kernel/security.c | 7 +------
+ 2 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -63,4 +63,12 @@ static inline bool security_ftr_enabled(
+ // Firmware configuration indicates user favours security over performance
+ #define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
+
++
++// Features enabled by default
++#define SEC_FTR_DEFAULT \
++ (SEC_FTR_L1D_FLUSH_HV | \
++ SEC_FTR_L1D_FLUSH_PR | \
++ SEC_FTR_BNDS_CHK_SPEC_BAR | \
++ SEC_FTR_FAVOUR_SECURITY)
++
+ #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -11,12 +11,7 @@
+ #include <asm/security_features.h>
+
+
+-unsigned long powerpc_security_features __read_mostly = \
+- SEC_FTR_L1D_FLUSH_HV | \
+- SEC_FTR_L1D_FLUSH_PR | \
+- SEC_FTR_BNDS_CHK_SPEC_BAR | \
+- SEC_FTR_FAVOUR_SECURITY;
+-
++unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:57 +1000
+Subject: powerpc/powernv: Set or clear security feature flags
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-13-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 77addf6e95c8689e478d607176b399a6242a777e upstream.
+
+Now that we have feature flags for security related things, set or
+clear them based on what we see in the device tree provided by
+firmware.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/powernv/setup.c | 56 +++++++++++++++++++++++++++++++++
+ 1 file changed, 56 insertions(+)
+
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -37,9 +37,63 @@
+ #include <asm/smp.h>
+ #include <asm/tm.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+
+ #include "powernv.h"
+
++
++static bool fw_feature_is(const char *state, const char *name,
++ struct device_node *fw_features)
++{
++ struct device_node *np;
++ bool rc = false;
++
++ np = of_get_child_by_name(fw_features, name);
++ if (np) {
++ rc = of_property_read_bool(np, state);
++ of_node_put(np);
++ }
++
++ return rc;
++}
++
++static void init_fw_feat_flags(struct device_node *np)
++{
++ if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++ if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++ if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++ if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++ /*
++ * The features below are enabled by default, so we instead look to see
++ * if firmware has *disabled* them, and clear them if so.
++ */
++ if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
++
++ if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ static void pnv_setup_rfi_flush(void)
+ {
+ struct device_node *np, *fw_features;
+@@ -55,6 +109,8 @@ static void pnv_setup_rfi_flush(void)
+ of_node_put(np);
+
+ if (fw_features) {
++ init_fw_feat_flags(fw_features);
++
+ np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+ if (np && of_property_read_bool(np, "enabled"))
+ type = L1D_FLUSH_MTTRIG;
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:48 +1000
+Subject: powerpc/powernv: Support firmware disable of RFI flush
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-4-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit eb0a2d2620ae431c543963c8c7f08f597366fc60 upstream.
+
+Some versions of firmware will have a setting that can be configured
+to disable the RFI flush, add support for it.
+
+Fixes: 6e032b350cd1 ("powerpc/powernv: Check device-tree for RFI flush settings")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/powernv/setup.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -79,6 +79,10 @@ static void pnv_setup_rfi_flush(void)
+ if (np && of_property_read_bool(np, "disabled"))
+ enable--;
+
++ np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
++ if (np && of_property_read_bool(np, "disabled"))
++ enable = 0;
++
+ of_node_put(np);
+ of_node_put(fw_features);
+ }
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:00 +1000
+Subject: powerpc/powernv: Use the security flags in pnv_setup_rfi_flush()
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-16-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 37c0bdd00d3ae83369ab60a6712c28e11e6458d5 upstream.
+
+Now that we have the security flags we can significantly simplify the
+code in pnv_setup_rfi_flush(), because we can use the flags instead of
+checking device tree properties and because the security flags have
+pessimistic defaults.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/powernv/setup.c | 41 ++++++++-------------------------
+ 1 file changed, 10 insertions(+), 31 deletions(-)
+
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -65,7 +65,7 @@ static void init_fw_feat_flags(struct de
+ if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
+ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
+
+- if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++ if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
+ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
+
+ if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
+@@ -98,11 +98,10 @@ static void pnv_setup_rfi_flush(void)
+ {
+ struct device_node *np, *fw_features;
+ enum l1d_flush_type type;
+- int enable;
++ bool enable;
+
+ /* Default to fallback in case fw-features are not available */
+ type = L1D_FLUSH_FALLBACK;
+- enable = 1;
+
+ np = of_find_node_by_name(NULL, "ibm,opal");
+ fw_features = of_get_child_by_name(np, "fw-features");
+@@ -110,40 +109,20 @@ static void pnv_setup_rfi_flush(void)
+
+ if (fw_features) {
+ init_fw_feat_flags(fw_features);
++ of_node_put(fw_features);
+
+- np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+- if (np && of_property_read_bool(np, "enabled"))
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
+ type = L1D_FLUSH_MTTRIG;
+
+- of_node_put(np);
+-
+- np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+- if (np && of_property_read_bool(np, "enabled"))
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
+ type = L1D_FLUSH_ORI;
+-
+- of_node_put(np);
+-
+- /* Enable unless firmware says NOT to */
+- enable = 2;
+- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable--;
+-
+- of_node_put(np);
+-
+- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable--;
+-
+- np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable = 0;
+-
+- of_node_put(np);
+- of_node_put(fw_features);
+ }
+
+- setup_rfi_flush(type, enable > 0);
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
++
++ setup_rfi_flush(type, enable);
+ }
+
+ static void __init pnv_setup_arch(void)
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:54 +1000
+Subject: powerpc/pseries: Add new H_GET_CPU_CHARACTERISTICS flags
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-10-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit c4bc36628d7f8b664657d8bd6ad1c44c177880b7 upstream.
+
+Add some additional values which have been defined for the
+H_GET_CPU_CHARACTERISTICS hypercall.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/hvcall.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -313,6 +313,9 @@
+ #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
+ #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
+ #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
++#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
++#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
++#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+
+ #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
+ #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:04 +1000
+Subject: powerpc/pseries: Fix clearing of security feature flags
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-20-mpe@ellerman.id.au>
+
+From: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+
+commit 0f9bdfe3c77091e8704d2e510eb7c2c2c6cde524 upstream.
+
+The H_CPU_BEHAV_* flags should be checked for in the 'behaviour' field
+of 'struct h_cpu_char_result' -- 'character' is for H_CPU_CHAR_*
+flags.
+
+Found by playing around with QEMU's implementation of the hypercall:
+
+ H_CPU_CHAR=0xf000000000000000
+ H_CPU_BEHAV=0x0000000000000000
+
+ This clears H_CPU_BEHAV_FAVOUR_SECURITY and H_CPU_BEHAV_L1D_FLUSH_PR
+ so pseries_setup_rfi_flush() disables 'rfi_flush'; and it also
+ clears H_CPU_CHAR_L1D_THREAD_PRIV flag. So there is no RFI flush
+ mitigation at all for cpu_show_meltdown() to report; but currently
+ it does:
+
+ Original kernel:
+
+ # cat /sys/devices/system/cpu/vulnerabilities/meltdown
+ Mitigation: RFI Flush
+
+ Patched kernel:
+
+ # cat /sys/devices/system/cpu/vulnerabilities/meltdown
+ Not affected
+
+ H_CPU_CHAR=0x0000000000000000
+ H_CPU_BEHAV=0xf000000000000000
+
+ This sets H_CPU_BEHAV_BNDS_CHK_SPEC_BAR so cpu_show_spectre_v1() should
+ report vulnerable; but currently it doesn't:
+
+ Original kernel:
+
+ # cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ Not affected
+
+ Patched kernel:
+
+ # cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ Vulnerable
+
+Brown-paper-bag-by: Michael Ellerman <mpe@ellerman.id.au>
+Fixes: f636c14790ea ("powerpc/pseries: Set or clear security feature flags")
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -475,13 +475,13 @@ static void init_cpu_char_feature_flags(
+ * The features below are enabled by default, so we instead look to see
+ * if firmware has *disabled* them, and clear them if so.
+ */
+- if (!(result->character & H_CPU_BEHAV_FAVOUR_SECURITY))
++ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
+
+- if (!(result->character & H_CPU_BEHAV_L1D_FLUSH_PR))
++ if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
+
+- if (!(result->character & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
++ if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
+ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
+ }
+
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:06 +1000
+Subject: powerpc/pseries: Restore default security feature flags on setup
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-22-mpe@ellerman.id.au>
+
+From: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+
+commit 6232774f1599028a15418179d17f7df47ede770a upstream.
+
+After migration the security feature flags might have changed (e.g.,
+destination system with unpatched firmware), but some flags are not
+set/clear again in init_cpu_char_feature_flags() because it assumes
+the security flags to be the defaults.
+
+Additionally, if the H_GET_CPU_CHARACTERISTICS hypercall fails then
+init_cpu_char_feature_flags() does not run again, which potentially
+might leave the system in an insecure or sub-optimal configuration.
+
+So, just restore the security feature flags to the defaults assumed
+by init_cpu_char_feature_flags() so it can set/clear them correctly,
+and to ensure safe settings are in place in case the hypercall fail.
+
+Fixes: f636c14790ea ("powerpc/pseries: Set or clear security feature flags")
+Depends-on: 19887d6a28e2 ("powerpc: Move default security feature flags")
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -453,6 +453,10 @@ static void __init find_and_init_phbs(vo
+
+ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
+ {
++ /*
++ * The features below are disabled by default, so we instead look to see
++ * if firmware has *enabled* them, and set them if so.
++ */
+ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
+ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
+
+@@ -492,6 +496,13 @@ void pseries_setup_rfi_flush(void)
+ bool enable;
+ long rc;
+
++ /*
++ * Set features to the defaults assumed by init_cpu_char_feature_flags()
++ * so it can set/clear again any features that might have changed after
++ * migration, and in case the hypercall fails and it is not even called.
++ */
++ powerpc_security_features = SEC_FTR_DEFAULT;
++
+ rc = plpar_get_cpu_characteristics(&result);
+ if (rc == H_SUCCESS)
+ init_cpu_char_feature_flags(&result);
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:56 +1000
+Subject: powerpc/pseries: Set or clear security feature flags
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-12-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit f636c14790ead6cc22cf62279b1f8d7e11a67116 upstream.
+
+Now that we have feature flags for security related things, set or
+clear them based on what we receive from the hypercall.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 43 +++++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -66,6 +66,7 @@
+ #include <asm/reg.h>
+ #include <asm/plpar_wrappers.h>
+ #include <asm/kexec.h>
++#include <asm/security_features.h>
+
+ #include "pseries.h"
+
+@@ -450,6 +451,40 @@ static void __init find_and_init_phbs(vo
+ of_pci_check_probe_only();
+ }
+
++static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
++{
++ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++ if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++ if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++ if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++ /*
++ * The features below are enabled by default, so we instead look to see
++ * if firmware has *disabled* them, and clear them if so.
++ */
++ if (!(result->character & H_CPU_BEHAV_FAVOUR_SECURITY))
++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++ if (!(result->character & H_CPU_BEHAV_L1D_FLUSH_PR))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++ if (!(result->character & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ void pseries_setup_rfi_flush(void)
+ {
+ struct h_cpu_char_result result;
+@@ -463,6 +498,8 @@ void pseries_setup_rfi_flush(void)
+
+ rc = plpar_get_cpu_characteristics(&result);
+ if (rc == H_SUCCESS) {
++ init_cpu_char_feature_flags(&result);
++
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+ types |= L1D_FLUSH_MTTRIG;
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+@@ -473,6 +510,12 @@ void pseries_setup_rfi_flush(void)
+ enable = false;
+ }
+
++ /*
++ * We're the guest so this doesn't apply to us, clear it to simplify
++ * handling of it elsewhere.
++ */
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
++
+ setup_rfi_flush(types, enable);
+ }
+
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:47 +1000
+Subject: powerpc/pseries: Support firmware disable of RFI flush
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-3-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 582605a429e20ae68fd0b041b2e840af296edd08 upstream.
+
+Some versions of firmware will have a setting that can be configured
+to disable the RFI flush, add support for it.
+
+Fixes: 8989d56878a7 ("powerpc/pseries: Query hypervisor for RFI flush settings")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -473,7 +473,8 @@ static void pseries_setup_rfi_flush(void
+ if (types == L1D_FLUSH_NONE)
+ types = L1D_FLUSH_FALLBACK;
+
+- if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
++ if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
++ (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
+ enable = false;
+ } else {
+ /* Default to fallback if case hcall is not available */
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:09:01 +1000
+Subject: powerpc/pseries: Use the security flags in pseries_setup_rfi_flush()
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-17-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 2e4a16161fcd324b1f9bf6cb6856529f7eaf0689 upstream.
+
+Now that we have the security flags we can simplify the code in
+pseries_setup_rfi_flush() because the security flags have pessimistic
+defaults.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 27 ++++++++++++---------------
+ 1 file changed, 12 insertions(+), 15 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -492,30 +492,27 @@ void pseries_setup_rfi_flush(void)
+ bool enable;
+ long rc;
+
+- /* Enable by default */
+- enable = true;
+- types = L1D_FLUSH_FALLBACK;
+-
+ rc = plpar_get_cpu_characteristics(&result);
+- if (rc == H_SUCCESS) {
++ if (rc == H_SUCCESS)
+ init_cpu_char_feature_flags(&result);
+
+- if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+- types |= L1D_FLUSH_MTTRIG;
+- if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+- types |= L1D_FLUSH_ORI;
+-
+- if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+- (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
+- enable = false;
+- }
+-
+ /*
+ * We're the guest so this doesn't apply to us, clear it to simplify
+ * handling of it elsewhere.
+ */
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
+
++ types = L1D_FLUSH_FALLBACK;
++
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
++ types |= L1D_FLUSH_MTTRIG;
++
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
++ types |= L1D_FLUSH_ORI;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
++
+ setup_rfi_flush(types, enable);
+ }
+
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:51 +1000
+Subject: powerpc/rfi-flush: Always enable fallback flush on pseries
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-7-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 84749a58b6e382f109abf1e734bc4dd43c2c25bb upstream.
+
+This ensures the fallback flush area is always allocated on pseries,
+so in case a LPAR is migrated from a patched to an unpatched system,
+it is possible to enable the fallback flush in the target system.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/setup.c | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -459,26 +459,18 @@ static void pseries_setup_rfi_flush(void
+
+ /* Enable by default */
+ enable = true;
++ types = L1D_FLUSH_FALLBACK;
+
+ rc = plpar_get_cpu_characteristics(&result);
+ if (rc == H_SUCCESS) {
+- types = L1D_FLUSH_NONE;
+-
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+ types |= L1D_FLUSH_MTTRIG;
+ if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+ types |= L1D_FLUSH_ORI;
+
+- /* Use fallback if nothing set in hcall */
+- if (types == L1D_FLUSH_NONE)
+- types = L1D_FLUSH_FALLBACK;
+-
+ if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+ (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
+ enable = false;
+- } else {
+- /* Default to fallback if case hcall is not available */
+- types = L1D_FLUSH_FALLBACK;
+ }
+
+ setup_rfi_flush(types, enable);
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:53 +1000
+Subject: powerpc/rfi-flush: Call setup_rfi_flush() after LPM migration
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-9-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 921bc6cf807ceb2ab8005319cf39f33494d6b100 upstream.
+
+We might have migrated to a machine that uses a different flush type,
+or doesn't need flushing at all.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/pseries/mobility.c | 3 +++
+ arch/powerpc/platforms/pseries/pseries.h | 2 ++
+ arch/powerpc/platforms/pseries/setup.c | 2 +-
+ 3 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -314,6 +314,9 @@ void post_mobility_fixup(void)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
+
++ /* Possibly switch to a new RFI flush type */
++ pseries_setup_rfi_flush();
++
+ return;
+ }
+
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -79,4 +79,6 @@ extern struct pci_controller_ops pseries
+
+ unsigned long pseries_memory_block_size(void);
+
++void pseries_setup_rfi_flush(void);
++
+ #endif /* _PSERIES_PSERIES_H */
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -450,7 +450,7 @@ static void __init find_and_init_phbs(vo
+ of_pci_check_probe_only();
+ }
+
+-static void pseries_setup_rfi_flush(void)
++void pseries_setup_rfi_flush(void)
+ {
+ struct h_cpu_char_result result;
+ enum l1d_flush_type types;
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:52 +1000
+Subject: powerpc/rfi-flush: Differentiate enabled and patched flush types
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-8-mpe@ellerman.id.au>
+
+From: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+
+commit 0063d61ccfc011f379a31acaeba6de7c926fed2c upstream.
+
+Currently the rfi-flush messages print 'Using <type> flush' for all
+enabled_flush_types, but that is not necessarily true -- as now the
+fallback flush is always enabled on pseries, but the fixup function
+overwrites its nop/branch slot with other flush types, if available.
+
+So, replace the 'Using <type> flush' messages with '<type> flush is
+available'.
+
+Also, print the patched flush types in the fixup function, so users
+can know what is (not) being used (e.g., the slower, fallback flush,
+or no flush type at all if flush is disabled via the debugfs switch).
+
+Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/setup_64.c | 6 +++---
+ arch/powerpc/lib/feature-fixups.c | 9 ++++++++-
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -755,15 +755,15 @@ static void init_fallback_flush(void)
+ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ {
+ if (types & L1D_FLUSH_FALLBACK) {
+- pr_info("rfi-flush: Using fallback displacement flush\n");
++ pr_info("rfi-flush: fallback displacement flush available\n");
+ init_fallback_flush();
+ }
+
+ if (types & L1D_FLUSH_ORI)
+- pr_info("rfi-flush: Using ori type flush\n");
++ pr_info("rfi-flush: ori type flush available\n");
+
+ if (types & L1D_FLUSH_MTTRIG)
+- pr_info("rfi-flush: Using mttrig type flush\n");
++ pr_info("rfi-flush: mttrig type flush available\n");
+
+ enabled_flush_types = types;
+
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -153,7 +153,14 @@ void do_rfi_flush_fixups(enum l1d_flush_
+ patch_instruction(dest + 2, instrs[2]);
+ }
+
+- printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
++ printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
++ (types == L1D_FLUSH_NONE) ? "no" :
++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
++ ? "ori+mttrig type"
++ : "ori type" :
++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
++ : "unknown");
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:50 +1000
+Subject: powerpc/rfi-flush: Make it possible to call setup_rfi_flush() again
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-6-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit abf110f3e1cea40f5ea15e85f5d67c39c14568a7 upstream.
+
+For PowerVM migration we want to be able to call setup_rfi_flush()
+again after we've migrated the partition.
+
+To support that we need to check that we're not trying to allocate the
+fallback flush area after memblock has gone away (i.e., boot-time only).
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/setup.h | 2 +-
+ arch/powerpc/kernel/setup_64.c | 6 +++++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -48,7 +48,7 @@ enum l1d_flush_type {
+ L1D_FLUSH_MTTRIG = 0x8,
+ };
+
+-void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
++void setup_rfi_flush(enum l1d_flush_type, bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
+
+ #endif /* !__ASSEMBLY__ */
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -731,6 +731,10 @@ static void init_fallback_flush(void)
+ u64 l1d_size, limit;
+ int cpu;
+
++ /* Only allocate the fallback flush area once (at boot time). */
++ if (l1d_flush_fallback_area)
++ return;
++
+ l1d_size = ppc64_caches.dsize;
+ limit = min(safe_stack_limit(), ppc64_rma_size);
+
+@@ -748,7 +752,7 @@ static void init_fallback_flush(void)
+ }
+ }
+
+-void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
++void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ {
+ if (types & L1D_FLUSH_FALLBACK) {
+ pr_info("rfi-flush: Using fallback displacement flush\n");
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:46 +1000
+Subject: powerpc/rfi-flush: Move out of HARDLOCKUP_DETECTOR #ifdef
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-2-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+The backport of the RFI flush support, done by me, has a minor bug in
+that the code is inside an #ifdef CONFIG_HARDLOCKUP_DETECTOR, which is
+incorrect.
+
+This doesn't matter with common configs because we enable
+HARDLOCKUP_DETECTOR, but with future patches it will break the build.
+So fix it.
+
+Fixes: c3b82ebee6e0 ("powerpc/64s: Add support for RFI flush of L1-D cache")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/setup_64.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -679,6 +679,7 @@ static int __init disable_hardlockup_det
+ return 0;
+ }
+ early_initcall(disable_hardlockup_detector);
++#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+
+ #ifdef CONFIG_PPC_BOOK3S_64
+ static enum l1d_flush_type enabled_flush_types;
+@@ -806,4 +807,3 @@ ssize_t cpu_show_meltdown(struct device
+ return sprintf(buf, "Vulnerable\n");
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+-#endif
--- /dev/null
+From foo@baz Sat Jun 2 15:29:05 CEST 2018
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Sat, 2 Jun 2018 21:08:49 +1000
+Subject: powerpc/rfi-flush: Move the logic to avoid a redo into the debugfs code
+To: gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, linuxppc-dev@ozlabs.org
+Message-ID: <20180602110908.29773-5-mpe@ellerman.id.au>
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 1e2a9fc7496955faacbbed49461d611b704a7505 upstream.
+
+rfi_flush_enable() includes a check to see if we're already
+enabled (or disabled), and in that case does nothing.
+
+But that means calling setup_rfi_flush() a 2nd time doesn't actually
+work, which is a bit confusing.
+
+Move that check into the debugfs code, where it really belongs.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/setup_64.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -717,9 +717,6 @@ static void do_nothing(void *unused)
+
+ void rfi_flush_enable(bool enable)
+ {
+- if (rfi_flush == enable)
+- return;
+-
+ if (enable) {
+ do_rfi_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+@@ -773,13 +770,19 @@ void __init setup_rfi_flush(enum l1d_flu
+ #ifdef CONFIG_DEBUG_FS
+ static int rfi_flush_set(void *data, u64 val)
+ {
++ bool enable;
++
+ if (val == 1)
+- rfi_flush_enable(true);
++ enable = true;
+ else if (val == 0)
+- rfi_flush_enable(false);
++ enable = false;
+ else
+ return -EINVAL;
+
++ /* Only do anything if we're changing state */
++ if (enable != rfi_flush)
++ rfi_flush_enable(enable);
++
+ return 0;
+ }
+
--- /dev/null
+From fb239c1209bb0f0b4830cc72507cc2f2d63fadbd Mon Sep 17 00:00:00 2001
+From: Matthias Kaehlcke <mka@chromium.org>
+Date: Thu, 8 Feb 2018 16:57:12 -0800
+Subject: rtlwifi: rtl8192cu: Remove variable self-assignment in rf.c
+
+From: Matthias Kaehlcke <mka@chromium.org>
+
+commit fb239c1209bb0f0b4830cc72507cc2f2d63fadbd upstream.
+
+In _rtl92c_get_txpower_writeval_by_regulatory() the variable writeVal
+is assigned to itself in an if ... else statement, apparently only to
+document that the branch condition is handled and that a previously read
+value should be returned unmodified. The self-assignment causes clang to
+raise the following warning:
+
+drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c:304:13:
+ error: explicitly assigning value of variable of type 'u32'
+ (aka 'unsigned int') to itself [-Werror,-Wself-assign]
+ writeVal = writeVal;
+
+Delete the branch with the self-assignment.
+
+Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
+Acked-by: Larry Finger <Larry.Finger@lwfinger.net>
+Reviewed-by: Guenter Roeck <groeck@chromium.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+@@ -304,9 +304,6 @@ static void _rtl92c_get_txpower_writeval
+ writeVal = 0x00000000;
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeVal = writeVal - 0x06060606;
+- else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+- TXHIGHPWRLEVEL_BT2)
+- writeVal = writeVal;
+ *(p_outwriteval + rf) = writeVal;
+ }
+ }
--- /dev/null
+From a4b0e8a4e92b1baa860e744847fbdb84a50a5071 Mon Sep 17 00:00:00 2001
+From: "Potomski, MichalX" <michalx.potomski@intel.com>
+Date: Thu, 23 Feb 2017 09:05:30 +0000
+Subject: scsi: ufs: Factor out ufshcd_read_desc_param
+
+From: Potomski, MichalX <michalx.potomski@intel.com>
+
+commit a4b0e8a4e92b1baa860e744847fbdb84a50a5071 upstream.
+
+Since in UFS 2.1 specification some of the descriptor lengths differs
+from 2.0 specification and some devices, which are reporting spec
+version 2.0 have different descriptor lengths we can not rely on
+hardcoded values taken from 2.0 specification. This patch introduces
+reading these lengths per each device from descriptor headers at probe
+time to ensure their correctness.
+
+Signed-off-by: Michal' Potomski <michalx.potomski@intel.com>
+Reviewed-by: Subhash Jadavani <subhashj@codeaurora.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+[Wei Li: Slight tweaks to get the cherry-pick to apply,resolved collisions]
+Signed-off-by: Li Wei <liwei213@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/ufs/ufs.h | 22 +---
+ drivers/scsi/ufs/ufshcd.c | 231 ++++++++++++++++++++++++++++++++++------------
+ drivers/scsi/ufs/ufshcd.h | 16 +++
+ 3 files changed, 197 insertions(+), 72 deletions(-)
+
+--- a/drivers/scsi/ufs/ufs.h
++++ b/drivers/scsi/ufs/ufs.h
+@@ -145,7 +145,7 @@ enum attr_idn {
+ /* Descriptor idn for Query requests */
+ enum desc_idn {
+ QUERY_DESC_IDN_DEVICE = 0x0,
+- QUERY_DESC_IDN_CONFIGURAION = 0x1,
++ QUERY_DESC_IDN_CONFIGURATION = 0x1,
+ QUERY_DESC_IDN_UNIT = 0x2,
+ QUERY_DESC_IDN_RFU_0 = 0x3,
+ QUERY_DESC_IDN_INTERCONNECT = 0x4,
+@@ -161,19 +161,13 @@ enum desc_header_offset {
+ QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+ };
+
+-enum ufs_desc_max_size {
+- QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+- QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
+- QUERY_DESC_UNIT_MAX_SIZE = 0x23,
+- QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
+- /*
+- * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
+- * of descriptor header.
+- */
+- QUERY_DESC_STRING_MAX_SIZE = 0xFE,
+- QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
+- QUERY_DESC_POWER_MAX_SIZE = 0x62,
+- QUERY_DESC_RFU_MAX_SIZE = 0x00,
++enum ufs_desc_def_size {
++ QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
++ QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
++ QUERY_DESC_UNIT_DEF_SIZE = 0x23,
++ QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
++ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
++ QUERY_DESC_POWER_DEF_SIZE = 0x62,
+ };
+
+ /* Unit descriptor parameters offsets in bytes*/
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -98,19 +98,6 @@
+ _ret; \
+ })
+
+-static u32 ufs_query_desc_max_size[] = {
+- QUERY_DESC_DEVICE_MAX_SIZE,
+- QUERY_DESC_CONFIGURAION_MAX_SIZE,
+- QUERY_DESC_UNIT_MAX_SIZE,
+- QUERY_DESC_RFU_MAX_SIZE,
+- QUERY_DESC_INTERCONNECT_MAX_SIZE,
+- QUERY_DESC_STRING_MAX_SIZE,
+- QUERY_DESC_RFU_MAX_SIZE,
+- QUERY_DESC_GEOMETRY_MAX_SIZE,
+- QUERY_DESC_POWER_MAX_SIZE,
+- QUERY_DESC_RFU_MAX_SIZE,
+-};
+-
+ enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+@@ -1961,7 +1948,7 @@ static int __ufshcd_query_descriptor(str
+ goto out;
+ }
+
+- if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
++ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+ __func__, *buf_len);
+ err = -EINVAL;
+@@ -2041,6 +2028,92 @@ int ufshcd_query_descriptor_retry(struct
+ EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
+
+ /**
++ * ufshcd_read_desc_length - read the specified descriptor length from header
++ * @hba: Pointer to adapter instance
++ * @desc_id: descriptor idn value
++ * @desc_index: descriptor index
++ * @desc_length: pointer to variable to read the length of descriptor
++ *
++ * Return 0 in case of success, non-zero otherwise
++ */
++static int ufshcd_read_desc_length(struct ufs_hba *hba,
++ enum desc_idn desc_id,
++ int desc_index,
++ int *desc_length)
++{
++ int ret;
++ u8 header[QUERY_DESC_HDR_SIZE];
++ int header_len = QUERY_DESC_HDR_SIZE;
++
++ if (desc_id >= QUERY_DESC_IDN_MAX)
++ return -EINVAL;
++
++ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
++ desc_id, desc_index, 0, header,
++ &header_len);
++
++ if (ret) {
++ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
++ __func__, desc_id);
++ return ret;
++ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
++ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
++ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
++ desc_id);
++ ret = -EINVAL;
++ }
++
++ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
++ return ret;
++
++}
++
++/**
++ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
++ * @hba: Pointer to adapter instance
++ * @desc_id: descriptor idn value
++ * @desc_len: mapped desc length (out)
++ *
++ * Return 0 in case of success, non-zero otherwise
++ */
++int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
++ enum desc_idn desc_id, int *desc_len)
++{
++ switch (desc_id) {
++ case QUERY_DESC_IDN_DEVICE:
++ *desc_len = hba->desc_size.dev_desc;
++ break;
++ case QUERY_DESC_IDN_POWER:
++ *desc_len = hba->desc_size.pwr_desc;
++ break;
++ case QUERY_DESC_IDN_GEOMETRY:
++ *desc_len = hba->desc_size.geom_desc;
++ break;
++ case QUERY_DESC_IDN_CONFIGURATION:
++ *desc_len = hba->desc_size.conf_desc;
++ break;
++ case QUERY_DESC_IDN_UNIT:
++ *desc_len = hba->desc_size.unit_desc;
++ break;
++ case QUERY_DESC_IDN_INTERCONNECT:
++ *desc_len = hba->desc_size.interc_desc;
++ break;
++ case QUERY_DESC_IDN_STRING:
++ *desc_len = QUERY_DESC_MAX_SIZE;
++ break;
++ case QUERY_DESC_IDN_RFU_0:
++ case QUERY_DESC_IDN_RFU_1:
++ *desc_len = 0;
++ break;
++ default:
++ *desc_len = 0;
++ return -EINVAL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
++
++/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+@@ -2054,42 +2127,49 @@ EXPORT_SYMBOL(ufshcd_query_descriptor_re
+ static int ufshcd_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+- u32 param_offset,
++ u8 param_offset,
+ u8 *param_read_buf,
+- u32 param_size)
++ u8 param_size)
+ {
+ int ret;
+ u8 *desc_buf;
+- u32 buff_len;
++ int buff_len;
+ bool is_kmalloc = true;
+
+- /* safety checks */
+- if (desc_id >= QUERY_DESC_IDN_MAX)
++ /* Safety check */
++ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
+ return -EINVAL;
+
+- buff_len = ufs_query_desc_max_size[desc_id];
+- if ((param_offset + param_size) > buff_len)
+- return -EINVAL;
++ /* Get the max length of descriptor from structure filled up at probe
++ * time.
++ */
++ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+
+- if (!param_offset && (param_size == buff_len)) {
+- /* memory space already available to hold full descriptor */
+- desc_buf = param_read_buf;
+- is_kmalloc = false;
+- } else {
+- /* allocate memory to hold full descriptor */
++ /* Sanity checks */
++ if (ret || !buff_len) {
++ dev_err(hba->dev, "%s: Failed to get full descriptor length",
++ __func__);
++ return ret;
++ }
++
++ /* Check whether we need temp memory */
++ if (param_offset != 0 || param_size < buff_len) {
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return -ENOMEM;
++ } else {
++ desc_buf = param_read_buf;
++ is_kmalloc = false;
+ }
+
++ /* Request for full descriptor */
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+- desc_id, desc_index, 0, desc_buf,
+- &buff_len);
++ desc_id, desc_index, 0,
++ desc_buf, &buff_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
+-
+ goto out;
+ }
+
+@@ -2101,25 +2181,9 @@ static int ufshcd_read_desc_param(struct
+ goto out;
+ }
+
+- /*
+- * While reading variable size descriptors (like string descriptor),
+- * some UFS devices may report the "LENGTH" (field in "Transaction
+- * Specific fields" of Query Response UPIU) same as what was requested
+- * in Query Request UPIU instead of reporting the actual size of the
+- * variable size descriptor.
+- * Although it's safe to ignore the "LENGTH" field for variable size
+- * descriptors as we can always derive the length of the descriptor from
+- * the descriptor header fields. Hence this change impose the length
+- * match check only for fixed size descriptors (for which we always
+- * request the correct size as part of Query Request UPIU).
+- */
+- if ((desc_id != QUERY_DESC_IDN_STRING) &&
+- (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+- dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+- __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+- ret = -EINVAL;
+- goto out;
+- }
++ /* Check whether we will not copy more data than available */
++ if (is_kmalloc && param_size > buff_len)
++ param_size = buff_len;
+
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+@@ -4812,8 +4876,8 @@ out:
+ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+ {
+ int ret;
+- int buff_len = QUERY_DESC_POWER_MAX_SIZE;
+- u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
++ int buff_len = hba->desc_size.pwr_desc;
++ u8 desc_buf[hba->desc_size.pwr_desc];
+
+ ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ if (ret) {
+@@ -4911,11 +4975,10 @@ static int ufs_get_device_desc(struct uf
+ {
+ int err;
+ u8 model_index;
+- u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
+- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
++ u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
++ u8 desc_buf[hba->desc_size.dev_desc];
+
+- err = ufshcd_read_device_desc(hba, desc_buf,
+- QUERY_DESC_DEVICE_MAX_SIZE);
++ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
+ __func__, err);
+@@ -4932,14 +4995,14 @@ static int ufs_get_device_desc(struct uf
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+ err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+- QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
++ QUERY_DESC_MAX_SIZE, ASCII_STD);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+- str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
++ str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
+ strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+@@ -5129,6 +5192,51 @@ static void ufshcd_tune_unipro_params(st
+ ufshcd_vops_apply_dev_quirks(hba);
+ }
+
++static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
++{
++ int err;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
++ &hba->desc_size.dev_desc);
++ if (err)
++ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
++ &hba->desc_size.pwr_desc);
++ if (err)
++ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
++ &hba->desc_size.interc_desc);
++ if (err)
++ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
++ &hba->desc_size.conf_desc);
++ if (err)
++ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
++ &hba->desc_size.unit_desc);
++ if (err)
++ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
++ &hba->desc_size.geom_desc);
++ if (err)
++ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
++}
++
++static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
++{
++ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
++ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
++ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
++ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
++ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
++ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
++}
++
+ /**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+@@ -5161,6 +5269,9 @@ static int ufshcd_probe_hba(struct ufs_h
+ if (ret)
+ goto out;
+
++ /* Init check for device descriptor sizes */
++ ufshcd_init_desc_sizes(hba);
++
+ ret = ufs_get_device_desc(hba, &card);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+@@ -5194,6 +5305,7 @@ static int ufshcd_probe_hba(struct ufs_h
+
+ /* set the state as operational after switching to desired gear */
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
++
+ /*
+ * If we are in error handling context or in power management callbacks
+ * context, no need to scan the host
+@@ -6570,6 +6682,9 @@ int ufshcd_init(struct ufs_hba *hba, voi
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
+
++ /* Set descriptor lengths to specification defaults */
++ ufshcd_def_desc_sizes(hba);
++
+ err = ufshcd_hba_init(hba);
+ if (err)
+ goto out_error;
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -205,6 +205,15 @@ struct ufs_dev_cmd {
+ struct ufs_query query;
+ };
+
++struct ufs_desc_size {
++ int dev_desc;
++ int pwr_desc;
++ int geom_desc;
++ int interc_desc;
++ int unit_desc;
++ int conf_desc;
++};
++
+ /**
+ * struct ufs_clk_info - UFS clock related info
+ * @list: list headed by hba->clk_list_head
+@@ -388,6 +397,7 @@ struct ufs_init_prefetch {
+ * @clk_list_head: UFS host controller clocks list node head
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the device max valid pwm
++ * @desc_size: descriptor sizes reported by device
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
+@@ -563,6 +573,8 @@ struct ufs_hba {
+
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
++
++ struct ufs_desc_size desc_size;
+ };
+
+ /* Returns true if clocks can be gated. Otherwise false */
+@@ -736,6 +748,10 @@ int ufshcd_query_flag(struct ufs_hba *hb
+ enum flag_idn idn, bool *flag_res);
+ int ufshcd_hold(struct ufs_hba *hba, bool async);
+ void ufshcd_release(struct ufs_hba *hba);
++
++int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
++ int *desc_length);
++
+ u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+
+ /* Wrapper functions for safely calling variant operations */
--- /dev/null
+From bde44bb665d049468b6a1a2fa7d666434de4f83f Mon Sep 17 00:00:00 2001
+From: "subhashj@codeaurora.org" <subhashj@codeaurora.org>
+Date: Wed, 23 Nov 2016 16:31:41 -0800
+Subject: scsi: ufs: fix failure to read the string descriptor
+
+From: Subhash Jadavani <subhashj@codeaurora.org>
+
+commit bde44bb665d049468b6a1a2fa7d666434de4f83f upstream.
+
+While reading variable size descriptors (like string descriptor), some UFS
+devices may report the "LENGTH" (field in "Transaction Specific fields" of
+Query Response UPIU) same as what was requested in Query Request UPIU
+instead of reporting the actual size of the variable size descriptor.
+Although it's safe to ignore the "LENGTH" field for variable size
+descriptors as we can always derive the length of the descriptor from
+the descriptor header fields. Hence this change impose the length match
+check only for fixed size descriptors (for which we always request the
+correct size as part of Query Request UPIU).
+
+Reviewed-by: Venkat Gopalakrishnan <venkatg@codeaurora.org>
+Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+[Wei Li: Slight tweaks to get the cherry-pick to apply, resolved collisions.]
+Signed-off-by: Li Wei <liwei213@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ufs/ufshcd.c | 39 +++++++++++++++++++++++++++++++--------
+ 1 file changed, 31 insertions(+), 8 deletions(-)
+
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -2086,18 +2086,41 @@ static int ufshcd_read_desc_param(struct
+ desc_id, desc_index, 0, desc_buf,
+ &buff_len);
+
+- if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
+- (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
+- ufs_query_desc_max_size[desc_id])
+- || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
+- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
+- __func__, desc_id, param_offset, buff_len, ret);
+- if (!ret)
+- ret = -EINVAL;
++ if (ret) {
++ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
++ __func__, desc_id, desc_index, param_offset, ret);
+
+ goto out;
+ }
+
++ /* Sanity check */
++ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
++ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
++ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * While reading variable size descriptors (like string descriptor),
++ * some UFS devices may report the "LENGTH" (field in "Transaction
++ * Specific fields" of Query Response UPIU) same as what was requested
++ * in Query Request UPIU instead of reporting the actual size of the
++ * variable size descriptor.
++ * Although it's safe to ignore the "LENGTH" field for variable size
++ * descriptors as we can always derive the length of the descriptor from
++ * the descriptor header fields. Hence this change impose the length
++ * match check only for fixed size descriptors (for which we always
++ * request the correct size as part of Query Request UPIU).
++ */
++ if ((desc_id != QUERY_DESC_IDN_STRING) &&
++ (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
++ dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
++ __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
++ ret = -EINVAL;
++ goto out;
++ }
++
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+ out:
--- /dev/null
+From 93fdd5ac64bbe80dac6416f048405362d7ef0945 Mon Sep 17 00:00:00 2001
+From: Tomas Winkler <tomas.winkler@intel.com>
+Date: Thu, 5 Jan 2017 10:45:12 +0200
+Subject: scsi: ufs: refactor device descriptor reading
+
+From: Tomas Winkler <tomas.winkler@intel.com>
+
+commit 93fdd5ac64bbe80dac6416f048405362d7ef0945 upstream.
+
+Pull device descriptor reading out of ufs quirk so it can be used also
+for other purposes.
+
+Revamp the fixup setup:
+
+1. Rename ufs_device_info to ufs_dev_desc as very similar name
+ ufs_dev_info is already in use.
+
+2. Make the handlers static as they are not used out of the ufshdc.c
+ file.
+
+[mkp: applied by hand]
+
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Reviewed-by: Subhash Jadavani <subhashj@codeaurora.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Li Wei <liwei213@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ufs/ufs.h | 12 ++++++++++++
+ drivers/scsi/ufs/ufs_quirks.h | 28 ++++++----------------------
+ drivers/scsi/ufs/ufshcd.c | 40 +++++++++++++++++++---------------------
+ 3 files changed, 37 insertions(+), 43 deletions(-)
+
+--- a/drivers/scsi/ufs/ufs.h
++++ b/drivers/scsi/ufs/ufs.h
+@@ -522,4 +522,16 @@ struct ufs_dev_info {
+ bool is_lu_power_on_wp;
+ };
+
++#define MAX_MODEL_LEN 16
++/**
++ * ufs_dev_desc - ufs device details from the device descriptor
++ *
++ * @wmanufacturerid: card details
++ * @model: card model
++ */
++struct ufs_dev_desc {
++ u16 wmanufacturerid;
++ char model[MAX_MODEL_LEN + 1];
++};
++
+ #endif /* End of Header */
+--- a/drivers/scsi/ufs/ufs_quirks.h
++++ b/drivers/scsi/ufs/ufs_quirks.h
+@@ -21,41 +21,28 @@
+ #define UFS_ANY_VENDOR 0xFFFF
+ #define UFS_ANY_MODEL "ANY_MODEL"
+
+-#define MAX_MODEL_LEN 16
+-
+ #define UFS_VENDOR_TOSHIBA 0x198
+ #define UFS_VENDOR_SAMSUNG 0x1CE
+ #define UFS_VENDOR_SKHYNIX 0x1AD
+
+ /**
+- * ufs_device_info - ufs device details
+- * @wmanufacturerid: card details
+- * @model: card model
+- */
+-struct ufs_device_info {
+- u16 wmanufacturerid;
+- char model[MAX_MODEL_LEN + 1];
+-};
+-
+-/**
+ * ufs_dev_fix - ufs device quirk info
+ * @card: ufs card details
+ * @quirk: device quirk
+ */
+ struct ufs_dev_fix {
+- struct ufs_device_info card;
++ struct ufs_dev_desc card;
+ unsigned int quirk;
+ };
+
+ #define END_FIX { { 0 }, 0 }
+
+ /* add specific device quirk */
+-#define UFS_FIX(_vendor, _model, _quirk) \
+- { \
+- .card.wmanufacturerid = (_vendor),\
+- .card.model = (_model), \
+- .quirk = (_quirk), \
+- }
++#define UFS_FIX(_vendor, _model, _quirk) { \
++ .card.wmanufacturerid = (_vendor),\
++ .card.model = (_model), \
++ .quirk = (_quirk), \
++}
+
+ /*
+ * If UFS device is having issue in processing LCC (Line Control
+@@ -144,7 +131,4 @@ struct ufs_dev_fix {
+ */
+ #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
+
+-struct ufs_hba;
+-void ufs_advertise_fixup_device(struct ufs_hba *hba);
+-
+ #endif /* UFS_QUIRKS_H_ */
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -4906,8 +4906,8 @@ out:
+ return ret;
+ }
+
+-static int ufs_get_device_info(struct ufs_hba *hba,
+- struct ufs_device_info *card_data)
++static int ufs_get_device_desc(struct ufs_hba *hba,
++ struct ufs_dev_desc *dev_desc)
+ {
+ int err;
+ u8 model_index;
+@@ -4926,7 +4926,7 @@ static int ufs_get_device_info(struct uf
+ * getting vendor (manufacturerID) and Bank Index in big endian
+ * format
+ */
+- card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
++ dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+@@ -4940,36 +4940,26 @@ static int ufs_get_device_info(struct uf
+ }
+
+ str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+- strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
++ strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+
+ /* Null terminate the model string */
+- card_data->model[MAX_MODEL_LEN] = '\0';
++ dev_desc->model[MAX_MODEL_LEN] = '\0';
+
+ out:
+ return err;
+ }
+
+-void ufs_advertise_fixup_device(struct ufs_hba *hba)
++static void ufs_fixup_device_setup(struct ufs_hba *hba,
++ struct ufs_dev_desc *dev_desc)
+ {
+- int err;
+ struct ufs_dev_fix *f;
+- struct ufs_device_info card_data;
+-
+- card_data.wmanufacturerid = 0;
+-
+- err = ufs_get_device_info(hba, &card_data);
+- if (err) {
+- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+- __func__, err);
+- return;
+- }
+
+ for (f = ufs_fixups; f->quirk; f++) {
+- if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
+- (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
+- (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
++ if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
++ f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
++ (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
+ !strcmp(f->card.model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+@@ -5147,6 +5137,7 @@ static void ufshcd_tune_unipro_params(st
+ */
+ static int ufshcd_probe_hba(struct ufs_hba *hba)
+ {
++ struct ufs_dev_desc card = {0};
+ int ret;
+
+ ret = ufshcd_link_startup(hba);
+@@ -5170,7 +5161,14 @@ static int ufshcd_probe_hba(struct ufs_h
+ if (ret)
+ goto out;
+
+- ufs_advertise_fixup_device(hba);
++ ret = ufs_get_device_desc(hba, &card);
++ if (ret) {
++ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
++ __func__, ret);
++ goto out;
++ }
++
++ ufs_fixup_device_setup(hba, &card);
+ ufshcd_tune_unipro_params(hba);
+
+ ret = ufshcd_set_vccq_rail_unused(hba,
input-elan_i2c_smbus-fix-corrupted-stack.patch
tracing-fix-crash-when-freeing-instances-with-event-triggers.patch
selinux-kasan-slab-out-of-bounds-in-xattr_getsecurity.patch
+cfg80211-further-limit-wiphy-names-to-64-bytes.patch
+dma-buf-remove-redundant-initialization-of-sg_table.patch
+rtlwifi-rtl8192cu-remove-variable-self-assignment-in-rf.c.patch
+asoc-intel-sst-remove-redundant-variable-dma_dev_name.patch
+platform-chrome-cros_ec_lpc-remove-redundant-pointer-request.patch
+x86-amd-revert-commit-944e0fc51a89c9827b9.patch
+xen-set-cpu-capabilities-from-xen_start_kernel.patch
+x86-amd-don-t-set-x86_bug_sysret_ss_attrs-when-running-under-xen.patch
+tcp-avoid-integer-overflows-in-tcp_rcv_space_adjust.patch
+scsi-ufs-fix-failure-to-read-the-string-descriptor.patch
+scsi-ufs-refactor-device-descriptor-reading.patch
+scsi-ufs-factor-out-ufshcd_read_desc_param.patch
+arm64-add-hypervisor-safe-helper-for-checking-constant-capabilities.patch
+arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
+powerpc-rfi-flush-move-out-of-hardlockup_detector-ifdef.patch
+powerpc-pseries-support-firmware-disable-of-rfi-flush.patch
+powerpc-powernv-support-firmware-disable-of-rfi-flush.patch
+powerpc-rfi-flush-move-the-logic-to-avoid-a-redo-into-the-debugfs-code.patch
+powerpc-rfi-flush-make-it-possible-to-call-setup_rfi_flush-again.patch
+powerpc-rfi-flush-always-enable-fallback-flush-on-pseries.patch
+powerpc-rfi-flush-differentiate-enabled-and-patched-flush-types.patch
+powerpc-rfi-flush-call-setup_rfi_flush-after-lpm-migration.patch
+powerpc-pseries-add-new-h_get_cpu_characteristics-flags.patch
+powerpc-add-security-feature-flags-for-spectre-meltdown.patch
+powerpc-pseries-set-or-clear-security-feature-flags.patch
+powerpc-powernv-set-or-clear-security-feature-flags.patch
+powerpc-64s-move-cpu_show_meltdown.patch
+powerpc-64s-enhance-the-information-in-cpu_show_meltdown.patch
+powerpc-powernv-use-the-security-flags-in-pnv_setup_rfi_flush.patch
+powerpc-pseries-use-the-security-flags-in-pseries_setup_rfi_flush.patch
+powerpc-64s-wire-up-cpu_show_spectre_v1.patch
+powerpc-64s-wire-up-cpu_show_spectre_v2.patch
+powerpc-pseries-fix-clearing-of-security-feature-flags.patch
+powerpc-move-default-security-feature-flags.patch
+powerpc-pseries-restore-default-security-feature-flags-on-setup.patch
+powerpc-64s-fix-section-mismatch-warnings-from-setup_rfi_flush.patch
+powerpc-64s-add-support-for-a-store-forwarding-barrier-at-kernel-entry-exit.patch
+net-mlx4_en-fix-potential-use-after-free-with-dma_unmap_page.patch
--- /dev/null
+From 607065bad9931e72207b0cac365d7d4abc06bd99 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 10 Dec 2017 17:55:03 -0800
+Subject: tcp: avoid integer overflows in tcp_rcv_space_adjust()
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 607065bad9931e72207b0cac365d7d4abc06bd99 upstream.
+
+When using large tcp_rmem[2] values (I did tests with 500 MB),
+I noticed overflows while computing rcvwin.
+
+Lets fix this before the following patch.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Acked-by: Wei Wang <weiwan@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[Backport: sysctl_tcp_rmem is not Namespace-ify'd in older kernels]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/tcp.h | 2 +-
+ net/ipv4/tcp_input.c | 10 ++++++----
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -337,7 +337,7 @@ struct tcp_sock {
+
+ /* Receiver queue space */
+ struct {
+- int space;
++ u32 space;
+ u32 seq;
+ u32 time;
+ } rcvq_space;
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -581,8 +581,8 @@ static inline void tcp_rcv_rtt_measure_t
+ void tcp_rcv_space_adjust(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
++ u32 copied;
+ int time;
+- int copied;
+
+ time = tcp_time_stamp - tp->rcvq_space.time;
+ if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
+@@ -604,12 +604,13 @@ void tcp_rcv_space_adjust(struct sock *s
+
+ if (sysctl_tcp_moderate_rcvbuf &&
+ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+- int rcvwin, rcvmem, rcvbuf;
++ int rcvmem, rcvbuf;
++ u64 rcvwin;
+
+ /* minimal window to cope with packet losses, assuming
+ * steady state. Add some cushion because of small variations.
+ */
+- rcvwin = (copied << 1) + 16 * tp->advmss;
++ rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
+
+ /* If rate increased by 25%,
+ * assume slow start, rcvwin = 3 * copied
+@@ -629,7 +630,8 @@ void tcp_rcv_space_adjust(struct sock *s
+ while (tcp_win_from_space(rcvmem) < tp->advmss)
+ rcvmem += 128;
+
+- rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
++ do_div(rcvwin, tp->advmss);
++ rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
+ if (rcvbuf > sk->sk_rcvbuf) {
+ sk->sk_rcvbuf = rcvbuf;
+
--- /dev/null
+From foo@baz Sat Jun 2 15:10:07 CEST 2018
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 30 May 2018 13:09:58 +0200
+Subject: x86/amd: don't set X86_BUG_SYSRET_SS_ATTRS when running under Xen
+To: stable@vger.kernel.org, xen-devel@lists.xenproject.org
+Cc: dwmw@amazon.co.uk, boris.ostrovsky@oracle.com, Juergen Gross <jgross@suse.com>
+Message-ID: <20180530110958.19413-4-jgross@suse.com>
+
+From: Juergen Gross <jgross@suse.com>
+
+Upstream commit: def9331a12977770cc6132d79f8e6565871e8e38 ("x86/amd:
+don't set X86_BUG_SYSRET_SS_ATTRS when running under Xen")
+
+When running as Xen pv guest X86_BUG_SYSRET_SS_ATTRS must not be set
+on AMD cpus.
+
+This bug/feature bit is kind of special as it will be used very early
+when switching threads. Setting the bit and clearing it a little bit
+later leaves a critical window where things can go wrong. This time
+window has enlarged a little bit by using setup_clear_cpu_cap() instead
+of the hypervisor's set_cpu_features callback. It seems this larger
+window now makes it rather easy to hit the problem.
+
+The proper solution is to never set the bit in case of Xen.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 5 +++--
+ arch/x86/xen/enlighten.c | 4 +---
+ 2 files changed, 4 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -857,8 +857,9 @@ static void init_amd(struct cpuinfo_x86
+ if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+ set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
+
+- /* AMD CPUs don't reset SS attributes on SYSRET */
+- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
++ if (!cpu_has(c, X86_FEATURE_XENPV))
++ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ }
+
+ #ifdef CONFIG_X86_32
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -473,10 +473,8 @@ static void __init xen_init_cpuid_mask(v
+
+ static void __init xen_init_capabilities(void)
+ {
+- if (xen_pv_domain()) {
+- setup_clear_cpu_cap(X86_BUG_SYSRET_SS_ATTRS);
++ if (xen_pv_domain())
+ setup_force_cpu_cap(X86_FEATURE_XENPV);
+- }
+ }
+
+ static void xen_set_debugreg(int reg, unsigned long val)
--- /dev/null
+From foo@baz Sat Jun 2 15:10:07 CEST 2018
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 30 May 2018 13:09:56 +0200
+Subject: x86/amd: revert commit 944e0fc51a89c9827b9
+To: stable@vger.kernel.org, xen-devel@lists.xenproject.org
+Cc: dwmw@amazon.co.uk, boris.ostrovsky@oracle.com, Juergen Gross <jgross@suse.com>
+Message-ID: <20180530110958.19413-2-jgross@suse.com>
+
+From: Juergen Gross <jgross@suse.com>
+
+Revert commit 944e0fc51a89c9827b98813d65dc083274777c7f ("x86/amd: don't
+set X86_BUG_SYSRET_SS_ATTRS when running under Xen") as it is lacking
+a prerequisite patch and is making things worse.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 5 ++---
+ arch/x86/xen/enlighten.c | 4 +++-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -857,9 +857,8 @@ static void init_amd(struct cpuinfo_x86
+ if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+ set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
+
+- /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+- if (!cpu_has(c, X86_FEATURE_XENPV))
+- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++ /* AMD CPUs don't reset SS attributes on SYSRET */
++ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ }
+
+ #ifdef CONFIG_X86_32
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1977,8 +1977,10 @@ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+ static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+ {
+- if (xen_pv_domain())
++ if (xen_pv_domain()) {
++ clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ set_cpu_cap(c, X86_FEATURE_XENPV);
++ }
+ }
+
+ static void xen_pin_vcpu(int cpu)
--- /dev/null
+From foo@baz Sat Jun 2 15:10:07 CEST 2018
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 30 May 2018 13:09:57 +0200
+Subject: xen: set cpu capabilities from xen_start_kernel()
+To: stable@vger.kernel.org, xen-devel@lists.xenproject.org
+Cc: dwmw@amazon.co.uk, boris.ostrovsky@oracle.com, Juergen Gross <jgross@suse.com>
+Message-ID: <20180530110958.19413-3-jgross@suse.com>
+
+From: Juergen Gross <jgross@suse.com>
+
+Upstream commit: 0808e80cb760de2733c0527d2090ed2205a1eef8 ("xen: set
+cpu capabilities from xen_start_kernel()")
+
+There is no need to set the same capabilities for each cpu
+individually. This can easily be done for all cpus when starting the
+kernel.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/xen/enlighten.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -471,6 +471,14 @@ static void __init xen_init_cpuid_mask(v
+ cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
+ }
+
++static void __init xen_init_capabilities(void)
++{
++ if (xen_pv_domain()) {
++ setup_clear_cpu_cap(X86_BUG_SYSRET_SS_ATTRS);
++ setup_force_cpu_cap(X86_FEATURE_XENPV);
++ }
++}
++
+ static void xen_set_debugreg(int reg, unsigned long val)
+ {
+ HYPERVISOR_set_debugreg(reg, val);
+@@ -1631,6 +1639,7 @@ asmlinkage __visible void __init xen_sta
+
+ xen_init_irq_ops();
+ xen_init_cpuid_mask();
++ xen_init_capabilities();
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ /*
+@@ -1975,14 +1984,6 @@ bool xen_hvm_need_lapic(void)
+ }
+ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+-static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+-{
+- if (xen_pv_domain()) {
+- clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+- set_cpu_cap(c, X86_FEATURE_XENPV);
+- }
+-}
+-
+ static void xen_pin_vcpu(int cpu)
+ {
+ static bool disable_pinning;
+@@ -2029,7 +2030,6 @@ const struct hypervisor_x86 x86_hyper_xe
+ .init_platform = xen_hvm_guest_init,
+ #endif
+ .x2apic_available = xen_x2apic_para_available,
+- .set_cpu_features = xen_set_cpu_features,
+ .pin_vcpu = xen_pin_vcpu,
+ };
+ EXPORT_SYMBOL(x86_hyper_xen);