--- /dev/null
+From 1bf72720281770162c87990697eae1ba2f1d917a Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Thu, 16 May 2019 09:09:35 +0200
+Subject: cpu/speculation: Warn on unsupported mitigations= parameter
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+commit 1bf72720281770162c87990697eae1ba2f1d917a upstream.
+
+Currently, if the user specifies an unsupported mitigation strategy on the
+kernel command line, it will be ignored silently. The code will fall back
+to the default strategy, possibly leaving the system more vulnerable than
+expected.
+
+This may happen due to e.g. a simple typo, or, for a stable kernel release,
+because not all mitigation strategies have been backported.
+
+Inform the user by printing a message.
+
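+With this change a mistyped option is reported during boot. For example
+(illustrative; the argument below is a hypothetical typo of "off"):
+
+    mitigations=offf
+
+produces:
+
+    Unsupported mitigations=offf, system may still be vulnerable
+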
+Fixes: 98af8452945c5565 ("cpu/speculation: Add 'mitigations=' cmdline option")
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20190516070935.22546-1-geert@linux-m68k.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cpu.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2289,6 +2289,9 @@ static int __init mitigations_parse_cmdl
+ cpu_mitigations = CPU_MITIGATIONS_AUTO;
+ else if (!strcmp(arg, "auto,nosmt"))
+ cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
++ else
++ pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
++ arg);
+
+ return 0;
+ }
--- /dev/null
+From 6d4d367d0e9ffab4d64a3436256a6a052dc1195d Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@mips.com>
+Date: Wed, 5 Jun 2019 09:34:10 +0100
+Subject: irqchip/mips-gic: Use the correct local interrupt map registers
+
+From: Paul Burton <paul.burton@mips.com>
+
+commit 6d4d367d0e9ffab4d64a3436256a6a052dc1195d upstream.
+
+The MIPS GIC contains a block of registers used to map local interrupts
+to a particular CPU interrupt pin. Since these registers are found at a
+consecutive range of addresses we access them using an index, via the
+(read|write)_gic_v[lo]_map accessor functions. We currently use values
+from enum mips_gic_local_interrupt as those indices.
+
+Unfortunately whilst enum mips_gic_local_interrupt provides the correct
+offsets for bits in the pending & mask registers, the ordering of the
+map registers is subtly different... Compared with the ordering of
+pending & mask bits, the map registers move the FDC from the end of the
+list to index 3, right after the timer interrupt. As a result the
+performance counter & software interrupts are at indices 4-6 rather
+than indices 3-5.
+
+Notably this causes problems with performance counter interrupts being
+incorrectly mapped on some systems, and presumably will also cause
+problems for FDC interrupts.
+
+Introduce a function to map from enum mips_gic_local_interrupt to the
+index of the corresponding map register, and use it to ensure we access
+the map registers for the correct interrupts.
+
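+A minimal userspace sketch of the remapping (the enum values below follow
+the pending/mask ordering described above and are illustrative, not copied
+from the kernel headers):
+
+	#include <stdio.h>
+
+	/* Pending/mask order: WD=0, COMPARE=1, TIMER=2, PERFCTR=3,
+	 * SWINT0=4, SWINT1=5, FDC=6. */
+	enum local_int { WD, COMPARE, TIMER, PERFCTR, SWINT0, SWINT1, FDC };
+
+	/* Map-register order moves FDC to index 3, after TIMER. */
+	static unsigned int vx_map_reg(enum local_int intr)
+	{
+		if (intr <= TIMER)
+			return intr;		/* WD, Compare, Timer are 1:1 */
+		if (intr == FDC)
+			return TIMER + 1;	/* FDC lands at index 3 */
+		return intr + 1;		/* everything else shifts by 1 */
+	}
+
+	int main(void)
+	{
+		for (enum local_int i = WD; i <= FDC; i++)
+			printf("intr %d -> map reg %u\n", i, vx_map_reg(i));
+		return 0;
+	}
+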
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Fixes: a0dc5cb5e31b ("irqchip: mips-gic: Simplify gic_local_irq_domain_map()")
+Fixes: da61fcf9d62a ("irqchip: mips-gic: Use irq_cpu_online to (un)mask all-VP(E) IRQs")
+Reported-and-tested-by: Archer Yan <ayan@wavecomp.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Jason Cooper <jason@lakedaemon.net>
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/mips-gic.h | 30 ++++++++++++++++++++++++++++++
+ drivers/irqchip/irq-mips-gic.c | 4 ++--
+ 2 files changed, 32 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/include/asm/mips-gic.h
++++ b/arch/mips/include/asm/mips-gic.h
+@@ -315,6 +315,36 @@ static inline bool mips_gic_present(void
+ }
+
+ /**
++ * mips_gic_vx_map_reg() - Return GIC_Vx_<intr>_MAP register offset
++ * @intr: A GIC local interrupt
++ *
++ * Determine the index of the GIC_VL_<intr>_MAP or GIC_VO_<intr>_MAP register
++ * within the block of GIC map registers. This is almost the same as the order
++ * of interrupts in the pending & mask registers, as used by enum
++ * mips_gic_local_interrupt, but moves the FDC interrupt & thus offsets the
++ * interrupts after it...
++ *
++ * Return: The map register index corresponding to @intr.
++ *
++ * The return value is suitable for use with the (read|write)_gic_v[lo]_map
++ * accessor functions.
++ */
++static inline unsigned int
++mips_gic_vx_map_reg(enum mips_gic_local_interrupt intr)
++{
++ /* WD, Compare & Timer are 1:1 */
++ if (intr <= GIC_LOCAL_INT_TIMER)
++ return intr;
++
++ /* FDC moves to after Timer... */
++ if (intr == GIC_LOCAL_INT_FDC)
++ return GIC_LOCAL_INT_TIMER + 1;
++
++ /* As a result everything else is offset by 1 */
++ return intr + 1;
++}
++
++/**
+ * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq
+ *
+ * Determine the virq number to use for the coprocessor 0 count/compare
+--- a/drivers/irqchip/irq-mips-gic.c
++++ b/drivers/irqchip/irq-mips-gic.c
+@@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+
+- write_gic_vl_map(intr, cd->map);
++ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+ }
+@@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq
+ spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+- write_gic_vo_map(intr, map);
++ write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
+ }
+ spin_unlock_irqrestore(&gic_lock, flags);
+
--- /dev/null
+From b6b80c78af838bef17501416d5d383fedab0010a Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Thu, 13 Jun 2019 10:22:23 -0700
+Subject: KVM: x86/mmu: Allocate PAE root array when using SVM's 32-bit NPT
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit b6b80c78af838bef17501416d5d383fedab0010a upstream.
+
+SVM's Nested Page Tables (NPT) reuses x86 paging for the host-controlled
+page walk. For 32-bit KVM, this means PAE paging is used even when TDP
+is enabled, i.e. the PAE root array needs to be allocated.
+
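+Concretely, the fix changes the early-out in alloc_mmu_pages() (sketch of
+the hunk below; PT32E_ROOT_LEVEL is the 3-level PAE root level):
+
+	/* Before: any TDP setup skipped the PAE root allocation. */
+	if (tdp_enabled)
+		return 0;
+
+	/* After: skip only when the TDP page walk is deeper than PAE,
+	 * keeping the allocation for SVM's 32-bit (PAE) NPT. */
+	if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+		return 0;
+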
+Fixes: ee6268ba3a68 ("KVM: x86: Skip pae_root shadow allocation if tdp enabled")
+Cc: stable@vger.kernel.org
+Reported-by: Jiri Palecek <jpalecek@web.de>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Jiri Palecek <jpalecek@web.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5386,7 +5386,16 @@ static int alloc_mmu_pages(struct kvm_vc
+ struct page *page;
+ int i;
+
+- if (tdp_enabled)
++ /*
++ * When using PAE paging, the four PDPTEs are treated as 'root' pages,
++ * while the PDP table is a per-vCPU construct that's allocated at MMU
++ * creation. When emulating 32-bit mode, cr3 is only 32 bits even on
++ * x86_64. Therefore we need to allocate the PDP table in the first
++ * 4GB of memory, which happens to fit the DMA32 zone. Except for
++ * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
++ * skip allocating the PDP table.
++ */
++ if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+ return 0;
+
+ /*
--- /dev/null
+From 68f461593f76bd5f17e87cdd0bea28f4278c7268 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trondmy@gmail.com>
+Date: Tue, 25 Jun 2019 16:41:16 -0400
+Subject: NFS/flexfiles: Use the correct TCP timeout for flexfiles I/O
+
+From: Trond Myklebust <trondmy@gmail.com>
+
+commit 68f461593f76bd5f17e87cdd0bea28f4278c7268 upstream.
+
+Fix a typo where we're mistaking the default TCP retrans value
+(NFS_DEF_TCP_RETRANS) for the default TCP timeout value.
+
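+Assuming the usual definitions (NFS_DEF_TCP_RETRANS is 2, while
+NFS_DEF_TCP_TIMEO is 600 tenths of a second, i.e. 60 seconds), the typo
+made the flexfiles dataserver timeout a fraction of a second instead of
+the intended minute.
+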
+Fixes: 15d03055cf39f ("pNFS/flexfiles: Set reasonable default ...")
+Cc: stable@vger.kernel.org # 4.8+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/flexfilelayout/flexfilelayoutdev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -18,7 +18,7 @@
+
+ #define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+-static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
++static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
+ static unsigned int dataserver_retrans;
+
+ static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
mm-page_idle.c-fix-oops-because-end_pfn-is-larger-than-max_pfn.patch
dm-log-writes-make-sure-super-sector-log-updates-are-written-in-order.patch
scsi-vmw_pscsi-fix-use-after-free-in-pvscsi_queue_lck.patch
+x86-speculation-allow-guests-to-use-ssbd-even-if-host-does-not.patch
+x86-microcode-fix-the-microcode-load-on-cpu-hotplug-for-real.patch
+x86-resctrl-prevent-possible-overrun-during-bitmap-operations.patch
+kvm-x86-mmu-allocate-pae-root-array-when-using-svm-s-32-bit-npt.patch
+nfs-flexfiles-use-the-correct-tcp-timeout-for-flexfiles-i-o.patch
+cpu-speculation-warn-on-unsupported-mitigations-parameter.patch
+sunrpc-clean-up-initialisation-of-the-struct-rpc_rqst.patch
+irqchip-mips-gic-use-the-correct-local-interrupt-map-registers.patch
--- /dev/null
+From 9dc6edcf676fe188430e8b119f91280bbf285163 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Wed, 22 Aug 2018 14:24:16 -0400
+Subject: SUNRPC: Clean up initialisation of the struct rpc_rqst
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 9dc6edcf676fe188430e8b119f91280bbf285163 upstream.
+
+Move the initialisation back into xprt.c.
+
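+After this change both xprt_reserve() and xprt_retry_reserve() go through
+a common helper that initialises the request as soon as a slot has been
+allocated, instead of relying on call_reserveresult() in clnt.c to do it:
+
+	static void
+	xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
+	{
+		xprt->ops->alloc_slot(xprt, task);
+		if (task->tk_rqstp != NULL)
+			xprt_request_init(task);
+	}
+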
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Cc: Yihao Wu <wuyihao@linux.alibaba.com>
+Cc: Caspar Zhang <caspar@linux.alibaba.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/sunrpc/xprt.h | 1
+ net/sunrpc/clnt.c | 1
+ net/sunrpc/xprt.c | 91 ++++++++++++++++++++++++--------------------
+ 3 files changed, 51 insertions(+), 42 deletions(-)
+
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -325,7 +325,6 @@ struct xprt_class {
+ struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
+ void xprt_connect(struct rpc_task *task);
+ void xprt_reserve(struct rpc_task *task);
+-void xprt_request_init(struct rpc_task *task);
+ void xprt_retry_reserve(struct rpc_task *task);
+ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1558,7 +1558,6 @@ call_reserveresult(struct rpc_task *task
+ task->tk_status = 0;
+ if (status >= 0) {
+ if (task->tk_rqstp) {
+- xprt_request_init(task);
+ task->tk_action = call_refresh;
+ return;
+ }
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1257,6 +1257,55 @@ void xprt_free(struct rpc_xprt *xprt)
+ }
+ EXPORT_SYMBOL_GPL(xprt_free);
+
++static __be32
++xprt_alloc_xid(struct rpc_xprt *xprt)
++{
++ __be32 xid;
++
++ spin_lock(&xprt->reserve_lock);
++ xid = (__force __be32)xprt->xid++;
++ spin_unlock(&xprt->reserve_lock);
++ return xid;
++}
++
++static void
++xprt_init_xid(struct rpc_xprt *xprt)
++{
++ xprt->xid = prandom_u32();
++}
++
++static void
++xprt_request_init(struct rpc_task *task)
++{
++ struct rpc_xprt *xprt = task->tk_xprt;
++ struct rpc_rqst *req = task->tk_rqstp;
++
++ INIT_LIST_HEAD(&req->rq_list);
++ req->rq_timeout = task->tk_client->cl_timeout->to_initval;
++ req->rq_task = task;
++ req->rq_xprt = xprt;
++ req->rq_buffer = NULL;
++ req->rq_xid = xprt_alloc_xid(xprt);
++ req->rq_connect_cookie = xprt->connect_cookie - 1;
++ req->rq_bytes_sent = 0;
++ req->rq_snd_buf.len = 0;
++ req->rq_snd_buf.buflen = 0;
++ req->rq_rcv_buf.len = 0;
++ req->rq_rcv_buf.buflen = 0;
++ req->rq_release_snd_buf = NULL;
++ xprt_reset_majortimeo(req);
++ dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
++ req, ntohl(req->rq_xid));
++}
++
++static void
++xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
++{
++ xprt->ops->alloc_slot(xprt, task);
++ if (task->tk_rqstp != NULL)
++ xprt_request_init(task);
++}
++
+ /**
+ * xprt_reserve - allocate an RPC request slot
+ * @task: RPC task requesting a slot allocation
+@@ -1276,7 +1325,7 @@ void xprt_reserve(struct rpc_task *task)
+ task->tk_timeout = 0;
+ task->tk_status = -EAGAIN;
+ if (!xprt_throttle_congested(xprt, task))
+- xprt->ops->alloc_slot(xprt, task);
++ xprt_do_reserve(xprt, task);
+ }
+
+ /**
+@@ -1298,45 +1347,7 @@ void xprt_retry_reserve(struct rpc_task
+
+ task->tk_timeout = 0;
+ task->tk_status = -EAGAIN;
+- xprt->ops->alloc_slot(xprt, task);
+-}
+-
+-static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
+-{
+- __be32 xid;
+-
+- spin_lock(&xprt->reserve_lock);
+- xid = (__force __be32)xprt->xid++;
+- spin_unlock(&xprt->reserve_lock);
+- return xid;
+-}
+-
+-static inline void xprt_init_xid(struct rpc_xprt *xprt)
+-{
+- xprt->xid = prandom_u32();
+-}
+-
+-void xprt_request_init(struct rpc_task *task)
+-{
+- struct rpc_xprt *xprt = task->tk_xprt;
+- struct rpc_rqst *req = task->tk_rqstp;
+-
+- INIT_LIST_HEAD(&req->rq_list);
+- req->rq_timeout = task->tk_client->cl_timeout->to_initval;
+- req->rq_task = task;
+- req->rq_xprt = xprt;
+- req->rq_buffer = NULL;
+- req->rq_xid = xprt_alloc_xid(xprt);
+- req->rq_connect_cookie = xprt->connect_cookie - 1;
+- req->rq_bytes_sent = 0;
+- req->rq_snd_buf.len = 0;
+- req->rq_snd_buf.buflen = 0;
+- req->rq_rcv_buf.len = 0;
+- req->rq_rcv_buf.buflen = 0;
+- req->rq_release_snd_buf = NULL;
+- xprt_reset_majortimeo(req);
+- dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
+- req, ntohl(req->rq_xid));
++ xprt_do_reserve(xprt, task);
+ }
+
+ /**
--- /dev/null
+From 5423f5ce5ca410b3646f355279e4e937d452e622 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 18 Jun 2019 22:31:40 +0200
+Subject: x86/microcode: Fix the microcode load on CPU hotplug for real
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 5423f5ce5ca410b3646f355279e4e937d452e622 upstream.
+
+A recent change moved the microcode loader hotplug callback into the early
+startup phase, which runs with interrupts disabled. It missed that the
+callbacks invoke sysfs functions which might sleep, causing nice 'might
+sleep' splats when proper debugging is enabled.
+
+Split the callbacks and only load the microcode in the early startup phase
+and move the sysfs handling back into the later threaded and preemptible
+bringup phase where it was before.
+
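+The split, as done below: microcode loading stays in the atomic "starting"
+phase, while the sysfs group registration moves to a dynamic online state
+that runs in preemptible context:
+
+	cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER,
+				  "x86/microcode:starting",
+				  mc_cpu_starting, NULL);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+				  "x86/microcode:online",
+				  mc_cpu_online, mc_cpu_down_prep);
+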
+Fixes: 78f4e932f776 ("x86/microcode, cpuhotplug: Add a microcode loader CPU hotplug callback")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: stable@vger.kernel.org
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1906182228350.1766@nanos.tec.linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/core.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -790,13 +790,16 @@ static struct syscore_ops mc_syscore_ops
+ .resume = mc_bp_resume,
+ };
+
+-static int mc_cpu_online(unsigned int cpu)
++static int mc_cpu_starting(unsigned int cpu)
+ {
+- struct device *dev;
+-
+- dev = get_cpu_device(cpu);
+ microcode_update_cpu(cpu);
+ pr_debug("CPU%d added\n", cpu);
++ return 0;
++}
++
++static int mc_cpu_online(unsigned int cpu)
++{
++ struct device *dev = get_cpu_device(cpu);
+
+ if (sysfs_create_group(&dev->kobj, &mc_attr_group))
+ pr_err("Failed to create group for CPU%d\n", cpu);
+@@ -873,7 +876,9 @@ int __init microcode_init(void)
+ goto out_ucode_group;
+
+ register_syscore_ops(&mc_syscore_ops);
+- cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
++ cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
++ mc_cpu_starting, NULL);
++ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+ mc_cpu_online, mc_cpu_down_prep);
+
+ pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
--- /dev/null
+From 32f010deab575199df4ebe7b6aec20c17bb7eccd Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Wed, 19 Jun 2019 13:27:16 -0700
+Subject: x86/resctrl: Prevent possible overrun during bitmap operations
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit 32f010deab575199df4ebe7b6aec20c17bb7eccd upstream.
+
+While the DOC at the beginning of lib/bitmap.c explicitly states that
+"The number of valid bits in a given bitmap does _not_ need to be an
+exact multiple of BITS_PER_LONG.", some of the bitmap operations do
+indeed access BITS_PER_LONG portions of the provided bitmap no matter
+the size of the provided bitmap.
+
+For example, if find_first_bit() is provided with an 8 bit bitmap the
+operation will access BITS_PER_LONG bits from the provided bitmap. While
+the operation ensures that these extra bits do not affect the result,
+the memory is still accessed.
+
+The capacity bitmasks (CBMs) are typically stored in u32 since they
+can never exceed 32 bits. A few instances exist where a bitmap_*
+operation is performed on a CBM by simply pointing the bitmap operation
+to the stored u32 value.
+
+The consequence of this pattern is that some bitmap_* operations will
+access out-of-bounds memory when interacting with the provided CBM.
+
+This same issue has previously been addressed with commit 49e00eee0061
+("x86/intel_rdt: Fix out-of-bounds memory access in CBM tests")
+but at that time not all instances of the issue were fixed.
+
+Fix this by using an unsigned long to store the capacity bitmask data
+that is passed to bitmap functions.
+
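+A minimal userspace sketch of the fixed pattern (__builtin_ctzl stands in
+for find_first_bit; names are illustrative):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		uint32_t cbm = 0x00f0;
+
+		/* Buggy pattern: (unsigned long *)&cbm lets a 64-bit bit
+		 * scan read 8 bytes from a 4-byte object. Widening into a
+		 * real unsigned long keeps the access in bounds: */
+		unsigned long val = cbm;
+
+		printf("first set bit: %d\n", __builtin_ctzl(val));
+		return 0;
+	}
+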
+Fixes: e651901187ab ("x86/intel_rdt: Introduce "bit_usage" to display cache allocations details")
+Fixes: f4e80d67a527 ("x86/intel_rdt: Resctrl files reflect pseudo-locked information")
+Fixes: 95f0b77efa57 ("x86/intel_rdt: Initialize new resource group with sane defaults")
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: stable <stable@vger.kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/58c9b6081fd9bf599af0dfc01a6fdd335768efef.1560975645.git.reinette.chatre@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 35 ++++++++++++++-----------------
+ 1 file changed, 16 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+@@ -792,8 +792,12 @@ static int rdt_bit_usage_show(struct ker
+ struct seq_file *seq, void *v)
+ {
+ struct rdt_resource *r = of->kn->parent->priv;
+- u32 sw_shareable = 0, hw_shareable = 0;
+- u32 exclusive = 0, pseudo_locked = 0;
++ /*
++ * Use unsigned long even though only 32 bits are used to ensure
++ * test_bit() is used safely.
++ */
++ unsigned long sw_shareable = 0, hw_shareable = 0;
++ unsigned long exclusive = 0, pseudo_locked = 0;
+ struct rdt_domain *dom;
+ int i, hwb, swb, excl, psl;
+ enum rdtgrp_mode mode;
+@@ -838,10 +842,10 @@ static int rdt_bit_usage_show(struct ker
+ }
+ for (i = r->cache.cbm_len - 1; i >= 0; i--) {
+ pseudo_locked = dom->plr ? dom->plr->cbm : 0;
+- hwb = test_bit(i, (unsigned long *)&hw_shareable);
+- swb = test_bit(i, (unsigned long *)&sw_shareable);
+- excl = test_bit(i, (unsigned long *)&exclusive);
+- psl = test_bit(i, (unsigned long *)&pseudo_locked);
++ hwb = test_bit(i, &hw_shareable);
++ swb = test_bit(i, &sw_shareable);
++ excl = test_bit(i, &exclusive);
++ psl = test_bit(i, &pseudo_locked);
+ if (hwb && swb)
+ seq_putc(seq, 'X');
+ else if (hwb && !swb)
+@@ -2320,26 +2324,19 @@ out_destroy:
+ */
+ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
+ {
+- /*
+- * Convert the u32 _val to an unsigned long required by all the bit
+- * operations within this function. No more than 32 bits of this
+- * converted value can be accessed because all bit operations are
+- * additionally provided with cbm_len that is initialized during
+- * hardware enumeration using five bits from the EAX register and
+- * thus never can exceed 32 bits.
+- */
+- unsigned long *val = (unsigned long *)_val;
++ unsigned long val = *_val;
+ unsigned int cbm_len = r->cache.cbm_len;
+ unsigned long first_bit, zero_bit;
+
+- if (*val == 0)
++ if (val == 0)
+ return;
+
+- first_bit = find_first_bit(val, cbm_len);
+- zero_bit = find_next_zero_bit(val, cbm_len, first_bit);
++ first_bit = find_first_bit(&val, cbm_len);
++ zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
+
+ /* Clear any remaining bits to ensure contiguous region */
+- bitmap_clear(val, zero_bit, cbm_len - zero_bit);
++ bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
++ *_val = (u32)val;
+ }
+
+ /**
--- /dev/null
+From c1f7fec1eb6a2c86d01bc22afce772c743451d88 Mon Sep 17 00:00:00 2001
+From: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+Date: Mon, 10 Jun 2019 13:20:10 -0400
+Subject: x86/speculation: Allow guests to use SSBD even if host does not
+
+From: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+
+commit c1f7fec1eb6a2c86d01bc22afce772c743451d88 upstream.
+
+The bits set in x86_spec_ctrl_mask are used to calculate the guest's value
+of SPEC_CTRL that is written to the MSR before VMENTRY, and control which
+mitigations the guest can enable. In the case of SSBD, unless the host has
+enabled SSBD always-on mode (by passing "spec_store_bypass_disable=on" in
+the kernel parameters), the SSBD bit is not set in the mask and the guest
+cannot properly enable the SSBD always-on mitigation mode.
+
+This has been confirmed by running the SSBD PoC on a guest using the SSBD
+always-on mitigation mode (booted with the kernel parameter
+"spec_store_bypass_disable=on"), and verifying that the guest is vulnerable
+unless the host is also using SSBD always-on mode. In addition, the guest
+OS incorrectly reports the SSB vulnerability as mitigated.
+
+Always set the SSBD bit in x86_spec_ctrl_mask when the host CPU supports
+it, allowing the guest to use SSBD whether or not the host has chosen to
+enable the mitigation in any of its modes.
+
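+For context, x86_spec_ctrl_mask gates which SPEC_CTRL bits a guest may
+control. A simplified sketch of the guest value computed around VMENTRY
+(paraphrasing x86_virt_spec_ctrl(), not a verbatim copy):
+
+	u64 hostval = x86_spec_ctrl_base;
+	u64 guestval;
+
+	guestval = hostval & ~x86_spec_ctrl_mask;
+	guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+	/* Without SPEC_CTRL_SSBD in the mask, a guest's SSBD request is
+	 * dropped here; with this patch the bit is preserved. */
+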
+Fixes: be6fcb5478e9 ("x86/bugs: Rework spec_ctrl base and mask logic")
+Signed-off-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Liam Merwick <liam.merwick@oracle.com>
+Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: bp@alien8.de
+Cc: rkrcmar@redhat.com
+Cc: kvm@vger.kernel.org
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/1560187210-11054-1-git-send-email-alejandro.j.jimenez@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -821,6 +821,16 @@ static enum ssb_mitigation __init __ssb_
+ }
+
+ /*
++ * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
++ * bit in the mask to allow guests to use the mitigation even in the
++ * case where the host does not enable it.
++ */
++ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
++ }
++
++ /*
+ * We have three CPU feature flags that are in play here:
+ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+@@ -837,7 +847,6 @@ static enum ssb_mitigation __init __ssb_
+ x86_amd_ssb_disable();
+ } else {
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+- x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ }
+ }