4.0-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 2 Jun 2015 07:22:52 +0000 (16:22 +0900)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 2 Jun 2015 07:22:52 +0000 (16:22 +0900)
added patches:
iommu-amd-fix-bug-in-put_pasid_state_wait.patch
iommu-arm-smmu-fix-sign-extension-of-upstream-bus-addresses-at-stage-1.patch
ktime-fix-ktime_divns-to-do-signed-division.patch
kvm-fix-crash-in-kvm_vcpu_reload_apic_access_page.patch
kvm-fpu-enable-eager-restore-kvm-fpu-for-mpx.patch
kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch
kvm-mmu-fix-smap-permission-check.patch
kvm-mmu-fix-smap-virtualization.patch
revert-kvm-x86-drop-fpu_activate-hook.patch
x86-fpu-disable-xsaves-support-for-now.patch
x86-mce-fix-mce-severity-messages.patch

12 files changed:
queue-4.0/iommu-amd-fix-bug-in-put_pasid_state_wait.patch [new file with mode: 0644]
queue-4.0/iommu-arm-smmu-fix-sign-extension-of-upstream-bus-addresses-at-stage-1.patch [new file with mode: 0644]
queue-4.0/ktime-fix-ktime_divns-to-do-signed-division.patch [new file with mode: 0644]
queue-4.0/kvm-fix-crash-in-kvm_vcpu_reload_apic_access_page.patch [new file with mode: 0644]
queue-4.0/kvm-fpu-enable-eager-restore-kvm-fpu-for-mpx.patch [new file with mode: 0644]
queue-4.0/kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch [new file with mode: 0644]
queue-4.0/kvm-mmu-fix-smap-permission-check.patch [new file with mode: 0644]
queue-4.0/kvm-mmu-fix-smap-virtualization.patch [new file with mode: 0644]
queue-4.0/revert-kvm-x86-drop-fpu_activate-hook.patch [new file with mode: 0644]
queue-4.0/series
queue-4.0/x86-fpu-disable-xsaves-support-for-now.patch [new file with mode: 0644]
queue-4.0/x86-mce-fix-mce-severity-messages.patch [new file with mode: 0644]

diff --git a/queue-4.0/iommu-amd-fix-bug-in-put_pasid_state_wait.patch b/queue-4.0/iommu-amd-fix-bug-in-put_pasid_state_wait.patch
new file mode 100644 (file)
index 0000000..f9675da
--- /dev/null
@@ -0,0 +1,34 @@
+From 1bf1b431d98d7e5b5419876d4c219469e60693e1 Mon Sep 17 00:00:00 2001
+From: Oded Gabbay <oded.gabbay@amd.com>
+Date: Thu, 16 Apr 2015 17:08:44 +0300
+Subject: iommu/amd: Fix bug in put_pasid_state_wait
+
+From: Oded Gabbay <oded.gabbay@amd.com>
+
+commit 1bf1b431d98d7e5b5419876d4c219469e60693e1 upstream.
+
+This patch fixes a bug in put_pasid_state_wait that appeared in kernel 4.0.
+The bug is that pasid_state->count wasn't decremented before entering the
+wait_event, so the condition in wait_event could never become true.
+
+The fix is to decrement (atomically) the pasid_state->count before the
+wait_event.
+
+Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/amd_iommu_v2.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/iommu/amd_iommu_v2.c
++++ b/drivers/iommu/amd_iommu_v2.c
+@@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid
+ static void put_pasid_state_wait(struct pasid_state *pasid_state)
+ {
++      atomic_dec(&pasid_state->count);
+       wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
+       free_pasid_state(pasid_state);
+ }
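
For illustration, here is a minimal userspace C model of the refcount bug
fixed above; this is a sketch, not kernel code, and the type and function
names are invented. The waiter holds the final reference, so the wait
condition can never become true unless the waiter drops its own reference
first, which is exactly what the added atomic_dec() provides.

#include <stdatomic.h>
#include <stdio.h>

struct pasid_state_model {
        atomic_int count;
};

static void put_pasid_state_wait_model(struct pasid_state_model *s)
{
        atomic_fetch_sub(&s->count, 1);         /* the added decrement */
        while (atomic_load(&s->count) != 0)     /* models wait_event(wq, !count) */
                ;                               /* the kernel would sleep here */
        printf("count hit zero, free_pasid_state() would run now\n");
}

int main(void)
{
        /* Only our own reference is left; without the decrement above,
         * the wait condition could never become true. */
        struct pasid_state_model s = { .count = 1 };

        put_pasid_state_wait_model(&s);
        return 0;
}
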
diff --git a/queue-4.0/iommu-arm-smmu-fix-sign-extension-of-upstream-bus-addresses-at-stage-1.patch b/queue-4.0/iommu-arm-smmu-fix-sign-extension-of-upstream-bus-addresses-at-stage-1.patch
new file mode 100644 (file)
index 0000000..eed958e
--- /dev/null
@@ -0,0 +1,88 @@
+From 5dc5616ee850eaba055bb469a6c4a471d489140e Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 8 May 2015 17:44:22 +0100
+Subject: iommu/arm-smmu: Fix sign-extension of upstream bus addresses at stage 1
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 5dc5616ee850eaba055bb469a6c4a471d489140e upstream.
+
+Stage 1 translation is controlled by two sets of page tables (TTBR0 and
+TTBR1) which grow up and down from zero respectively in the ARMv8
+translation regime. For the SMMU, we only care about TTBR0 and, in the
+case of a 48-bit virtual space, we expect to map virtual addresses 0x0
+through to 0xffff_ffff_ffff.
+
+Given that some masters may be incapable of emitting virtual addresses
+targeting TTBR1 (e.g. because they sit on a 48-bit bus), the SMMU
+architecture allows bit 47 to be sign-extended, halving the virtual
+range of TTBR0 but allowing TTBR1 to be used. This is controlled by the
+SEP field in TTBCR2.
+
+The SMMU driver incorrectly enables this sign-extension feature, which
+causes problems when userspace addresses are programmed into a master
+device with the SMMU expecting to map the incoming transactions via
+TTBR0; if the top bit of the address is set, we will instead get a
+translation fault since TTBR1 walks are disabled in the TTBCR.
+
+This patch fixes the issue by disabling sign-extension of a fixed
+virtual address bit and instead basing the behaviour on the upstream bus
+size: the incoming address is zero-extended unless the upstream bus is
+only 49 bits wide, in which case bit 48 is used as the sign bit and is
+replicated to the upper bits.
+
+Reported-by: Varun Sethi <varun.sethi@freescale.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/arm-smmu.c |   30 ++----------------------------
+ 1 file changed, 2 insertions(+), 28 deletions(-)
+
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -224,14 +224,7 @@
+ #define RESUME_TERMINATE              (1 << 0)
+ #define TTBCR2_SEP_SHIFT              15
+-#define TTBCR2_SEP_MASK                       0x7
+-
+-#define TTBCR2_ADDR_32                        0
+-#define TTBCR2_ADDR_36                        1
+-#define TTBCR2_ADDR_40                        2
+-#define TTBCR2_ADDR_42                        3
+-#define TTBCR2_ADDR_44                        4
+-#define TTBCR2_ADDR_48                        5
++#define TTBCR2_SEP_UPSTREAM           (0x7 << TTBCR2_SEP_SHIFT)
+ #define TTBRn_HI_ASID_SHIFT            16
+@@ -783,26 +776,7 @@ static void arm_smmu_init_context_bank(s
+               writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+               if (smmu->version > ARM_SMMU_V1) {
+                       reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+-                      switch (smmu->va_size) {
+-                      case 32:
+-                              reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+-                              break;
+-                      case 36:
+-                              reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+-                              break;
+-                      case 40:
+-                              reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+-                              break;
+-                      case 42:
+-                              reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+-                              break;
+-                      case 44:
+-                              reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+-                              break;
+-                      case 48:
+-                              reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+-                              break;
+-                      }
++                      reg |= TTBCR2_SEP_UPSTREAM;
+                       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+               }
+       } else {
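
To make the SEP behaviour concrete, the following is a small standalone C
sketch (illustrative only; the helper name is invented) of what
sign-extending a chosen address bit does to an upstream bus address. With
the sign bit wrongly placed at bit 47, a legitimate TTBR0 address with
that bit set lands in the TTBR1 half of the virtual space, which is the
fault described above.

#include <stdint.h>
#include <stdio.h>

/* Replicate the chosen sign bit into all bits above it, as TTBCR2.SEP
 * does in hardware; addresses with the bit clear stay zero-extended. */
static uint64_t sep_extend(uint64_t addr, unsigned int sign_bit)
{
        uint64_t upper = ~0ULL << sign_bit;

        return (addr & (1ULL << sign_bit)) ? (addr | upper) : (addr & ~upper);
}

int main(void)
{
        /* Prints 0xffff800000000000: the TTBR0 address has escaped into
         * the TTBR1 range, so the walk faults when TTBR1 is disabled. */
        printf("%#llx\n", (unsigned long long)sep_extend(0x800000000000ULL, 47));
        return 0;
}
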
diff --git a/queue-4.0/ktime-fix-ktime_divns-to-do-signed-division.patch b/queue-4.0/ktime-fix-ktime_divns-to-do-signed-division.patch
new file mode 100644 (file)
index 0000000..597c2cd
--- /dev/null
@@ -0,0 +1,123 @@
+From f7bcb70ebae0dcdb5a2d859b09e4465784d99029 Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Fri, 8 May 2015 13:47:23 -0700
+Subject: ktime: Fix ktime_divns to do signed division
+
+From: John Stultz <john.stultz@linaro.org>
+
+commit f7bcb70ebae0dcdb5a2d859b09e4465784d99029 upstream.
+
+It was noted that the 32bit implementation of ktime_divns()
+was doing unsigned division and didn't properly handle
+negative values.
+
+And when a ktime helper was changed to utilize
+ktime_divns, it caused a regression on some IR blasters.
+See the following bugzilla for details:
+  https://bugzilla.redhat.com/show_bug.cgi?id=1200353
+
+This patch fixes the problem in ktime_divns by checking for and
+preserving the sign bit, and then reapplying it if appropriate
+after the division. It also changes the return type to s64 to
+make it more obvious that signed values are expected.
+
+Nicolas also pointed out that negative divisors would cause
+infinite loops on 32-bit systems. Negative divisors are unlikely
+for users of this function, but out of caution this patch adds
+checks for negative divisors to both the 32-bit (BUG_ON) and
+64-bit (WARN_ON) versions to make sure no such use cases creep
+in.
+
+[ tglx: Hand a u64 to do_div() to avoid the compiler warning ]
+
+Fixes: 166afb64511e 'ktime: Sanitize ktime_to_us/ms conversion'
+Reported-and-tested-by: Trevor Cordes <trevor@tecnopolis.ca>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Josh Boyer <jwboyer@redhat.com>
+Cc: One Thousand Gnomes <gnomes@lxorguk.ukuu.org.uk>
+Link: http://lkml.kernel.org/r/1431118043-23452-1-git-send-email-john.stultz@linaro.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/ktime.h |   27 +++++++++++++++++++++------
+ kernel/time/hrtimer.c |   14 ++++++++------
+ 2 files changed, 29 insertions(+), 12 deletions(-)
+
+--- a/include/linux/ktime.h
++++ b/include/linux/ktime.h
+@@ -166,19 +166,34 @@ static inline bool ktime_before(const kt
+ }
+ #if BITS_PER_LONG < 64
+-extern u64 __ktime_divns(const ktime_t kt, s64 div);
+-static inline u64 ktime_divns(const ktime_t kt, s64 div)
++extern s64 __ktime_divns(const ktime_t kt, s64 div);
++static inline s64 ktime_divns(const ktime_t kt, s64 div)
+ {
++      /*
++       * Negative divisors could cause an inf loop,
++       * so bug out here.
++       */
++      BUG_ON(div < 0);
+       if (__builtin_constant_p(div) && !(div >> 32)) {
+-              u64 ns = kt.tv64;
+-              do_div(ns, div);
+-              return ns;
++              s64 ns = kt.tv64;
++              u64 tmp = ns < 0 ? -ns : ns;
++
++              do_div(tmp, div);
++              return ns < 0 ? -tmp : tmp;
+       } else {
+               return __ktime_divns(kt, div);
+       }
+ }
+ #else /* BITS_PER_LONG < 64 */
+-# define ktime_divns(kt, div)         (u64)((kt).tv64 / (div))
++static inline s64 ktime_divns(const ktime_t kt, s64 div)
++{
++      /*
++       * 32-bit implementation cannot handle negative divisors,
++       * so catch them on 64bit as well.
++       */
++      WARN_ON(div < 0);
++      return kt.tv64 / div;
++}
+ #endif
+ static inline s64 ktime_to_us(const ktime_t kt)
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *
+ /*
+  * Divide a ktime value by a nanosecond value
+  */
+-u64 __ktime_divns(const ktime_t kt, s64 div)
++s64 __ktime_divns(const ktime_t kt, s64 div)
+ {
+-      u64 dclc;
+       int sft = 0;
++      s64 dclc;
++      u64 tmp;
+       dclc = ktime_to_ns(kt);
++      tmp = dclc < 0 ? -dclc : dclc;
++
+       /* Make sure the divisor is less than 2^32: */
+       while (div >> 32) {
+               sft++;
+               div >>= 1;
+       }
+-      dclc >>= sft;
+-      do_div(dclc, (unsigned long) div);
+-
+-      return dclc;
++      tmp >>= sft;
++      do_div(tmp, (unsigned long) div);
++      return dclc < 0 ? -tmp : tmp;
+ }
+ EXPORT_SYMBOL_GPL(__ktime_divns);
+ #endif /* BITS_PER_LONG >= 64 */
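
The sign-handling shape used by the fix can be modeled in userspace like
this; a sketch only, assuming the same magnitude-divide-then-reapply-sign
approach as the patched __ktime_divns() (the kernel's do_div() only
handles u64, hence the unsigned temporary):

#include <stdint.h>
#include <stdio.h>

static int64_t divns_model(int64_t dclc, int64_t div)
{
        uint64_t tmp = dclc < 0 ? 0 - (uint64_t)dclc : (uint64_t)dclc;
        int sft = 0;

        /* Make sure the divisor fits in 32 bits, as the kernel does. */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
        tmp >>= sft;
        tmp /= (uint64_t)div;                   /* models do_div(tmp, div) */

        return dclc < 0 ? -(int64_t)tmp : (int64_t)tmp;
}

int main(void)
{
        /* The old unsigned code returned a huge bogus value here; the
         * signed handling yields -1000 as expected. */
        printf("%lld\n", (long long)divns_model(-1000000000LL, 1000000LL));
        return 0;
}
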
diff --git a/queue-4.0/kvm-fix-crash-in-kvm_vcpu_reload_apic_access_page.patch b/queue-4.0/kvm-fix-crash-in-kvm_vcpu_reload_apic_access_page.patch
new file mode 100644 (file)
index 0000000..25221f0
--- /dev/null
@@ -0,0 +1,72 @@
+From e8fd5e9e9984675f45b9a5485909c143fbde248f Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Fri, 8 May 2015 14:32:56 +0200
+Subject: kvm: fix crash in kvm_vcpu_reload_apic_access_page
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit e8fd5e9e9984675f45b9a5485909c143fbde248f upstream.
+
+memslot->userfault_addr is set by the kernel with an mmap executed
+from the kernel, but userland can still munmap it, leading to the
+below oops once memslot->userfault_addr points to a host virtual
+address that has no vma or mapping.
+
+[  327.538306] BUG: unable to handle kernel paging request at fffffffffffffffe
+[  327.538407] IP: [<ffffffff811a7b55>] put_page+0x5/0x50
+[  327.538474] PGD 1a01067 PUD 1a03067 PMD 0
+[  327.538529] Oops: 0000 [#1] SMP
+[  327.538574] Modules linked in: macvtap macvlan xt_CHECKSUM iptable_mangle ipt_MASQUERADE nf_nat_masquerade_ipv4 iptable_nat nf_nat_ipv4 nf_nat nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack nf_conntrack ipt_REJECT iptable_filter ip_tables tun bridge stp llc rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache xprtrdma ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ipmi_devintf iTCO_wdt iTCO_vendor_support intel_powerclamp coretemp dcdbas intel_rapl kvm_intel kvm crct10dif_pclmul crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul glue_helper ablk_helper cryptd pcspkr sb_edac edac_core ipmi_si ipmi_msghandler acpi_pad wmi acpi_power_meter lpc_ich mfd_core mei_me
+[  327.539488]  mei shpchp nfsd auth_rpcgss nfs_acl lockd grace sunrpc mlx4_ib ib_sa ib_mad ib_core mlx4_en vxlan ib_addr ip_tunnel xfs libcrc32c sd_mod crc_t10dif crct10dif_common crc32c_intel mgag200 syscopyarea sysfillrect sysimgblt i2c_algo_bit drm_kms_helper ttm drm ahci i2c_core libahci mlx4_core libata tg3 ptp pps_core megaraid_sas ntb dm_mirror dm_region_hash dm_log dm_mod
+[  327.539956] CPU: 3 PID: 3161 Comm: qemu-kvm Not tainted 3.10.0-240.el7.userfault19.4ca4011.x86_64.debug #1
+[  327.540045] Hardware name: Dell Inc. PowerEdge R420/0CN7CM, BIOS 2.1.2 01/20/2014
+[  327.540115] task: ffff8803280ccf00 ti: ffff880317c58000 task.ti: ffff880317c58000
+[  327.540184] RIP: 0010:[<ffffffff811a7b55>]  [<ffffffff811a7b55>] put_page+0x5/0x50
+[  327.540261] RSP: 0018:ffff880317c5bcf8  EFLAGS: 00010246
+[  327.540313] RAX: 00057ffffffff000 RBX: ffff880616a20000 RCX: 0000000000000000
+[  327.540379] RDX: 0000000000002014 RSI: 00057ffffffff000 RDI: fffffffffffffffe
+[  327.540445] RBP: ffff880317c5bd10 R08: 0000000000000103 R09: 0000000000000000
+[  327.540511] R10: 0000000000000000 R11: 0000000000000000 R12: fffffffffffffffe
+[  327.540576] R13: 0000000000000000 R14: ffff880317c5bd70 R15: ffff880317c5bd50
+[  327.540643] FS:  00007fd230b7f700(0000) GS:ffff880630800000(0000) knlGS:0000000000000000
+[  327.540717] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  327.540771] CR2: fffffffffffffffe CR3: 000000062a2c3000 CR4: 00000000000427e0
+[  327.540837] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[  327.540904] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+[  327.540974] Stack:
+[  327.541008]  ffffffffa05d6d0c ffff880616a20000 0000000000000000 ffff880317c5bdc0
+[  327.541093]  ffffffffa05ddaa2 0000000000000000 00000000002191bf 00000042f3feab2d
+[  327.541177]  00000042f3feab2d 0000000000000002 0000000000000001 0321000000000000
+[  327.541261] Call Trace:
+[  327.541321]  [<ffffffffa05d6d0c>] ? kvm_vcpu_reload_apic_access_page+0x6c/0x80 [kvm]
+[  327.543615]  [<ffffffffa05ddaa2>] vcpu_enter_guest+0x3f2/0x10f0 [kvm]
+[  327.545918]  [<ffffffffa05e2f10>] kvm_arch_vcpu_ioctl_run+0x2b0/0x5a0 [kvm]
+[  327.548211]  [<ffffffffa05e2d02>] ? kvm_arch_vcpu_ioctl_run+0xa2/0x5a0 [kvm]
+[  327.550500]  [<ffffffffa05ca845>] kvm_vcpu_ioctl+0x2b5/0x680 [kvm]
+[  327.552768]  [<ffffffff810b8d12>] ? creds_are_invalid.part.1+0x12/0x50
+[  327.555069]  [<ffffffff810b8d71>] ? creds_are_invalid+0x21/0x30
+[  327.557373]  [<ffffffff812d6066>] ? inode_has_perm.isra.49.constprop.65+0x26/0x80
+[  327.559663]  [<ffffffff8122d985>] do_vfs_ioctl+0x305/0x530
+[  327.561917]  [<ffffffff8122dc51>] SyS_ioctl+0xa1/0xc0
+[  327.564185]  [<ffffffff816de829>] system_call_fastpath+0x16/0x1b
+[  327.566480] Code: 0b 31 f6 4c 89 e7 e8 4b 7f ff ff 0f 0b e8 24 fd ff ff e9 a9 fd ff ff 66 66 66 66 66 66 2e 0f 1f 84 00 00 00 00 00 66 66 66 66 90 <48> f7 07 00 c0 00 00 55 48 89 e5 75 2a 8b 47 1c 85 c0 74 1e f0
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6141,6 +6141,8 @@ void kvm_vcpu_reload_apic_access_page(st
+               return;
+       page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
++      if (is_error_page(page))
++              return;
+       kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
+       /*
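
The guard added above follows the usual error-sentinel pattern: the page
lookup returns a distinguishable error value rather than NULL, and the fix
is simply to bail out before that value gets dereferenced. A hypothetical
userspace sketch (the sentinel value and function names are invented, not
KVM's API):

#include <stdio.h>

#define ERROR_PAGE ((void *)-2L)   /* invented sentinel for the sketch */

static void *lookup_apic_page(int backing_still_mapped)
{
        return backing_still_mapped ? (void *)0x1000 : ERROR_PAGE;
}

int main(void)
{
        void *page = lookup_apic_page(0);  /* userland munmap'ed the vma */

        if (page == ERROR_PAGE)            /* models is_error_page(page) */
                return 0;                  /* the added early return */
        printf("reloading APIC access page at %p\n", page);
        return 0;
}
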
diff --git a/queue-4.0/kvm-fpu-enable-eager-restore-kvm-fpu-for-mpx.patch b/queue-4.0/kvm-fpu-enable-eager-restore-kvm-fpu-for-mpx.patch
new file mode 100644 (file)
index 0000000..5b56bea
--- /dev/null
@@ -0,0 +1,108 @@
+From c447e76b4cabb49ddae8e49c5758f031f35d55fb Mon Sep 17 00:00:00 2001
+From: Liang Li <liang.z.li@intel.com>
+Date: Thu, 21 May 2015 04:41:25 +0800
+Subject: kvm/fpu: Enable eager restore kvm FPU for MPX
+
+From: Liang Li <liang.z.li@intel.com>
+
+commit c447e76b4cabb49ddae8e49c5758f031f35d55fb upstream.
+
+The MPX feature requires eager KVM FPU restore support. We have verified
+that MPX cannot work correctly with the current lazy KVM FPU restore
+mechanism. Eager KVM FPU restore should be enabled if the MPX feature is
+exposed to the VM.
+
+Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
+Signed-off-by: Liang Li <liang.z.li@intel.com>
+[Also activate the FPU on AMD processors. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h |    1 +
+ arch/x86/kvm/cpuid.c            |    4 ++++
+ arch/x86/kvm/cpuid.h            |    8 ++++++++
+ arch/x86/kvm/x86.c              |   16 ++++++++++++++--
+ 4 files changed, 27 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -405,6 +405,7 @@ struct kvm_vcpu_arch {
+       struct kvm_mmu_memory_cache mmu_page_header_cache;
+       struct fpu guest_fpu;
++      bool eager_fpu;
+       u64 xcr0;
+       u64 guest_supported_xcr0;
+       u32 guest_xstate_size;
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -16,6 +16,8 @@
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
++#include <asm/i387.h> /* For use_eager_fpu.  Ugh! */
++#include <asm/fpu-internal.h> /* For use_eager_fpu.  Ugh! */
+ #include <asm/user.h>
+ #include <asm/xsave.h>
+ #include "cpuid.h"
+@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vc
+       if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+               best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
++      vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
++
+       /*
+        * The existing code assumes virtual address is 48-bit in the canonical
+        * address checks; exit if it is ever changed.
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -111,4 +111,12 @@ static inline bool guest_cpuid_has_rtm(s
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_RTM));
+ }
++
++static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
++{
++      struct kvm_cpuid_entry2 *best;
++
++      best = kvm_find_cpuid_entry(vcpu, 7, 0);
++      return best && (best->ebx & bit(X86_FEATURE_MPX));
++}
+ #endif
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6996,7 +6996,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *
+       fpu_save_init(&vcpu->arch.guest_fpu);
+       __kernel_fpu_end();
+       ++vcpu->stat.fpu_reload;
+-      kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
++      if (!vcpu->arch.eager_fpu)
++              kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
++
+       trace_kvm_fpu(0);
+ }
+@@ -7012,11 +7014,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu
+ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+                                               unsigned int id)
+ {
++      struct kvm_vcpu *vcpu;
++
+       if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
+               printk_once(KERN_WARNING
+               "kvm: SMP vm created on host with unstable TSC; "
+               "guest TSC will not be reliable\n");
+-      return kvm_x86_ops->vcpu_create(kvm, id);
++
++      vcpu = kvm_x86_ops->vcpu_create(kvm, id);
++
++      /*
++       * Activate fpu unconditionally in case the guest needs eager FPU.  It will be
++       * deactivated soon if it doesn't.
++       */
++      kvm_x86_ops->fpu_activate(vcpu);
++      return vcpu;
+ }
+ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
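
The new guest_cpuid_has_mpx() helper boils down to testing one CPUID
feature bit. As an aside, the same bit can be probed on the host with
GCC/Clang's cpuid.h wrapper; MPX is advertised in CPUID.(EAX=7,ECX=0):EBX
bit 14, the same bit the guest-side helper tests. A sketch, not KVM code:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned int mpx;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                return 1;
        mpx = ebx & (1u << 14);            /* X86_FEATURE_MPX */
        printf("MPX %s => %s FPU restore\n",
               mpx ? "present" : "absent",
               mpx ? "eager" : "lazy");
        return 0;
}
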
diff --git a/queue-4.0/kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch b/queue-4.0/kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch
new file mode 100644 (file)
index 0000000..21ac551
--- /dev/null
@@ -0,0 +1,32 @@
+From 898761158be7682082955e3efa4ad24725305fc7 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 2 Apr 2015 11:04:05 +0200
+Subject: KVM: MMU: fix CR4.SMEP=1, CR0.WP=0 with shadow pages
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 898761158be7682082955e3efa4ad24725305fc7 upstream.
+
+smep_andnot_wp is initialized in kvm_init_shadow_mmu and shadow pages
+should not be reused for different values of it.  Thus, it has to be
+added to the mask in kvm_mmu_pte_write.
+
+Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4238,7 +4238,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
+       ++vcpu->kvm->stat.mmu_pte_write;
+       kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+-      mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
++      mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
+       for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+               if (detect_write_misaligned(sp, gpa, bytes) ||
+                     detect_write_flooding(sp)) {
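
Why the one-line mask change matters: kvm_mmu_page_role is a union, so
roles are compared wholesale as one word, and any bit left out of the mask
is invisible to the comparison. A minimal model (names invented for the
sketch):

#include <stdio.h>

union role_model {
        unsigned int word;
        struct {
                unsigned int cr0_wp:1;
                unsigned int cr4_pae:1;
                unsigned int nxe:1;
                unsigned int smep_andnot_wp:1;
        };
};

int main(void)
{
        union role_model mask = { .word = 0 };
        union role_model a = { .word = 0 }, b = { .word = 0 };

        mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
        a.smep_andnot_wp = 1;   /* page created while SMEP=1, CR0.WP=0 */

        /* Without smep_andnot_wp in the mask, a and b compared equal
         * and the page was wrongly reused. */
        printf("%s\n", ((a.word ^ b.word) & mask.word) ?
               "roles differ: no reuse" : "roles match: page reused");
        return 0;
}
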
diff --git a/queue-4.0/kvm-mmu-fix-smap-permission-check.patch b/queue-4.0/kvm-mmu-fix-smap-permission-check.patch
new file mode 100644 (file)
index 0000000..8eb4da4
--- /dev/null
@@ -0,0 +1,51 @@
+From 7cbeed9bce7580479bb97457dad220cb3594b875 Mon Sep 17 00:00:00 2001
+From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+Date: Thu, 7 May 2015 16:20:15 +0800
+Subject: KVM: MMU: fix smap permission check
+
+From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+
+commit 7cbeed9bce7580479bb97457dad220cb3594b875 upstream.
+
+The current permission check assumes that the RSVD bit in PFEC is always
+zero; however, this is not true, since an MMIO #PF uses it to quickly
+identify MMIO access.
+
+Fix it by clearing the bit when a walk of the guest page table is needed.
+
+Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.h         |    2 ++
+ arch/x86/kvm/paging_tmpl.h |    7 +++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -166,6 +166,8 @@ static inline bool permission_fault(stru
+       int index = (pfec >> 1) +
+                   (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
++      WARN_ON(pfec & PFERR_RSVD_MASK);
++
+       return (mmu->permissions[index] >> pte_access) & 1;
+ }
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_
+                                             mmu_is_nested(vcpu));
+               if (likely(r != RET_MMIO_PF_INVALID))
+                       return r;
++
++              /*
++               * page fault with PFEC.RSVD  = 1 is caused by shadow
++               * page fault, should not be used to walk guest page
++               * table.
++               */
++              error_code &= ~PFERR_RSVD_MASK;
+       };
+       r = mmu_topup_memory_caches(vcpu);
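
A tiny sketch of the sanitizing step the patch adds, assuming the standard
x86 page-fault error code layout where bit 3 is RSVD (the bit the MMIO
fast path reuses as a marker, and which therefore must be stripped before
the guest page-table walk):

#include <stdio.h>

#define PFERR_PRESENT_MASK (1u << 0)
#define PFERR_RSVD_MASK    (1u << 3)

int main(void)
{
        unsigned int error_code = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;

        error_code &= ~PFERR_RSVD_MASK;   /* the line the patch adds */
        printf("walking guest page table with pfec=%#x\n", error_code);
        return 0;
}
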
diff --git a/queue-4.0/kvm-mmu-fix-smap-virtualization.patch b/queue-4.0/kvm-mmu-fix-smap-virtualization.patch
new file mode 100644 (file)
index 0000000..8e30892
--- /dev/null
@@ -0,0 +1,165 @@
+From 0be0226f07d14b153a5eedf2bb86e1eb7dcefab5 Mon Sep 17 00:00:00 2001
+From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+Date: Mon, 11 May 2015 22:55:21 +0800
+Subject: KVM: MMU: fix SMAP virtualization
+
+From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+
+commit 0be0226f07d14b153a5eedf2bb86e1eb7dcefab5 upstream.
+
+KVM may turn a user page into a kernel page when the kernel writes a
+readonly user page while CR0.WP = 0. This shadow page entry will be
+reused after SMAP is enabled, letting the kernel access the user page.
+
+Fix it by setting SMAP && !CR0.WP in the shadow page's role and
+resetting the MMU once CR4.SMAP is updated.
+
+Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/virtual/kvm/mmu.txt |   18 ++++++++++++++----
+ arch/x86/include/asm/kvm_host.h   |    1 +
+ arch/x86/kvm/mmu.c                |   16 ++++++++++++----
+ arch/x86/kvm/mmu.h                |    2 --
+ arch/x86/kvm/x86.c                |    8 +++-----
+ 5 files changed, 30 insertions(+), 15 deletions(-)
+
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -169,6 +169,10 @@ Shadow pages contain the following infor
+     Contains the value of cr4.smep && !cr0.wp for which the page is valid
+     (pages for which this is true are different from other pages; see the
+     treatment of cr0.wp=0 below).
++  role.smap_andnot_wp:
++    Contains the value of cr4.smap && !cr0.wp for which the page is valid
++    (pages for which this is true are different from other pages; see the
++    treatment of cr0.wp=0 below).
+   gfn:
+     Either the guest page table containing the translations shadowed by this
+     page, or the base page frame for linear translations.  See role.direct.
+@@ -344,10 +348,16 @@ on fault type:
+ (user write faults generate a #PF)
+-In the first case there is an additional complication if CR4.SMEP is
+-enabled: since we've turned the page into a kernel page, the kernel may now
+-execute it.  We handle this by also setting spte.nx.  If we get a user
+-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
++In the first case there are two additional complications:
++- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
++  the kernel may now execute it.  We handle this by also setting spte.nx.
++  If we get a user fetch or read fault, we'll change spte.u=1 and
++  spte.nx=gpte.nx back.
++- if CR4.SMAP is disabled: since the page has been changed to a kernel
++  page, it can not be reused when CR4.SMAP is enabled. We set
++  CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
++  here we do not care the case that CR4.SMAP is enabled since KVM will
++  directly inject #PF to guest due to failed permission check.
+ To prevent an spte that was converted into a kernel page with cr0.wp=0
+ from being written by the kernel after cr0.wp has changed to 1, we make
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -212,6 +212,7 @@ union kvm_mmu_page_role {
+               unsigned nxe:1;
+               unsigned cr0_wp:1;
+               unsigned smep_andnot_wp:1;
++              unsigned smap_andnot_wp:1;
+       };
+ };
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(st
+       }
+ }
+-void update_permission_bitmask(struct kvm_vcpu *vcpu,
+-              struct kvm_mmu *mmu, bool ept)
++static void update_permission_bitmask(struct kvm_vcpu *vcpu,
++                                    struct kvm_mmu *mmu, bool ept)
+ {
+       unsigned bit, byte, pfec;
+       u8 map;
+@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_
+ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+ {
+       bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
++      bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+       struct kvm_mmu *context = &vcpu->arch.mmu;
+       MMU_WARN_ON(VALID_PAGE(context->root_hpa));
+@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu
+       context->base_role.cr0_wp  = is_write_protection(vcpu);
+       context->base_role.smep_andnot_wp
+               = smep && !is_write_protection(vcpu);
++      context->base_role.smap_andnot_wp
++              = smap && !is_write_protection(vcpu);
+ }
+ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
+@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
+                      const u8 *new, int bytes)
+ {
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+-      union kvm_mmu_page_role mask = { .word = 0 };
+       struct kvm_mmu_page *sp;
+       LIST_HEAD(invalid_list);
+       u64 entry, gentry, *spte;
+       int npte;
+       bool remote_flush, local_flush, zap_page;
++      union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
++              .cr0_wp = 1,
++              .cr4_pae = 1,
++              .nxe = 1,
++              .smep_andnot_wp = 1,
++              .smap_andnot_wp = 1,
++      };
+       /*
+        * If we don't have indirect shadow pages, it means no page is
+@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
+       ++vcpu->kvm->stat.mmu_pte_write;
+       kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+-      mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
+       for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+               if (detect_write_misaligned(sp, gpa, bytes) ||
+                     detect_write_flooding(sp)) {
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -71,8 +71,6 @@ enum {
+ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
+ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+-void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+-              bool ept);
+ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+ {
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
+ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ {
+       unsigned long old_cr4 = kvm_read_cr4(vcpu);
+-      unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
+-                                 X86_CR4_PAE | X86_CR4_SMEP;
++      unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
++                                 X86_CR4_SMEP | X86_CR4_SMAP;
++
+       if (cr4 & CR4_RESERVED_BITS)
+               return 1;
+@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, u
+           (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
+               kvm_mmu_reset_context(vcpu);
+-      if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
+-              update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
+-
+       if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
+               kvm_update_cpuid(vcpu);
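
The new role bit is just the conjunction of two control-register bits; a
standalone sketch using the architectural bit positions (CR0.WP is bit 16,
CR4.SMAP is bit 21), with the helper name invented:

#include <stdio.h>
#include <stdbool.h>

#define X86_CR0_WP   (1ul << 16)
#define X86_CR4_SMAP (1ul << 21)

static bool smap_andnot_wp(unsigned long cr0, unsigned long cr4)
{
        return (cr4 & X86_CR4_SMAP) && !(cr0 & X86_CR0_WP);
}

int main(void)
{
        /* A shadow page built while CR0.WP=0 gets a different role once
         * CR4.SMAP is on, so it cannot be reused to let the kernel
         * bypass SMAP. */
        printf("smap_andnot_wp=%d\n", smap_andnot_wp(0, X86_CR4_SMAP));
        return 0;
}
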
diff --git a/queue-4.0/revert-kvm-x86-drop-fpu_activate-hook.patch b/queue-4.0/revert-kvm-x86-drop-fpu_activate-hook.patch
new file mode 100644 (file)
index 0000000..81be3c5
--- /dev/null
@@ -0,0 +1,51 @@
+From 0fdd74f7784b5cdff7075736992bbb149b1ae49c Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 20 May 2015 11:33:43 +0200
+Subject: Revert "KVM: x86: drop fpu_activate hook"
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 0fdd74f7784b5cdff7075736992bbb149b1ae49c upstream.
+
+This reverts commit 4473b570a7ebb502f63f292ccfba7df622e5fdd3.  We'll
+use the hook again.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h |    1 +
+ arch/x86/kvm/svm.c              |    1 +
+ arch/x86/kvm/vmx.c              |    1 +
+ 3 files changed, 3 insertions(+)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -735,6 +735,7 @@ struct kvm_x86_ops {
+       void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+       unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+       void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
++      void (*fpu_activate)(struct kvm_vcpu *vcpu);
+       void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
+       void (*tlb_flush)(struct kvm_vcpu *vcpu);
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4374,6 +4374,7 @@ static struct kvm_x86_ops svm_x86_ops =
+       .cache_reg = svm_cache_reg,
+       .get_rflags = svm_get_rflags,
+       .set_rflags = svm_set_rflags,
++      .fpu_activate = svm_fpu_activate,
+       .fpu_deactivate = svm_fpu_deactivate,
+       .tlb_flush = svm_flush_tlb,
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -10179,6 +10179,7 @@ static struct kvm_x86_ops vmx_x86_ops =
+       .cache_reg = vmx_cache_reg,
+       .get_rflags = vmx_get_rflags,
+       .set_rflags = vmx_set_rflags,
++      .fpu_activate = vmx_fpu_activate,
+       .fpu_deactivate = vmx_fpu_deactivate,
+       .tlb_flush = vmx_flush_tlb,
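
For context, kvm_x86_ops is a vendor ops table: common x86 code calls
through function pointers that VMX and SVM each fill in, which is why the
revert only needs to restore one struct member and two initializers. A
minimal model (names invented):

#include <stdio.h>

struct x86_ops_model {
        void (*fpu_activate)(void);
        void (*fpu_deactivate)(void);
};

static void vendor_fpu_activate(void)   { puts("fpu activated"); }
static void vendor_fpu_deactivate(void) { puts("fpu deactivated"); }

static const struct x86_ops_model ops = {
        .fpu_activate   = vendor_fpu_activate,   /* the restored hook */
        .fpu_deactivate = vendor_fpu_deactivate,
};

int main(void)
{
        /* What the eager-FPU patch's kvm_arch_vcpu_create() calls. */
        ops.fpu_activate();
        return 0;
}
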
diff --git a/queue-4.0/series b/queue-4.0/series
index 7d04ad7dc3ea970e3b0846abae0cb11f004fa109..ea061f51ede35e2d3f9f4d0262452fc3daa6f17a 100644 (file)
@@ -1,2 +1,13 @@
 mnt-fail-collect_mounts-when-applied-to-unmounted-mounts.patch
 fs_pin-allow-for-the-possibility-that-m_list-or-s_list-go-unused.patch
+iommu-amd-fix-bug-in-put_pasid_state_wait.patch
+iommu-arm-smmu-fix-sign-extension-of-upstream-bus-addresses-at-stage-1.patch
+revert-kvm-x86-drop-fpu_activate-hook.patch
+x86-mce-fix-mce-severity-messages.patch
+x86-fpu-disable-xsaves-support-for-now.patch
+kvm-mmu-fix-cr4.smep-1-cr0.wp-0-with-shadow-pages.patch
+kvm-mmu-fix-smap-permission-check.patch
+kvm-fix-crash-in-kvm_vcpu_reload_apic_access_page.patch
+kvm-mmu-fix-smap-virtualization.patch
+kvm-fpu-enable-eager-restore-kvm-fpu-for-mpx.patch
+ktime-fix-ktime_divns-to-do-signed-division.patch
diff --git a/queue-4.0/x86-fpu-disable-xsaves-support-for-now.patch b/queue-4.0/x86-fpu-disable-xsaves-support-for-now.patch
new file mode 100644 (file)
index 0000000..37b618e
--- /dev/null
@@ -0,0 +1,78 @@
+From e88221c50cadade0eb4f7f149f4967d760212695 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Wed, 20 May 2015 11:45:30 +0200
+Subject: x86/fpu: Disable XSAVES* support for now
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit e88221c50cadade0eb4f7f149f4967d760212695 upstream.
+
+The kernel's handling of 'compacted' xsave state layout is buggy:
+
+    http://marc.info/?l=linux-kernel&m=142967852317199
+
+I don't have such a system, and the description there is vague, but
+from extrapolation I guess that there were two kinds of bugs
+observed:
+
+  - boot crashes, due to size calculations being wrong and the dynamic
+    allocation allocating a too small xstate area. (This is now fixed
+    in the new FPU code - but still present in stable kernels.)
+
+  - FPU state corruption and ABI breakage: if signal handlers try to
+    change the FPU state in standard format, which then the kernel
+    tries to restore in the compacted format.
+
+These breakages are scary, but they only occur on a small number of
+systems that have XSAVES* CPU support. Yet we have had XSAVES support
+in the upstream kernel for a large number of stable kernel releases,
+and the fixes are involved and unproven.
+
+So do the safe resolution first: disable XSAVES* support and only
+use the standard xstate format. This makes the code work and is
+easy to backport.
+
+On top of this we can work on enabling (and testing!) proper
+compacted format support, without backporting pressure, on top of the
+new, cleaned up FPU code.
+
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/i387.c |   15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -169,6 +169,21 @@ static void init_thread_xstate(void)
+               xstate_size = sizeof(struct i387_fxsave_struct);
+       else
+               xstate_size = sizeof(struct i387_fsave_struct);
++
++      /*
++       * Quirk: we don't yet handle the XSAVES* instructions
++       * correctly, as we don't correctly convert between
++       * standard and compacted format when interfacing
++       * with user-space - so disable it for now.
++       *
++       * The difference is small: with recent CPUs the
++       * compacted format is only marginally smaller than
++       * the standard FPU state format.
++       *
++       * ( This is easy to backport while we are fixing
++       *   XSAVES* support. )
++       */
++      setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+ }
+ /*
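
The mechanism behind setup_clear_cpu_cap() is a feature bitmap cleared
early enough that every later capability test sees the feature as absent.
A userspace sketch; the bit index is made up for illustration, not the
kernel's X86_FEATURE_XSAVES value:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define FEATURE_XSAVES 55   /* invented bit index for the sketch */

static uint64_t caps[2];    /* room for 128 feature bits */

static void set_cap(int f)   { caps[f / 64] |=  (uint64_t)1 << (f % 64); }
static void clear_cap(int f) { caps[f / 64] &= ~((uint64_t)1 << (f % 64)); }
static bool has_cap(int f)   { return caps[f / 64] & ((uint64_t)1 << (f % 64)); }

int main(void)
{
        set_cap(FEATURE_XSAVES);     /* CPUID says the CPU supports XSAVES */
        clear_cap(FEATURE_XSAVES);   /* the quirk: force it off for now */
        printf("xsaves %s\n", has_cap(FEATURE_XSAVES) ? "on" : "off");
        return 0;
}
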
diff --git a/queue-4.0/x86-mce-fix-mce-severity-messages.patch b/queue-4.0/x86-mce-fix-mce-severity-messages.patch
new file mode 100644 (file)
index 0000000..244897d
--- /dev/null
@@ -0,0 +1,72 @@
+From 17fea54bf0ab34fa09a06bbde2f58ed7bbdf9299 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 18 May 2015 10:07:17 +0200
+Subject: x86/mce: Fix MCE severity messages
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 17fea54bf0ab34fa09a06bbde2f58ed7bbdf9299 upstream.
+
+Derek noticed that a critical MCE gets reported with the wrong
+error type description:
+
+  [Hardware Error]: CPU 34: Machine Check Exception: 5 Bank 9: f200003f000100b0
+  [Hardware Error]: RIP !INEXACT! 10:<ffffffff812e14c1> {intel_idle+0xb1/0x170}
+  [Hardware Error]: TSC 49587b8e321cb
+  [Hardware Error]: PROCESSOR 0:306e4 TIME 1431561296 SOCKET 1 APIC 29
+  [Hardware Error]: Some CPUs didn't answer in synchronization
+  [Hardware Error]: Machine check: Invalid
+                                  ^^^^^^^
+
+The last line with 'Invalid' should have printed the high level
+MCE error type description we get from mce_severity, i.e.
+something like:
+
+  [Hardware Error]: Machine check: Action required: data load error in a user process
+
+This happens because mce_no_way_out() iterates over all MCA banks
+and possibly overwrites the @msg argument which is used in the
+panic printing later.
+
+Change the behavior to take only the message of the (last)
+critical MCE it detects.
+
+Reported-by: Derek <denc716@gmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Link: http://lkml.kernel.org/r/1431936437-25286-3-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mcheck/mce.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -705,6 +705,7 @@ static int mce_no_way_out(struct mce *m,
+                         struct pt_regs *regs)
+ {
+       int i, ret = 0;
++      char *tmp;
+       for (i = 0; i < mca_cfg.banks; i++) {
+               m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+@@ -713,9 +714,11 @@ static int mce_no_way_out(struct mce *m,
+                       if (quirk_no_way_out)
+                               quirk_no_way_out(i, m, regs);
+               }
+-              if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
+-                  MCE_PANIC_SEVERITY)
++
++              if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
++                      *msg = tmp;
+                       ret = 1;
++              }
+       }
+       return ret;
+ }
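
A toy model of the message-clobbering bug and its fix; all severities,
strings, and table contents below are invented for illustration:

#include <stdio.h>

#define MCE_PANIC_SEVERITY 3

/* Toy severity table: bank 1 carries the critical error, banks 0 and 2
 * are benign. */
static int severity(int bank, char **msg)
{
        static char * const desc[] = {
                "benign", "Action required: data load error", "benign",
        };
        static const int sev[] = { 1, MCE_PANIC_SEVERITY, 1 };

        *msg = desc[bank];
        return sev[bank];
}

int main(void)
{
        char *msg = "Invalid", *tmp;

        for (int i = 0; i < 3; i++) {
                /* The fix: let every call scribble on tmp, but only
                 * commit it to msg when the severity is critical, so a
                 * later benign bank can no longer clobber the message. */
                if (severity(i, &tmp) >= MCE_PANIC_SEVERITY)
                        msg = tmp;
        }
        printf("Machine check: %s\n", msg);
        return 0;
}
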