From 8a331f4a0863bea758561c921b94b4d28f7c4029 Mon Sep 17 00:00:00 2001
From: Yazen Ghannam <yazen.ghannam@amd.com>
Date: Wed, 21 Feb 2018 11:19:00 +0100
Subject: x86/mce/AMD: Carve out SMCA get_block_address() code

From: Yazen Ghannam <yazen.ghannam@amd.com>

commit 8a331f4a0863bea758561c921b94b4d28f7c4029 upstream.

Carve out the SMCA code in get_block_address() into a separate helper
function.

No functional change.

Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
[ Save an indentation level. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-edac <linux-edac@vger.kernel.org>
Link: http://lkml.kernel.org/r/20180215210943.11530-4-Yazen.Ghannam@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kernel/cpu/mcheck/mce_amd.c | 57 +++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 26 deletions(-)

--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -429,6 +429,35 @@ static void deferred_error_interrupt_ena
 	wrmsr(MSR_CU_DEF_ERR, low, high);
 }
 
+static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
+				  unsigned int block)
+{
+	u32 low, high;
+	u32 addr = 0;
+
+	if (smca_get_bank_type(bank) == SMCA_RESERVED)
+		return addr;
+
+	if (!block)
+		return MSR_AMD64_SMCA_MCx_MISC(bank);
+
+	/*
+	 * For SMCA enabled processors, BLKPTR field of the first MISC register
+	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
+	 */
+	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
+		return addr;
+
+	if (!(low & MCI_CONFIG_MCAX))
+		return addr;
+
+	if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
+	    (low & MASK_BLKPTR_LO))
+		return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+
+	return addr;
+}
+
 static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
 			     unsigned int bank, unsigned int block)
 {
@@ -449,32 +478,8 @@ static u32 get_block_address(unsigned in
 		}
 	}
 
-	if (mce_flags.smca) {
-		if (smca_get_bank_type(bank) == SMCA_RESERVED)
-			return addr;
-
-		if (!block) {
-			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
-		} else {
-			/*
-			 * For SMCA enabled processors, BLKPTR field of the
-			 * first MISC register (MCx_MISC0) indicates presence of
-			 * additional MISC register set (MISC1-4).
-			 */
-			u32 low, high;
-
-			if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
-				return addr;
-
-			if (!(low & MCI_CONFIG_MCAX))
-				return addr;
-
-			if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
-			    (low & MASK_BLKPTR_LO))
-				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
-		}
-		return addr;
-	}
+	if (mce_flags.smca)
+		return smca_get_block_address(cpu, bank, block);
 
 	/* Fall back to method we used for older processors: */
 	switch (block) {
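
[ Editorial aside, not part of the patch: the carved-out helper is a chain of
early returns, which is what saves the indentation level noted above. For
readers who want to trace the decision flow outside the kernel tree, below is
a minimal userspace sketch. The MSR layout macros, the rdmsr stub, and the
bank-type check are hypothetical stand-ins invented for illustration; only
the control flow mirrors the patch. ]

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Hypothetical stand-ins for the kernel definitions the patch relies on. */
#define SMCA_RESERVED		0
#define MCI_CONFIG_MCAX		0x1
#define MASK_BLKPTR_LO		0xFF000000u
#define MISC_BASE(bank)		(0xC0002003u + 0x10u * (bank))	/* assumed layout */
#define CONFIG_BASE(bank)	(0xC0002004u + 0x10u * (bank))
#define MISCY(bank, y)		(0xC000200Au + 8u * (y) + 0x10u * (bank))

/* Stubbed MSR read: pretend every read succeeds with MCAX and BLKPTR set. */
static int rdmsr_stub(uint32_t msr, uint32_t *low, uint32_t *high)
{
	(void)msr;			/* a real implementation would select the MSR */
	*low = MCI_CONFIG_MCAX | MASK_BLKPTR_LO;
	*high = 0;
	return 0;			/* 0 == success, matching rdmsr_safe_on_cpu() */
}

/* Hypothetical bank-type lookup: treat bank 0 as reserved for this demo. */
static int bank_type(unsigned int bank)
{
	return bank == 0 ? SMCA_RESERVED : 1;
}

/* Mirrors the early-return flow of smca_get_block_address() in the patch. */
static uint32_t smca_block_address(unsigned int bank, unsigned int block)
{
	uint32_t low, high, addr = 0;

	if (bank_type(bank) == SMCA_RESERVED)	/* reserved banks: no MISC regs */
		return addr;

	if (!block)				/* block 0 always lives at MCx_MISC */
		return MISC_BASE(bank);

	if (rdmsr_stub(CONFIG_BASE(bank), &low, &high))
		return addr;			/* read failed: no address */

	if (!(low & MCI_CONFIG_MCAX))		/* bank not operating in MCAX mode */
		return addr;

	/* BLKPTR set in MISC0 means MISC1-4 exist at fixed offsets. */
	if (!rdmsr_stub(MISC_BASE(bank), &low, &high) && (low & MASK_BLKPTR_LO))
		return MISCY(bank, block - 1);

	return addr;
}

int main(void)
{
	printf("bank 1 block 0 -> %#" PRIx32 "\n", smca_block_address(1, 0));
	printf("bank 1 block 2 -> %#" PRIx32 "\n", smca_block_address(1, 2));
	printf("bank 0 block 1 -> %#" PRIx32 "\n", smca_block_address(0, 1));
	return 0;
}

[ With the stubbed values, the first call resolves to the MISC0 address, the
second to a MISC1-4 address, and the third returns 0 because the bank is
reserved, the same three exits the kernel helper takes. ]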