--- /dev/null
+From 934a51fb4701229a1ad7aa0b89f43fec38206b62 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Wed, 14 Aug 2019 15:59:50 +0300
+Subject: scsi: ufs: Fix NULL pointer dereference in ufshcd_config_vreg_hpm()
+
+[ Upstream commit 7c7cfdcf7f1777c7376fc9a239980de04b6b5ea1 ]
+
+Fix the following BUG:
+
+ [ 187.065689] BUG: kernel NULL pointer dereference, address: 000000000000001c
+ [ 187.065790] RIP: 0010:ufshcd_vreg_set_hpm+0x3c/0x110 [ufshcd_core]
+ [ 187.065938] Call Trace:
+ [ 187.065959] ufshcd_resume+0x72/0x290 [ufshcd_core]
+ [ 187.065980] ufshcd_system_resume+0x54/0x140 [ufshcd_core]
+ [ 187.065993] ? pci_pm_restore+0xb0/0xb0
+ [ 187.066005] ufshcd_pci_resume+0x15/0x20 [ufshcd_pci]
+ [ 187.066017] pci_pm_thaw+0x4c/0x90
+ [ 187.066030] dpm_run_callback+0x5b/0x150
+ [ 187.066043] device_resume+0x11b/0x220
+
+Voltage regulators are optional, so functions must check that they
+exist before dereferencing them.
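+
+For readability, this is the shape of the fix (the same three lines the
+hunk below adds): bail out early when the optional regulator is absent,
+so that vreg->max_uA is never read through a NULL pointer.
+
+  static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+					   struct ufs_vreg *vreg)
+  {
+	/* Optional rail not described by the platform: nothing to do */
+	if (!vreg)
+		return 0;
+
+	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+  }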
+
+Note that this issue is hidden if CONFIG_REGULATOR is not set, because
+the offending code is optimised away.
+
+Notes for stable:
+
+The issue first appears in commit 57d104c153d3 ("ufs: add UFS power
+management support") but is inadvertently fixed in commit 60f0187031c0
+("scsi: ufs: disable vccq if it's not needed by UFS device") which in
+turn was reverted by commit 730679817d83 ("Revert "scsi: ufs: disable vccq
+if it's not needed by UFS device""). So fix applies v3.18 to v4.5 and
+v5.1+
+
+Fixes: 57d104c153d3 ("ufs: add UFS power management support")
+Fixes: 730679817d83 ("Revert "scsi: ufs: disable vccq if it's not needed by UFS device"")
+Cc: stable@vger.kernel.org
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshcd.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index b140e81c4f7da..fd8bbd2b5d0eb 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -4418,6 +4418,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+ static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+ {
++ if (!vreg)
++ return 0;
++
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+ }
+
+--
+2.20.1
+
--- /dev/null
+From bc467c2aca1750eb5c623c179bcce7fcca36c1bc Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 19 Aug 2019 15:52:35 +0000
+Subject: x86/CPU/AMD: Clear RDRAND CPUID bit on AMD family 15h/16h
+
+[ Upstream commit c49a0a80137c7ca7d6ced4c812c9e07a949f6f24 ]
+
+There have been reports of RDRAND issues after resuming from suspend on
+some AMD family 15h and family 16h systems. This issue stems from a BIOS
+not performing the proper steps during resume to ensure RDRAND continues
+to function properly.
+
+RDRAND support is indicated by CPUID Fn00000001_ECX[30]. This bit can be
+reset by clearing MSR C001_1004[62]. Any software that checks for RDRAND
+support using CPUID, including the kernel, will believe that RDRAND is
+not supported.
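+
+For illustration, a minimal user-space sketch (not from this patch) of
+the CPUID check described above; it uses GCC/Clang's <cpuid.h> helper
+and only looks at leaf 1, ECX bit 30:
+
+  #include <cpuid.h>
+  #include <stdbool.h>
+
+  static bool cpu_advertises_rdrand(void)
+  {
+	unsigned int eax, ebx, ecx, edx;
+
+	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
+		return false;
+
+	/* CPUID Fn00000001_ECX[30] is the RDRAND advertisement bit */
+	return ecx & (1u << 30);
+  }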
+
+Update the CPU initialization to clear the RDRAND CPUID bit for any family
+15h and 16h processor that supports RDRAND. If it is known that the family
+15h or family 16h system does not have an RDRAND resume issue or that the
+system will not be placed in suspend, the "rdrand=force" kernel parameter
+can be used to stop the clearing of the RDRAND CPUID bit.
+
+Additionally, update the suspend and resume path to save and restore the
+MSR C001_1004 value to ensure that the RDRAND CPUID setting remains in
+place after resuming from suspend.
+
+Note that clearing the RDRAND CPUID bit does not prevent a processor
+that normally supports the RDRAND instruction from executing it, so any
+code that determines support based on family and model will not #UD.
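+
+As an aside, a hedged sketch (GCC/Clang inline asm, x86-64 only, not
+part of this patch) of what "still executes" means in practice:
+
+  /* Returns 1 and fills *v on success; RDRAND sets CF on success.
+   * The instruction keeps working even when its CPUID bit is hidden,
+   * provided the CPU actually implements it. */
+  static inline int rdrand_u64(unsigned long long *v)
+  {
+	unsigned char ok;
+
+	asm volatile("rdrand %0; setc %1" : "=r" (*v), "=qm" (ok));
+	return ok;
+  }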
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Chen Yu <yu.c.chen@intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: "linux-doc@vger.kernel.org" <linux-doc@vger.kernel.org>
+Cc: "linux-pm@vger.kernel.org" <linux-pm@vger.kernel.org>
+Cc: Nathan Chancellor <natechancellor@gmail.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Pavel Machek <pavel@ucw.cz>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: <stable@vger.kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: "x86@kernel.org" <x86@kernel.org>
+Link: https://lkml.kernel.org/r/7543af91666f491547bd86cebb1e17c66824ab9f.1566229943.git.thomas.lendacky@amd.com
+[sl: adjust context in docs]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/kernel-parameters.txt | 7 +++
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/kernel/cpu/amd.c | 66 ++++++++++++++++++++++
+ arch/x86/power/cpu.c | 86 ++++++++++++++++++++++++-----
+ 4 files changed, 147 insertions(+), 13 deletions(-)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 7a9fd54a0186a..5b94c0bfba859 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3415,6 +3415,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ Run specified binary instead of /init from the ramdisk,
+ used for early userspace startup. See initrd.
+
++ rdrand= [X86]
++ force - Override the decision by the kernel to hide the
++ advertisement of RDRAND support (this affects
++ certain AMD processors because of buggy BIOS
++ support, specifically around the suspend/resume
++ path).
++
+ reboot= [KNL]
+ Format (x86 or x86_64):
+ [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index d4f5b8209393f..30183770132a0 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -311,6 +311,7 @@
+ #define MSR_AMD64_PATCH_LEVEL 0x0000008b
+ #define MSR_AMD64_TSC_RATIO 0xc0000104
+ #define MSR_AMD64_NB_CFG 0xc001001f
++#define MSR_AMD64_CPUID_FN_1 0xc0011004
+ #define MSR_AMD64_PATCH_LOADER 0xc0010020
+ #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
+ #define MSR_AMD64_OSVW_STATUS 0xc0010141
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 6f2483292de0b..424d8a636615a 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -684,6 +684,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
+ msr_set_bit(MSR_AMD64_DE_CFG, 31);
+ }
+
++static bool rdrand_force;
++
++static int __init rdrand_cmdline(char *str)
++{
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "force"))
++ rdrand_force = true;
++ else
++ return -EINVAL;
++
++ return 0;
++}
++early_param("rdrand", rdrand_cmdline);
++
++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
++{
++ /*
++ * Saving of the MSR used to hide the RDRAND support during
++ * suspend/resume is done by arch/x86/power/cpu.c, which is
++ * dependent on CONFIG_PM_SLEEP.
++ */
++ if (!IS_ENABLED(CONFIG_PM_SLEEP))
++ return;
++
++ /*
++ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
++ * RDRAND support using the CPUID function directly.
++ */
++ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
++ return;
++
++ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
++
++ /*
++ * Verify that the CPUID change has occurred in case the kernel is
++ * running virtualized and the hypervisor doesn't support the MSR.
++ */
++ if (cpuid_ecx(1) & BIT(30)) {
++ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
++ return;
++ }
++
++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
++ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
++}
++
++static void init_amd_jg(struct cpuinfo_x86 *c)
++{
++ /*
++ * Some BIOS implementations do not restore proper RDRAND support
++ * across suspend and resume. Check on whether to hide the RDRAND
++ * instruction support via CPUID.
++ */
++ clear_rdrand_cpuid_bit(c);
++}
++
+ static void init_amd_bd(struct cpuinfo_x86 *c)
+ {
+ u64 value;
+@@ -711,6 +769,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ wrmsrl_safe(0xc0011021, value);
+ }
+ }
++
++ /*
++ * Some BIOS implementations do not restore proper RDRAND support
++ * across suspend and resume. Check on whether to hide the RDRAND
++ * instruction support via CPUID.
++ */
++ clear_rdrand_cpuid_bit(c);
+ }
+
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+@@ -755,6 +820,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+ case 0x10: init_amd_gh(c); break;
+ case 0x12: init_amd_ln(c); break;
+ case 0x15: init_amd_bd(c); break;
++ case 0x16: init_amd_jg(c); break;
+ case 0x17: init_amd_zn(c); break;
+ }
+
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index d5f64996394a9..2e5052b2d2382 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -12,6 +12,7 @@
+ #include <linux/export.h>
+ #include <linux/smp.h>
+ #include <linux/perf_event.h>
++#include <linux/dmi.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+@@ -23,7 +24,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/cpu.h>
+ #include <asm/mmu_context.h>
+-#include <linux/dmi.h>
++#include <asm/cpu_device_id.h>
+
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -347,15 +348,14 @@ static int __init bsp_pm_check_init(void)
+
+ core_initcall(bsp_pm_check_init);
+
+-static int msr_init_context(const u32 *msr_id, const int total_num)
++static int msr_build_context(const u32 *msr_id, const int num)
+ {
+- int i = 0;
++ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
+ struct saved_msr *msr_array;
++ int total_num;
++ int i, j;
+
+- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
+- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
+- return -EINVAL;
+- }
++ total_num = saved_msrs->num + num;
+
+ msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
+ if (!msr_array) {
+@@ -363,19 +363,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
+ return -ENOMEM;
+ }
+
+- for (i = 0; i < total_num; i++) {
+- msr_array[i].info.msr_no = msr_id[i];
++ if (saved_msrs->array) {
++ /*
++ * Multiple callbacks can invoke this function, so copy any
++ * MSR save requests from previous invocations.
++ */
++ memcpy(msr_array, saved_msrs->array,
++ sizeof(struct saved_msr) * saved_msrs->num);
++
++ kfree(saved_msrs->array);
++ }
++
++ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
++ msr_array[i].info.msr_no = msr_id[j];
+ msr_array[i].valid = false;
+ msr_array[i].info.reg.q = 0;
+ }
+- saved_context.saved_msrs.num = total_num;
+- saved_context.saved_msrs.array = msr_array;
++ saved_msrs->num = total_num;
++ saved_msrs->array = msr_array;
+
+ return 0;
+ }
+
+ /*
+- * The following section is a quirk framework for problematic BIOSen:
++ * The following sections are a quirk framework for problematic BIOSen:
+ * Sometimes MSRs are modified by the BIOSen after suspended to
+ * RAM, this might cause unexpected behavior after wakeup.
+ * Thus we save/restore these specified MSRs across suspend/resume
+@@ -390,7 +401,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
+ u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
+
+ pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
+- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
++ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+ }
+
+ static struct dmi_system_id msr_save_dmi_table[] = {
+@@ -405,9 +416,58 @@ static struct dmi_system_id msr_save_dmi_table[] = {
+ {}
+ };
+
++static int msr_save_cpuid_features(const struct x86_cpu_id *c)
++{
++ u32 cpuid_msr_id[] = {
++ MSR_AMD64_CPUID_FN_1,
++ };
++
++ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
++ c->family);
++
++ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
++}
++
++static const struct x86_cpu_id msr_save_cpu_table[] = {
++ {
++ .vendor = X86_VENDOR_AMD,
++ .family = 0x15,
++ .model = X86_MODEL_ANY,
++ .feature = X86_FEATURE_ANY,
++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
++ },
++ {
++ .vendor = X86_VENDOR_AMD,
++ .family = 0x16,
++ .model = X86_MODEL_ANY,
++ .feature = X86_FEATURE_ANY,
++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
++ },
++ {}
++};
++
++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
++static int pm_cpu_check(const struct x86_cpu_id *c)
++{
++ const struct x86_cpu_id *m;
++ int ret = 0;
++
++ m = x86_match_cpu(msr_save_cpu_table);
++ if (m) {
++ pm_cpu_match_t fn;
++
++ fn = (pm_cpu_match_t)m->driver_data;
++ ret = fn(m);
++ }
++
++ return ret;
++}
++
+ static int pm_check_save_msr(void)
+ {
+ dmi_check_system(msr_save_dmi_table);
++ pm_cpu_check(msr_save_cpu_table);
++
+ return 0;
+ }
+
+--
+2.20.1
+
--- /dev/null
+From d63273440aa0fdebc30d0c931f15f79beb213134 Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Wed, 25 Nov 2015 01:03:41 +0800
+Subject: x86/pm: Introduce quirk framework to save/restore extra MSR registers
+ around suspend/resume
+
+A bug was reported that on certain Broadwell platforms, after
+resuming from S3, the CPU is running at an anomalously low
+speed.
+
+It turns out that the BIOS has modified the value of the THERM_CONTROL
+register during S3 and changed it from 0 to 0x10, thus enabling clock
+modulation (bit 4) but leaving the CPU Duty Cycle (bits 1:3) undefined -
+which causes the problem.
+
+Here is a simple scenario to reproduce the issue:
+
+ 1. Boot up the system
+ 2. Read MSR 0x19a; it should be 0 (one way to read it is sketched
+    after this list)
+ 3. Put the system into sleep, then wake it up
+ 4. Read MSR 0x19a again; it now shows 0x10, while it should still be 0
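+
+For steps 2 and 4, one way to read MSR 0x19a is via the msr character
+device (a sketch, assuming CONFIG_X86_MSR and root; msr-tools' rdmsr
+does the same thing):
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+	uint64_t val;
+	/* The msr driver uses the file offset as the MSR index */
+	int fd = open("/dev/cpu/0/msr", O_RDONLY);
+
+	if (fd < 0 || pread(fd, &val, sizeof(val), 0x19a) != sizeof(val)) {
+		perror("msr");
+		return 1;
+	}
+	printf("MSR 0x19a = %#llx\n", (unsigned long long)val);
+	return 0;
+  }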
+
+Although some BIOSen want to change the CPU Duty Cycle during
+S3, in our case we don't want the BIOS to do any modification.
+
+Fix this issue by introducing a more generic x86 framework to
+save/restore specified MSR registers (THERM_CONTROL in this case)
+for suspend/resume. This allows us to fix similar bugs in a much
+simpler way in the future.
+
+When the kernel wants to protect certain MSRs across suspend/resume,
+we simply add a quirk entry to msr_save_dmi_table and customize the
+MSR registers inside the quirk callback, for example:
+
+ u32 msr_id_need_to_save[] = {MSR_ID0, MSR_ID1, MSR_ID2...};
+
+and the quirk mechanism ensures that, once resumed from suspend,
+the MSRs indicated by these IDs will be restored to their
+original, pre-suspend values.
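+
+Concretely, a hypothetical quirk for some other platform would mirror
+msr_initialize_bdw() below (the "FOO" name and DMI strings are made up;
+only msr_init_context(), msr_save_dmi_table and MSR_IA32_THERM_CONTROL
+come from this patch):
+
+  /* Hypothetical callback: list the MSRs this platform needs saved */
+  static int msr_initialize_foo(const struct dmi_system_id *d)
+  {
+	u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
+
+	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
+		d->ident);
+	return msr_init_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
+  }
+
+  /* ... and its entry in msr_save_dmi_table[]: */
+  {
+	.callback = msr_initialize_foo,
+	.ident = "FOO PLATFORM",
+	.matches = {
+		DMI_MATCH(DMI_PRODUCT_NAME, "FOO"),
+	},
+  },
+
+Note that in this version msr_init_context() refuses to run twice, so
+only one matching quirk can actually register MSRs; a later change
+(msr_build_context() in the patch above) relaxes that restriction.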
+
+Since both 64-bit and 32-bit kernels are affected, this patch
+covers the common 64/32-bit suspend/resume code path. And because the
+MSRs specified by the user might not be available or readable in every
+situation, we use rdmsrl_safe() to safely save these MSRs.
+
+Reported-and-tested-by: Marcin Kaszewski <marcin.kaszewski@intel.com>
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bp@suse.de
+Cc: len.brown@intel.com
+Cc: linux@horizon.com
+Cc: luto@kernel.org
+Cc: rjw@rjwysocki.net
+Link: http://lkml.kernel.org/r/c9abdcbc173dd2f57e8990e304376f19287e92ba.1448382971.git.yu.c.chen@intel.com
+[ More edits to the naming of data structures. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/x86/include/asm/msr.h | 10 ++++
+ arch/x86/include/asm/suspend_32.h | 1 +
+ arch/x86/include/asm/suspend_64.h | 1 +
+ arch/x86/power/cpu.c | 92 +++++++++++++++++++++++++++++++
+ 4 files changed, 104 insertions(+)
+
+diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
+index 5a10ac8c131ea..20f822fec8af0 100644
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -32,6 +32,16 @@ struct msr_regs_info {
+ int err;
+ };
+
++struct saved_msr {
++ bool valid;
++ struct msr_info info;
++};
++
++struct saved_msrs {
++ unsigned int num;
++ struct saved_msr *array;
++};
++
+ static inline unsigned long long native_read_tscp(unsigned int *aux)
+ {
+ unsigned long low, high;
+diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
+index d1793f06854d2..8e9dbe7b73a1f 100644
+--- a/arch/x86/include/asm/suspend_32.h
++++ b/arch/x86/include/asm/suspend_32.h
+@@ -15,6 +15,7 @@ struct saved_context {
+ unsigned long cr0, cr2, cr3, cr4;
+ u64 misc_enable;
+ bool misc_enable_saved;
++ struct saved_msrs saved_msrs;
+ struct desc_ptr gdt_desc;
+ struct desc_ptr idt;
+ u16 ldt;
+diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
+index 7ebf0ebe4e687..6136a18152af2 100644
+--- a/arch/x86/include/asm/suspend_64.h
++++ b/arch/x86/include/asm/suspend_64.h
+@@ -24,6 +24,7 @@ struct saved_context {
+ unsigned long cr0, cr2, cr3, cr4, cr8;
+ u64 misc_enable;
+ bool misc_enable_saved;
++ struct saved_msrs saved_msrs;
+ unsigned long efer;
+ u16 gdt_pad; /* Unused */
+ struct desc_ptr gdt_desc;
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 9ab52791fed59..d5f64996394a9 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -23,6 +23,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/cpu.h>
+ #include <asm/mmu_context.h>
++#include <linux/dmi.h>
+
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -32,6 +33,29 @@ __visible unsigned long saved_context_eflags;
+ #endif
+ struct saved_context saved_context;
+
++static void msr_save_context(struct saved_context *ctxt)
++{
++ struct saved_msr *msr = ctxt->saved_msrs.array;
++ struct saved_msr *end = msr + ctxt->saved_msrs.num;
++
++ while (msr < end) {
++ msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
++ msr++;
++ }
++}
++
++static void msr_restore_context(struct saved_context *ctxt)
++{
++ struct saved_msr *msr = ctxt->saved_msrs.array;
++ struct saved_msr *end = msr + ctxt->saved_msrs.num;
++
++ while (msr < end) {
++ if (msr->valid)
++ wrmsrl(msr->info.msr_no, msr->info.reg.q);
++ msr++;
++ }
++}
++
+ /**
+ * __save_processor_state - save CPU registers before creating a
+ * hibernation image and before restoring the memory state from it
+@@ -111,6 +135,7 @@ static void __save_processor_state(struct saved_context *ctxt)
+ #endif
+ ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+ &ctxt->misc_enable);
++ msr_save_context(ctxt);
+ }
+
+ /* Needed by apm.c */
+@@ -229,6 +254,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ x86_platform.restore_sched_clock_state();
+ mtrr_bp_restore();
+ perf_restore_debug_store();
++ msr_restore_context(ctxt);
+ }
+
+ /* Needed by apm.c */
+@@ -320,3 +346,69 @@ static int __init bsp_pm_check_init(void)
+ }
+
+ core_initcall(bsp_pm_check_init);
++
++static int msr_init_context(const u32 *msr_id, const int total_num)
++{
++ int i = 0;
++ struct saved_msr *msr_array;
++
++ if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
++ pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
++ return -EINVAL;
++ }
++
++ msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
++ if (!msr_array) {
++ pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < total_num; i++) {
++ msr_array[i].info.msr_no = msr_id[i];
++ msr_array[i].valid = false;
++ msr_array[i].info.reg.q = 0;
++ }
++ saved_context.saved_msrs.num = total_num;
++ saved_context.saved_msrs.array = msr_array;
++
++ return 0;
++}
++
++/*
++ * The following section is a quirk framework for problematic BIOSen:
++ * Sometimes MSRs are modified by the BIOSen after suspended to
++ * RAM, this might cause unexpected behavior after wakeup.
++ * Thus we save/restore these specified MSRs across suspend/resume
++ * in order to work around it.
++ *
++ * For any further problematic BIOSen/platforms,
++ * please add your own function similar to msr_initialize_bdw.
++ */
++static int msr_initialize_bdw(const struct dmi_system_id *d)
++{
++ /* Add any extra MSR ids into this array. */
++ u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
++
++ pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
++ return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
++}
++
++static struct dmi_system_id msr_save_dmi_table[] = {
++ {
++ .callback = msr_initialize_bdw,
++ .ident = "BROADWELL BDX_EP",
++ .matches = {
++ DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
++ },
++ },
++ {}
++};
++
++static int pm_check_save_msr(void)
++{
++ dmi_check_system(msr_save_dmi_table);
++ return 0;
++}
++
++device_initcall(pm_check_save_msr);
+--
+2.20.1
+