--- /dev/null
+From 75b0cea7bf307f362057cc778efe89af4c615354 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Mon, 15 Jun 2020 04:43:32 -0600
+Subject: ACPI: configfs: Disallow loading ACPI tables when locked down
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 75b0cea7bf307f362057cc778efe89af4c615354 upstream.
+
+Like other vectors already patched, this one here allows the root
+user to load ACPI tables, which enables arbitrary physical address
+writes, which in turn makes it possible to disable lockdown.
+
+Prevent this by checking the lockdown status before allowing a new
+ACPI table to be installed. The link in the trailer shows a PoC of
+how this might be used.
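+
+Conceptually, the guard added below follows the usual lockdown pattern
+for a privileged write path; security_locked_down() returns -EPERM
+while the kernel is locked down, so the write is rejected up front:
+
+	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+	if (ret)
+		return ret;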
+
+Link: https://git.zx2c4.com/american-unsigned-language/tree/american-unsigned-language-2.sh
+Cc: 5.4+ <stable@vger.kernel.org> # 5.4+
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpi_configfs.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/acpi_configfs.c
++++ b/drivers/acpi/acpi_configfs.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <linux/configfs.h>
+ #include <linux/acpi.h>
++#include <linux/security.h>
+
+ #include "acpica/accommon.h"
+ #include "acpica/actables.h"
+@@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(stru
+ {
+ const struct acpi_table_header *header = data;
+ struct acpi_table *table;
+- int ret;
++ int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
++
++ if (ret)
++ return ret;
+
+ table = container_of(cfg, struct acpi_table, cfg);
+
--- /dev/null
+From e6d701dca9893990d999fd145e3e07223c002b06 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <natechancellor@gmail.com>
+Date: Thu, 11 Jun 2020 21:51:50 -0700
+Subject: ACPI: sysfs: Fix pm_profile_attr type
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+commit e6d701dca9893990d999fd145e3e07223c002b06 upstream.
+
+When running a kernel with Clang's Control Flow Integrity implemented,
+there is a violation that happens when accessing
+/sys/firmware/acpi/pm_profile:
+
+$ cat /sys/firmware/acpi/pm_profile
+0
+
+$ dmesg
+...
+[ 17.352564] ------------[ cut here ]------------
+[ 17.352568] CFI failure (target: acpi_show_profile+0x0/0x8):
+[ 17.352572] WARNING: CPU: 3 PID: 497 at kernel/cfi.c:29 __cfi_check_fail+0x33/0x40
+[ 17.352573] Modules linked in:
+[ 17.352575] CPU: 3 PID: 497 Comm: cat Tainted: G W 5.7.0-microsoft-standard+ #1
+[ 17.352576] RIP: 0010:__cfi_check_fail+0x33/0x40
+[ 17.352577] Code: 48 c7 c7 50 b3 85 84 48 c7 c6 50 0a 4e 84 e8 a4 d8 60 00 85 c0 75 02 5b c3 48 c7 c7 dc 5e 49 84 48 89 de 31 c0 e8 7d 06 eb ff <0f> 0b 5b c3 00 00 cc cc 00 00 cc cc 00 85 f6 74 25 41 b9 ea ff ff
+[ 17.352577] RSP: 0018:ffffaa6dc3c53d30 EFLAGS: 00010246
+[ 17.352578] RAX: 331267e0c06cee00 RBX: ffffffff83d85890 RCX: ffffffff8483a6f8
+[ 17.352579] RDX: ffff9cceabbb37c0 RSI: 0000000000000082 RDI: ffffffff84bb9e1c
+[ 17.352579] RBP: ffffffff845b2bc8 R08: 0000000000000001 R09: ffff9cceabbba200
+[ 17.352579] R10: 000000000000019d R11: 0000000000000000 R12: ffff9cc947766f00
+[ 17.352580] R13: ffffffff83d6bd50 R14: ffff9ccc6fa80000 R15: ffffffff845bd328
+[ 17.352582] FS: 00007fdbc8d13580(0000) GS:ffff9cce91ac0000(0000) knlGS:0000000000000000
+[ 17.352582] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 17.352583] CR2: 00007fdbc858e000 CR3: 00000005174d0000 CR4: 0000000000340ea0
+[ 17.352584] Call Trace:
+[ 17.352586] ? rev_id_show+0x8/0x8
+[ 17.352587] ? __cfi_check+0x45bac/0x4b640
+[ 17.352589] ? kobj_attr_show+0x73/0x80
+[ 17.352590] ? sysfs_kf_seq_show+0xc1/0x140
+[ 17.352592] ? ext4_seq_options_show.cfi_jt+0x8/0x8
+[ 17.352593] ? seq_read+0x180/0x600
+[ 17.352595] ? sysfs_create_file_ns.cfi_jt+0x10/0x10
+[ 17.352596] ? tlbflush_read_file+0x8/0x8
+[ 17.352597] ? __vfs_read+0x6b/0x220
+[ 17.352598] ? handle_mm_fault+0xa23/0x11b0
+[ 17.352599] ? vfs_read+0xa2/0x130
+[ 17.352599] ? ksys_read+0x6a/0xd0
+[ 17.352601] ? __do_sys_getpgrp+0x8/0x8
+[ 17.352602] ? do_syscall_64+0x72/0x120
+[ 17.352603] ? entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ 17.352604] ---[ end trace 7b1fa81dc897e419 ]---
+
+When /sys/firmware/acpi/pm_profile is read, sysfs_kf_seq_show() is
+called, which in turn calls kobj_attr_show(), which retrieves the
+->show callback by calling container_of() on the attribute (casting it
+to struct kobj_attribute) and then invokes it.
+
+There is a CFI violation because pm_profile_attr is of type
+struct device_attribute but kobj_attr_show calls ->show expecting it
+to be from struct kobj_attribute. CFI checking ensures that function
+pointer types match when doing indirect calls. Fix pm_profile_attr to
+be defined in terms of kobj_attribute so there is no violation or
+mismatch.
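+
+A minimal sketch of the two callback types involved (simplified from
+include/linux/kobject.h and include/linux/device.h):
+
+	struct kobj_attribute {
+		struct attribute attr;
+		ssize_t (*show)(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf);
+		/* store omitted */
+	};
+
+	struct device_attribute {
+		struct attribute attr;
+		ssize_t (*show)(struct device *dev,
+				struct device_attribute *attr, char *buf);
+		/* store omitted */
+	};
+
+The ->show prototypes differ, so the indirect call in kobj_attr_show()
+fails Clang's CFI type check even though the structure layouts happen
+to line up.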
+
+Fixes: 362b646062b2 ("ACPI: Export FADT pm_profile integer value to userspace")
+Link: https://github.com/ClangBuiltLinux/linux/issues/1051
+Reported-by: yuu ichii <byahu140@heisei.be>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Cc: 3.10+ <stable@vger.kernel.org> # 3.10+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/sysfs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(
+ }
+
+ static ssize_t
+-acpi_show_profile(struct device *dev, struct device_attribute *attr,
++acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+ {
+ return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
+ }
+
+-static const struct device_attribute pm_profile_attr =
++static const struct kobj_attribute pm_profile_attr =
+ __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
+
+ static ssize_t hotplug_enabled_show(struct kobject *kobj,
--- /dev/null
+From adb36a8203831e40494a92095dacd566b2ad4a69 Mon Sep 17 00:00:00 2001
+From: Aaron Plattner <aplattner@nvidia.com>
+Date: Thu, 11 Jun 2020 11:08:45 -0700
+Subject: ALSA: hda: Add NVIDIA codec IDs 9a & 9d through a0 to patch table
+
+From: Aaron Plattner <aplattner@nvidia.com>
+
+commit adb36a8203831e40494a92095dacd566b2ad4a69 upstream.
+
+These IDs are for upcoming NVIDIA chips with audio functions that are largely
+similar to the existing ones.
+
+Signed-off-by: Aaron Plattner <aplattner@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200611180845.39942-1-aplattner@nvidia.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_hdmi.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4146,6 +4146,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI
+ HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
--- /dev/null
+From b2c22910fe5aae10b7e17b0721e63a3edf0c9553 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Wed, 17 Jun 2020 18:29:02 +0800
+Subject: ALSA: hda/realtek: Add mute LED and micmute LED support for HP systems
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit b2c22910fe5aae10b7e17b0721e63a3edf0c9553 upstream.
+
+There are two more HP systems that control the mute LED from the HDA
+codec and need to expose a micmute LED class so that SoF can control
+the micmute LED.
+
+Add quirks to support them.
+
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200617102906.16156-2-kai.heng.feng@canonical.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7436,6 +7436,8 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++ SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
++ SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
--- /dev/null
+From a0b03952a797591d4b6d6fa7b9b7872e27783729 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 16 Jun 2020 15:21:50 +0200
+Subject: ALSA: hda/realtek - Add quirk for MSI GE63 laptop
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit a0b03952a797591d4b6d6fa7b9b7872e27783729 upstream.
+
+MSI GE63 laptop with ALC1220 codec requires the very same quirk
+(ALC1220_FIXUP_CLEVO_P950) as other MSI devices for the proper sound
+output.
+
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=208057
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200616132150.8778-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2460,6 +2460,7 @@ static const struct snd_pci_quirk alc882
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
++ SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
--- /dev/null
+From 3c597282887fd55181578996dca52ce697d985a5 Mon Sep 17 00:00:00 2001
+From: Gao Xiang <hsiangkao@redhat.com>
+Date: Fri, 19 Jun 2020 07:43:49 +0800
+Subject: erofs: fix partially uninitialized misuse in z_erofs_onlinepage_fixup
+
+From: Gao Xiang <hsiangkao@redhat.com>
+
+commit 3c597282887fd55181578996dca52ce697d985a5 upstream.
+
+Hongyu reported that the "id != index" assertion in
+z_erofs_onlinepage_fixup() triggers easily in a specific aarch64
+environment, which hadn't been seen before.
+
+After digging into that, I found that the high 32 bits of
+page->private were set to 0xaaaaaaaa rather than 0 (due to
+z_erofs_onlinepage_init behavior with specific compiler options).
+We only use the low 32 bits to keep the page information, since
+page->private is only 4 bytes on most 32-bit platforms. However,
+z_erofs_onlinepage_fixup() mistakenly consumed the upper 32 bits
+as well.
+
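+For reference, the fix below reads page->private through the converter
+union defined next to these helpers in fs/erofs/zdata.h, so that only
+an atomic_t (32-bit) view of the field is ever accessed:
+
+	union z_erofs_onlinepage_converter {
+		atomic_t *o;
+		unsigned long *v;
+	};
+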
+Let's fix it now.
+
+Reported-and-tested-by: Hongyu Jin <hongyu.jin@unisoc.com>
+Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
+Cc: <stable@vger.kernel.org> # 4.19+
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Link: https://lore.kernel.org/r/20200618234349.22553-1-hsiangkao@aol.com
+Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/erofs/zdata.h | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/fs/erofs/zdata.h
++++ b/fs/erofs/zdata.h
+@@ -148,22 +148,22 @@ static inline void z_erofs_onlinepage_in
+ static inline void z_erofs_onlinepage_fixup(struct page *page,
+ uintptr_t index, bool down)
+ {
+- unsigned long *p, o, v, id;
+-repeat:
+- p = &page_private(page);
+- o = READ_ONCE(*p);
++ union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
++ int orig, orig_index, val;
+
+- id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+- if (id) {
++repeat:
++ orig = atomic_read(u.o);
++ orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
++ if (orig_index) {
+ if (!index)
+ return;
+
+- DBG_BUGON(id != index);
++ DBG_BUGON(orig_index != index);
+ }
+
+- v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+- ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+- if (cmpxchg(p, o, v) != o)
++ val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
++ ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
++ if (atomic_cmpxchg(u.o, orig, val) != orig)
+ goto repeat;
+ }
+
--- /dev/null
+From 2dbebf7ae1ed9a420d954305e2c9d5ed39ec57c3 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Mon, 22 Jun 2020 14:58:29 -0700
+Subject: KVM: nVMX: Plumb L2 GPA through to PML emulation
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 2dbebf7ae1ed9a420d954305e2c9d5ed39ec57c3 upstream.
+
+Explicitly pass the L2 GPA to kvm_arch_write_log_dirty(), which for all
+intents and purposes is vmx_write_pml_buffer(), instead of having the
+latter pull the GPA from vmcs.GUEST_PHYSICAL_ADDRESS. If the dirty bit
+update is the result of KVM emulation (rare for L2), then the GPA in the
+VMCS may be stale and/or hold a completely unrelated GPA.
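+
+A condensed view of the resulting call path (arguments abbreviated;
+see the hunks below for the exact signatures):
+
+	FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, addr, ...)
+	  kvm_arch_write_log_dirty(vcpu, addr)       /* addr is the L2 GPA */
+	    kvm_x86_ops->write_log_dirty(vcpu, addr)
+	      vmx_write_pml_buffer(vcpu, gpa)         /* no vmcs_read64() */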
+
+Fixes: c5f983f6e8455 ("nVMX: Implement emulated Page Modification Logging")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Message-Id: <20200622215832.22090-2-sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kvm/mmu.c | 4 ++--
+ arch/x86/kvm/mmu.h | 2 +-
+ arch/x86/kvm/paging_tmpl.h | 7 ++++---
+ arch/x86/kvm/vmx/vmx.c | 6 +++---
+ 5 files changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1160,7 +1160,7 @@ struct kvm_x86_ops {
+ void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t offset, unsigned long mask);
+- int (*write_log_dirty)(struct kvm_vcpu *vcpu);
++ int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
+
+ /* pmu operations of sub-arch */
+ const struct kvm_pmu_ops *pmu_ops;
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -1819,10 +1819,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_ma
+ * Emulate arch specific page modification logging for the
+ * nested hypervisor
+ */
+-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
++int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
+ {
+ if (kvm_x86_ops->write_log_dirty)
+- return kvm_x86_ops->write_log_dirty(vcpu);
++ return kvm_x86_ops->write_log_dirty(vcpu, l2_gpa);
+
+ return 0;
+ }
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -209,7 +209,7 @@ void kvm_mmu_gfn_disallow_lpage(struct k
+ void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+ struct kvm_memory_slot *slot, u64 gfn);
+-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
++int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
+
+ int kvm_mmu_post_init_vm(struct kvm *kvm);
+ void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -220,7 +220,7 @@ static inline unsigned FNAME(gpte_access
+ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu,
+ struct guest_walker *walker,
+- int write_fault)
++ gpa_t addr, int write_fault)
+ {
+ unsigned level, index;
+ pt_element_t pte, orig_pte;
+@@ -245,7 +245,7 @@ static int FNAME(update_accessed_dirty_b
+ !(pte & PT_GUEST_DIRTY_MASK)) {
+ trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
+ #if PTTYPE == PTTYPE_EPT
+- if (kvm_arch_write_log_dirty(vcpu))
++ if (kvm_arch_write_log_dirty(vcpu, addr))
+ return -EINVAL;
+ #endif
+ pte |= PT_GUEST_DIRTY_MASK;
+@@ -442,7 +442,8 @@ retry_walk:
+ (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
+
+ if (unlikely(!accessed_dirty)) {
+- ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
++ ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
++ addr, write_fault);
+ if (unlikely(ret < 0))
+ goto error;
+ else if (ret)
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7272,11 +7272,11 @@ static void vmx_flush_log_dirty(struct k
+ kvm_flush_pml_buffers(kvm);
+ }
+
+-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
++static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
+ {
+ struct vmcs12 *vmcs12;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+- gpa_t gpa, dst;
++ gpa_t dst;
+
+ if (is_guest_mode(vcpu)) {
+ WARN_ON_ONCE(vmx->nested.pml_full);
+@@ -7295,7 +7295,7 @@ static int vmx_write_pml_buffer(struct k
+ return 1;
+ }
+
+- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
++ gpa &= ~0xFFFull;
+ dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
+
+ if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
--- /dev/null
+From bf09fb6cba4f7099620cc9ed32d94c27c4af992e Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Mon, 22 Jun 2020 17:51:35 -0700
+Subject: KVM: VMX: Stop context switching MSR_IA32_UMWAIT_CONTROL
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit bf09fb6cba4f7099620cc9ed32d94c27c4af992e upstream.
+
+Remove support for context switching between the guest's and host's
+desired UMWAIT_CONTROL. Propagating the guest's value to hardware isn't
+required for correct functionality, e.g. KVM intercepts reads and writes
+to the MSR, and the latency effects of the settings controlled by the
+MSR are not architecturally visible.
+
+As a general rule, KVM should not allow the guest to control power
+management settings unless explicitly enabled by userspace, e.g. see
+KVM_CAP_X86_DISABLE_EXITS. E.g. Intel's SDM explicitly states that C0.2
+can improve the performance of SMT siblings. A devious guest could
+disable C0.2 so as to improve the performance of their workloads to the
+detriment of workloads running in the host or on other VMs.
+
+Wholesale removal of UMWAIT_CONTROL context switching also fixes a race
+condition where updates from the host may cause KVM to enter the guest
+with the incorrect value. Because updates are propagated to all
+CPUs via IPI (SMP function callback), the value in hardware may be
+stale with respect to the cached value and KVM could enter the guest
+with the wrong value in hardware. As above, the guest can't observe the
+bad value, but it's a weird and confusing wart in the implementation.
+
+Removal also fixes the unnecessary usage of VMX's atomic load/store MSR
+lists. Using the lists is only necessary for MSRs that are required for
+correct functionality immediately upon VM-Enter/VM-Exit, e.g. EFER on
+old hardware, or for MSRs that need to-the-uop precision, e.g. perf
+related MSRs. For UMWAIT_CONTROL, the effects are only visible in the
+kernel via TPAUSE/delay(), and KVM doesn't do any form of delay in
+vmx_vcpu_run(). Using the atomic lists is undesirable as they are more
+expensive than direct RDMSR/WRMSR.
+
+Furthermore, even if giving the guest control of the MSR is legitimate,
+e.g. in pass-through scenarios, it's not clear that the benefits would
+outweigh the overhead. E.g. saving and restoring an MSR across a VMX
+roundtrip costs ~250 cycles, and if the guest diverged from the host
+that cost would be paid on every run of the guest. In other words, if
+there is a legitimate use case then it should be enabled by a new
+per-VM capability.
+
+Note, KVM still needs to emulate MSR_IA32_UMWAIT_CONTROL so that it can
+correctly expose other WAITPKG features to the guest, e.g. TPAUSE,
+UMWAIT and UMONITOR.
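+
+With the context switching gone, the MSR is purely emulated; a
+simplified sketch of the surviving write path in vmx_set_msr()
+(reserved-bit checks omitted):
+
+	case MSR_IA32_UMWAIT_CONTROL:
+		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
+			return 1;
+
+		/* Cached only; never loaded into hardware on VM-Enter. */
+		vmx->msr_ia32_umwait_control = data;
+		break;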
+
+Fixes: 6e3ba4abcea56 ("KVM: vmx: Emulate MSR IA32_UMWAIT_CONTROL")
+Cc: stable@vger.kernel.org
+Cc: Jingqi Liu <jingqi.liu@intel.com>
+Cc: Tao Xu <tao3.xu@intel.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Message-Id: <20200623005135.10414-1-sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/umwait.c | 6 ------
+ arch/x86/kvm/vmx/vmx.c | 18 ------------------
+ arch/x86/kvm/vmx/vmx.h | 2 --
+ 3 files changed, 26 deletions(-)
+
+--- a/arch/x86/kernel/cpu/umwait.c
++++ b/arch/x86/kernel/cpu/umwait.c
+@@ -17,12 +17,6 @@
+ */
+ static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
+
+-u32 get_umwait_control_msr(void)
+-{
+- return umwait_control_cached;
+-}
+-EXPORT_SYMBOL_GPL(get_umwait_control_msr);
+-
+ /*
+ * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
+ * hardware or BIOS before kernel boot.
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6427,23 +6427,6 @@ static void atomic_switch_perf_msrs(stru
+ msrs[i].host, false);
+ }
+
+-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
+-{
+- u32 host_umwait_control;
+-
+- if (!vmx_has_waitpkg(vmx))
+- return;
+-
+- host_umwait_control = get_umwait_control_msr();
+-
+- if (vmx->msr_ia32_umwait_control != host_umwait_control)
+- add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
+- vmx->msr_ia32_umwait_control,
+- host_umwait_control, false);
+- else
+- clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
+-}
+-
+ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -6533,7 +6516,6 @@ static void vmx_vcpu_run(struct kvm_vcpu
+ pt_guest_enter(vmx);
+
+ atomic_switch_perf_msrs(vmx);
+- atomic_switch_umwait_control_msr(vmx);
+
+ if (enable_preemption_timer)
+ vmx_update_hv_timer(vcpu);
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -14,8 +14,6 @@
+ extern const u32 vmx_msr_index[];
+ extern u64 host_efer;
+
+-extern u32 get_umwait_control_msr(void);
+-
+ #define MSR_TYPE_R 1
+ #define MSR_TYPE_W 2
+ #define MSR_TYPE_RW 3
--- /dev/null
+From bf10bd0be53282183f374af23577b18b5fbf7801 Mon Sep 17 00:00:00 2001
+From: Xiaoyao Li <xiaoyao.li@intel.com>
+Date: Tue, 16 Jun 2020 15:33:07 +0800
+Subject: KVM: X86: Fix MSR range of APIC registers in X2APIC mode
+
+From: Xiaoyao Li <xiaoyao.li@intel.com>
+
+commit bf10bd0be53282183f374af23577b18b5fbf7801 upstream.
+
+Only MSR address range 0x800 through 0x8ff is architecturally reserved
+and dedicated for accessing APIC registers in x2APIC mode. Since
+APIC_BASE_MSR is 0x800, the range must end at APIC_BASE_MSR + 0xff,
+not APIC_BASE_MSR + 0x3ff as before.
+
+Fixes: 0105d1a52640 ("KVM: x2apic interface to lapic")
+Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
+Message-Id: <20200616073307.16440-1-xiaoyao.li@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2753,7 +2753,7 @@ int kvm_set_msr_common(struct kvm_vcpu *
+ return kvm_mtrr_set_msr(vcpu, msr, data);
+ case MSR_IA32_APICBASE:
+ return kvm_set_apic_base(vcpu, msr_info);
+- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
++ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
+ return kvm_x2apic_msr_write(vcpu, msr, data);
+ case MSR_IA32_TSCDEADLINE:
+ kvm_set_lapic_tscdeadline_msr(vcpu, data);
+@@ -3057,7 +3057,7 @@ int kvm_get_msr_common(struct kvm_vcpu *
+ case MSR_IA32_APICBASE:
+ msr_info->data = kvm_get_apic_base(vcpu);
+ break;
+- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
++ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
+ return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
+ break;
+ case MSR_IA32_TSCDEADLINE:
blktrace-break-out-of-blktrace-setup-on-concurrent-c.patch
block-update-hctx-map-when-use-multiple-maps.patch
risc-v-don-t-allow-write-exec-only-page-mapping-requ.patch
+alsa-hda-add-nvidia-codec-ids-9a-9d-through-a0-to-patch-table.patch
+alsa-hda-realtek-add-quirk-for-msi-ge63-laptop.patch
+alsa-hda-realtek-add-mute-led-and-micmute-led-support-for-hp-systems.patch
+acpi-sysfs-fix-pm_profile_attr-type.patch
+acpi-configfs-disallow-loading-acpi-tables-when-locked-down.patch
+erofs-fix-partially-uninitialized-misuse-in-z_erofs_onlinepage_fixup.patch
+kvm-x86-fix-msr-range-of-apic-registers-in-x2apic-mode.patch
+kvm-nvmx-plumb-l2-gpa-through-to-pml-emulation.patch
+kvm-vmx-stop-context-switching-msr_ia32_umwait_control.patch
+x86-cpu-use-pinning-mask-for-cr4-bits-needing-to-be-0.patch
+x86-asm-64-align-start-of-__clear_user-loop-to-16-bytes.patch
--- /dev/null
+From bb5570ad3b54e7930997aec76ab68256d5236d94 Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Thu, 18 Jun 2020 11:20:02 +0100
+Subject: x86/asm/64: Align start of __clear_user() loop to 16-bytes
+
+From: Matt Fleming <matt@codeblueprint.co.uk>
+
+commit bb5570ad3b54e7930997aec76ab68256d5236d94 upstream.
+
+x86 CPUs can suffer severe performance drops if a tight loop, such as
+the ones in __clear_user(), straddles a 16-byte instruction fetch
+window, or worse, a 64-byte cacheline. This issue was discovered in the
+SUSE kernel with the following commit,
+
+ 1153933703d9 ("x86/asm/64: Micro-optimize __clear_user() - Use immediate constants")
+
+which increased the code object size from 10 bytes to 15 bytes and
+caused the 8-byte copy loop in __clear_user() to be split across a
+64-byte cacheline.
+
+Aligning the start of the loop to 16 bytes (the assembler pads code
+sections with NOPs, so this is purely a placement change) makes it fit
+neatly inside a single instruction fetch window again and restores the
+performance of __clear_user(), which is used heavily when reading from
+/dev/zero.
+
+Here are some numbers from running libmicro's read_z* and pread_z*
+microbenchmarks which read from /dev/zero:
+
+ Zen 1 (Naples)
+
+ libmicro-file
+ 5.7.0-rc6 5.7.0-rc6 5.7.0-rc6
+ revert-1153933703d9+ align16+
+ Time mean95-pread_z100k 9.9195 ( 0.00%) 5.9856 ( 39.66%) 5.9938 ( 39.58%)
+ Time mean95-pread_z10k 1.1378 ( 0.00%) 0.7450 ( 34.52%) 0.7467 ( 34.38%)
+ Time mean95-pread_z1k 0.2623 ( 0.00%) 0.2251 ( 14.18%) 0.2252 ( 14.15%)
+ Time mean95-pread_zw100k 9.9974 ( 0.00%) 6.0648 ( 39.34%) 6.0756 ( 39.23%)
+ Time mean95-read_z100k 9.8940 ( 0.00%) 5.9885 ( 39.47%) 5.9994 ( 39.36%)
+ Time mean95-read_z10k 1.1394 ( 0.00%) 0.7483 ( 34.33%) 0.7482 ( 34.33%)
+
+Note that this doesn't affect Haswell or Broadwell microarchitectures
+which seem to avoid the alignment issue by executing the loop straight
+out of the Loop Stream Detector (verified using perf events).
+
+Fixes: 1153933703d9 ("x86/asm/64: Micro-optimize __clear_user() - Use immediate constants")
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@vger.kernel.org> # v4.19+
+Link: https://lkml.kernel.org/r/20200618102002.30034-1-matt@codeblueprint.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/lib/usercopy_64.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *
+ asm volatile(
+ " testq %[size8],%[size8]\n"
+ " jz 4f\n"
++ " .align 16\n"
+ "0: movq $0,(%[dst])\n"
+ " addq $8,%[dst]\n"
+ " decl %%ecx ; jnz 0b\n"
--- /dev/null
+From a13b9d0b97211579ea63b96c606de79b963c0f47 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Mon, 8 Jun 2020 20:15:09 -0700
+Subject: x86/cpu: Use pinning mask for CR4 bits needing to be 0
+
+From: Kees Cook <keescook@chromium.org>
+
+commit a13b9d0b97211579ea63b96c606de79b963c0f47 upstream.
+
+The X86_CR4_FSGSBASE bit of CR4 should not change after boot[1]. Older
+kernels should enforce this bit to zero, and newer kernels need to
+enforce it depending on boot-time configuration (e.g. "nofsgsbase").
+To support a pinned bit being either 1 or 0, use an explicit mask in
+combination with the expected pinned bit values.
+
+[1] https://lore.kernel.org/lkml/20200527103147.GI325280@hirez.programming.kicks-ass.net
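+
+With a separate mask, the pinning check conceptually becomes:
+
+	if ((val & cr4_pinned_mask) != cr4_pinned_bits) {
+		/* Force every pinned bit back to its boot-time value. */
+		val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
+	}
+
+which repairs a pinned-0 bit (e.g. FSGSBASE after "nofsgsbase") as
+well as a pinned-1 bit, unlike the old scheme of only OR-ing missing
+bits back in.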
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/202006082013.71E29A42@keescook
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/common.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -366,6 +366,9 @@ out:
+ cr4_clear_bits(X86_CR4_UMIP);
+ }
+
++/* These bits should not change their value after CPU init is finished. */
++static const unsigned long cr4_pinned_mask =
++ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
+ static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
+ static unsigned long cr4_pinned_bits __ro_after_init;
+
+@@ -390,20 +393,20 @@ EXPORT_SYMBOL(native_write_cr0);
+
+ void native_write_cr4(unsigned long val)
+ {
+- unsigned long bits_missing = 0;
++ unsigned long bits_changed = 0;
+
+ set_register:
+ asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
+
+ if (static_branch_likely(&cr_pinning)) {
+- if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
+- bits_missing = ~val & cr4_pinned_bits;
+- val |= bits_missing;
++ if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
++ bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
++ val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
+ goto set_register;
+ }
+- /* Warn after we've set the missing bits. */
+- WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
+- bits_missing);
++ /* Warn after we've corrected the changed bits. */
++ WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
++ bits_changed);
+ }
+ }
+ EXPORT_SYMBOL(native_write_cr4);
+@@ -415,7 +418,7 @@ void cr4_init(void)
+ if (boot_cpu_has(X86_FEATURE_PCID))
+ cr4 |= X86_CR4_PCIDE;
+ if (static_branch_likely(&cr_pinning))
+- cr4 |= cr4_pinned_bits;
++ cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
+
+ __write_cr4(cr4);
+
+@@ -430,10 +433,7 @@ void cr4_init(void)
+ */
+ static void __init setup_cr_pinning(void)
+ {
+- unsigned long mask;
+-
+- mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
+- cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
++ cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
+ static_key_enable(&cr_pinning.key);
+ }
+